ECC384.cpp

// This is EASY-ECC by Kenneth MacKay with some very minor modifications for ZeroTier
// https://github.com/esxgx/easy-ecc
// This code is under the BSD 2-clause license, not ZeroTier's license

#include "Constants.hpp"
#include "ECC384.hpp"
#include "Utils.hpp"

#include <cstdio>
#include <cstdlib>
#include <cstdint>

namespace ZeroTier {
namespace {

#define secp384r1 48
#define ECC_CURVE secp384r1
#define ECC_BYTES ECC_CURVE
#define NUM_ECC_DIGITS (ECC_BYTES/8)
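// Big integers ("VLIs") below are arrays of NUM_ECC_DIGITS (6) uint64_t digits
// stored least-significant digit first, so a 384-bit value v is
// v = vli[0] + vli[1]*2^64 + ... + vli[5]*2^320.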
#define MAX_TRIES 1024

#ifdef ZT_HAVE_UINT128
#define SUPPORTS_INT128 1
#else
#define SUPPORTS_INT128 0
typedef struct
{
    uint64_t m_low;
    uint64_t m_high;
} uint128_t;
#endif

typedef struct EccPoint
{
    uint64_t x[NUM_ECC_DIGITS];
    uint64_t y[NUM_ECC_DIGITS];
} EccPoint;

#define CONCAT1(a, b) a##b
#define CONCAT(a, b) CONCAT1(a, b)
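// NIST P-384 (secp384r1) domain parameters, least-significant digit first:
// the prime p = 2^384 - 2^128 - 2^96 + 2^32 - 1, the coefficient b of
// y^2 = x^3 - 3x + b, the base point G and its order n.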
#define Curve_P_48 {0x00000000FFFFFFFF, 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}
#define Curve_B_48 {0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, 0x0314088F5013875A, 0x181D9C6EFE814112, 0x988E056BE3F82D19, 0xB3312FA7E23EE7E4}
#define Curve_G_48 {{0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38, 0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537}, {0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0, 0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F}}
#define Curve_N_48 {0xECEC196ACCC52973, 0x581A0DB248B0A77A, 0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

const uint64_t curve_p[NUM_ECC_DIGITS] = CONCAT(Curve_P_, ECC_CURVE);
const uint64_t curve_b[NUM_ECC_DIGITS] = CONCAT(Curve_B_, ECC_CURVE);
const EccPoint curve_G = CONCAT(Curve_G_, ECC_CURVE);
const uint64_t curve_n[NUM_ECC_DIGITS] = CONCAT(Curve_N_, ECC_CURVE);
// Use ZeroTier's secure PRNG
ZT_INLINE int getRandomNumber(uint64_t *p_vli)
{
    Utils::getSecureRandom(p_vli,ECC_BYTES);
    return 1;
}
ZT_INLINE void vli_clear(uint64_t *p_vli)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_vli[i] = 0;
    }
}

/* Returns 1 if p_vli == 0, 0 otherwise. */
ZT_INLINE int vli_isZero(const uint64_t *p_vli)
{
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        if(p_vli[i])
        {
            return 0;
        }
    }
    return 1;
}

/* Returns nonzero if bit p_bit of p_vli is set. */
ZT_INLINE uint64_t vli_testBit(const uint64_t *p_vli,uint p_bit)
{
    return (p_vli[p_bit/64] & ((uint64_t)1 << (p_bit % 64)));
}

/* Counts the number of 64-bit "digits" in p_vli. */
ZT_INLINE uint vli_numDigits(const uint64_t *p_vli)
{
    int i;
    /* Search from the end until we find a non-zero digit.
       We do it in reverse because we expect that most digits will be nonzero. */
    for(i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i)
    {
    }
    return (i + 1);
}

/* Counts the number of bits required for p_vli. */
ZT_INLINE uint vli_numBits(const uint64_t *p_vli)
{
    uint i;
    uint64_t l_digit;
    uint l_numDigits = vli_numDigits(p_vli);
    if(l_numDigits == 0)
    {
        return 0;
    }
    l_digit = p_vli[l_numDigits - 1];
    for(i=0; l_digit; ++i)
    {
        l_digit >>= 1;
    }
    return ((l_numDigits - 1) * 64 + i);
}

/* Sets p_dest = p_src. */
ZT_INLINE void vli_set(uint64_t *p_dest,const uint64_t *p_src)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_dest[i] = p_src[i];
    }
}

/* Returns sign of p_left - p_right. */
ZT_INLINE int vli_cmp(const uint64_t *p_left,const uint64_t *p_right)
{
    int i;
    for(i = NUM_ECC_DIGITS-1; i >= 0; --i)
    {
        if(p_left[i] > p_right[i])
        {
            return 1;
        }
        else if(p_left[i] < p_right[i])
        {
            return -1;
        }
    }
    return 0;
}
/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
ZT_INLINE uint64_t vli_lshift(uint64_t *p_result,const uint64_t *p_in,uint p_shift)
{
    uint64_t l_carry = 0;
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_temp = p_in[i];
        p_result[i] = (l_temp << p_shift) | l_carry;
        l_carry = l_temp >> (64 - p_shift);
    }
    return l_carry;
}

/* Computes p_vli = p_vli >> 1. */
ZT_INLINE void vli_rshift1(uint64_t *p_vli)
{
    uint64_t *l_end = p_vli;
    uint64_t l_carry = 0;
    p_vli += NUM_ECC_DIGITS;
    while(p_vli-- > l_end)
    {
        uint64_t l_temp = *p_vli;
        *p_vli = (l_temp >> 1) | l_carry;
        l_carry = l_temp << 63;
    }
}
/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
ZT_INLINE uint64_t vli_add(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
{
    uint64_t l_carry = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
        if(l_sum != p_left[i])
        {
            l_carry = (l_sum < p_left[i]);
        }
        p_result[i] = l_sum;
    }
    return l_carry;
}

/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
ZT_INLINE uint64_t vli_sub(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
{
    uint64_t l_borrow = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
        if(l_diff != p_left[i])
        {
            l_borrow = (l_diff > p_left[i]);
        }
        p_result[i] = l_diff;
    }
    return l_borrow;
}
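// Note the guard in vli_add/vli_sub: when l_sum == p_left[i], the quantity
// p_right[i] + l_carry wrapped to exactly 0 mod 2^64, which only happens when
// it was 0 (carry stays 0) or 2^64 (carry stays 1), so l_carry is left as-is;
// in every other case the comparison recomputes it.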
#if SUPPORTS_INT128 == 1

/* Computes p_result = p_left * p_right. */
void vli_mult(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_right[k-i];
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64U) | (((uint128_t)r2) << 64U);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}
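// vli_mult and vli_square use product scanning (Comba-style multiplication):
// for each output digit k, all partial products p_left[i] * p_right[k-i] are
// summed into the 192-bit accumulator (r2, r01), the low 64 bits are emitted,
// and the accumulator is shifted right by one digit.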
/* Computes p_result = p_left^2. */
void vli_square(uint64_t *p_result, const uint64_t *p_left)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_left[k-i];
            if(i < k-i)
            {
                r2 += l_product >> 127;
                l_product *= 2;
            }
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}
#else /* #if SUPPORTS_INT128 */

uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
{
    uint128_t l_result;
    uint64_t a0 = p_left & 0xffffffffull;
    uint64_t a1 = p_left >> 32;
    uint64_t b0 = p_right & 0xffffffffull;
    uint64_t b1 = p_right >> 32;
    uint64_t m0 = a0 * b0;
    uint64_t m1 = a0 * b1;
    uint64_t m2 = a1 * b0;
    uint64_t m3 = a1 * b1;
    m2 += (m0 >> 32);
    m2 += m1;
    if(m2 < m1)
    { // overflow
        m3 += 0x100000000ull;
    }
    l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    l_result.m_high = m3 + (m2 >> 32);
    return l_result;
}
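// Without a native 128-bit integer type, mul_64_64 above builds the full
// 128-bit product from four 32x32 -> 64-bit partial products, and add_128_128
// below propagates the carry from the low half into the high half.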
ZT_INLINE uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t l_result;
    l_result.m_low = a.m_low + b.m_low;
    l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low);
    return l_result;
}

void vli_mult(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_right[k-i]);
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

void vli_square(uint64_t *p_result, const uint64_t *p_left)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_left[k-i]);
            if(i < k-i)
            {
                r2 += l_product.m_high >> 63;
                l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                l_product.m_low <<= 1;
            }
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

#endif /* SUPPORTS_INT128 */
/* Computes p_result = (p_left + p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
void vli_modAdd(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right, const uint64_t *p_mod)
{
    uint64_t l_carry = vli_add(p_result, p_left, p_right);
    if(l_carry || vli_cmp(p_result, p_mod) >= 0)
    { /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
        vli_sub(p_result, p_result, p_mod);
    }
}

/* Computes p_result = (p_left - p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
void vli_modSub(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right, const uint64_t *p_mod)
{
    uint64_t l_borrow = vli_sub(p_result, p_left, p_right);
    if(l_borrow)
    { /* In this case, p_result == -diff == (max int) - diff.
         Since -x % d == d - x, we can get the correct result from p_result + p_mod (with overflow). */
        vli_add(p_result, p_result, p_mod);
    }
}
void omega_mult(uint64_t *p_result, uint64_t *p_right)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_carry, l_diff;
    /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
    vli_set(p_result, p_right); /* 1 */
    l_carry = vli_lshift(l_tmp, p_right, 32);
    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); /* 2^96 + 1 */
    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); /* 2^128 + 2^96 + 1 */
    l_carry += vli_sub(p_result, p_result, l_tmp); /* 2^128 + 2^96 - 2^32 + 1 */
    l_diff = p_result[NUM_ECC_DIGITS] - l_carry;
    if(l_diff > p_result[NUM_ECC_DIGITS])
    { /* Propagate borrow if necessary. */
        uint i;
        for(i = 1 + NUM_ECC_DIGITS; ; ++i)
        {
            --p_result[i];
            if(p_result[i] != (uint64_t)-1)
            {
                break;
            }
        }
    }
    p_result[NUM_ECC_DIGITS] = l_diff;
}
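// omega_mult computes w * c1 where w = 2^128 + 2^96 - 2^32 + 1. Since
// p = 2^384 - w, we have 2^384 == w (mod p), so a 768-bit product split as
// c1 * 2^384 + c0 reduces to c1 * w + c0; vli_mmod_fast below iterates this
// until the high half is zero.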
/* Computes p_result = p_product % curve_p
   see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
   section "Curve-Specific Optimizations" */
void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[2*NUM_ECC_DIGITS];
    while(!vli_isZero(p_product + NUM_ECC_DIGITS)) /* While c1 != 0 */
    {
        uint64_t l_carry = 0;
        uint i;
        vli_clear(l_tmp);
        vli_clear(l_tmp + NUM_ECC_DIGITS);
        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS); /* tmp = w * c1 */
        vli_clear(p_product + NUM_ECC_DIGITS); /* p = c0 */
        /* (c1, c0) = c0 + w * c1 */
        for(i=0; i<NUM_ECC_DIGITS+3; ++i)
        {
            uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
            if(l_sum != p_product[i])
            {
                l_carry = (l_sum < p_product[i]);
            }
            p_product[i] = l_sum;
        }
    }
    while(vli_cmp(p_product, curve_p) > 0)
    {
        vli_sub(p_product, p_product, curve_p);
    }
    vli_set(p_result, p_product);
}
/* Computes p_result = (p_left * p_right) % curve_p. */
ZT_INLINE void vli_modMult_fast(uint64_t *p_result,uint64_t *p_left,const uint64_t *p_right)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_mult(l_product, p_left, p_right);
    vli_mmod_fast(p_result, l_product);
}

/* Computes p_result = p_left^2 % curve_p. */
ZT_INLINE void vli_modSquare_fast(uint64_t *p_result,uint64_t *p_left)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_square(l_product, p_left);
    vli_mmod_fast(p_result, l_product);
}
#define EVEN(vli) (!(vli[0] & 1))

/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
   See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
   https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
void vli_modInv(uint64_t *p_result, uint64_t *p_input, const uint64_t *p_mod)
{
    uint64_t a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS], u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS];
    uint64_t l_carry;
    int l_cmpResult;
    if(vli_isZero(p_input))
    {
        vli_clear(p_result);
        return;
    }
    vli_set(a, p_input);
    vli_set(b, p_mod);
    vli_clear(u);
    u[0] = 1;
    vli_clear(v);
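    /* Invariants: a == u * p_input (mod p_mod) and b == v * p_input (mod p_mod).
       Halving an odd u or v requires adding p_mod first, which is why the carry
       out of vli_add is folded back into the top bit after each right shift. */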
    while((l_cmpResult = vli_cmp(a, b)) != 0)
    {
        l_carry = 0;
        if(EVEN(a))
        {
            vli_rshift1(a);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(EVEN(b))
        {
            vli_rshift1(b);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(l_cmpResult > 0)
        {
            vli_sub(a, a, b);
            vli_rshift1(a);
            if(vli_cmp(u, v) < 0)
            {
                vli_add(u, u, p_mod);
            }
            vli_sub(u, u, v);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else
        {
            vli_sub(b, b, a);
            vli_rshift1(b);
            if(vli_cmp(v, u) < 0)
            {
                vli_add(v, v, p_mod);
            }
            vli_sub(v, v, u);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
    }
    vli_set(p_result, u);
}
/* ------ Point operations ------ */

/* Returns 1 if p_point is the point at infinity, 0 otherwise. */
ZT_INLINE int EccPoint_isZero(EccPoint *p_point)
{
    return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
   From http://eprint.iacr.org/2011/338.pdf
*/

/* Double in place */
void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
{
    /* t1 = X, t2 = Y, t3 = Z */
    uint64_t t4[NUM_ECC_DIGITS];
    uint64_t t5[NUM_ECC_DIGITS];
    if(vli_isZero(Z1))
    {
        return;
    }
    vli_modSquare_fast(t4, Y1);      /* t4 = y1^2 */
    vli_modMult_fast(t5, X1, t4);    /* t5 = x1*y1^2 = A */
    vli_modSquare_fast(t4, t4);      /* t4 = y1^4 */
    vli_modMult_fast(Y1, Y1, Z1);    /* t2 = y1*z1 = z3 */
    vli_modSquare_fast(Z1, Z1);      /* t3 = z1^2 */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = x1 + z1^2 */
    vli_modAdd(Z1, Z1, Z1, curve_p); /* t3 = 2*z1^2 */
    vli_modSub(Z1, X1, Z1, curve_p); /* t3 = x1 - z1^2 */
    vli_modMult_fast(X1, X1, Z1);    /* t1 = x1^2 - z1^4 */
    vli_modAdd(Z1, X1, X1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
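    /* Divide t1 by 2 (mod p): if t1 is odd, add p to make it even first, then
       shift right one bit, folding the carry back into the top bit. */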
    if(vli_testBit(X1, 0))
    {
        uint64_t l_carry = vli_add(X1, X1, curve_p);
        vli_rshift1(X1);
        X1[NUM_ECC_DIGITS-1] |= l_carry << 63U;
    }
    else
    {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */
    vli_modSquare_fast(Z1, X1);      /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5, t5, Z1, curve_p); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5);    /* t1 = B * (A - x3) */
    vli_modSub(t4, X1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */
    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4);
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
{
    uint64_t t1[NUM_ECC_DIGITS];
    vli_modSquare_fast(t1, Z);    /* z^2 */
    vli_modMult_fast(X1, X1, t1); /* x1 * z^2 */
    vli_modMult_fast(t1, t1, Z);  /* z^3 */
    vli_modMult_fast(Y1, Y1, t1); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
{
    uint64_t z[NUM_ECC_DIGITS];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    vli_clear(z);
    z[0] = 1;
    if(p_initialZ)
    {
        vli_set(z, p_initialZ);
    }
    apply_z(X1, Y1, z);
    EccPoint_double_jacobian(X1, Y1, z);
    apply_z(X2, Y2, z);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
   or P => P', Q => P + Q
*/
void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSquare_fast(t5, Y2);      /* t5 = (y2 - y1)^2 = D */
    vli_modSub(t5, t5, X1, curve_p); /* t5 = D - B */
    vli_modSub(t5, t5, X2, curve_p); /* t5 = D - B - C = x3 */
    vli_modSub(X2, X2, X1, curve_p); /* t3 = C - B */
    vli_modMult_fast(Y1, Y1, X2);    /* t2 = y1*(C - B) */
    vli_modSub(X2, X1, t5, curve_p); /* t3 = B - x3 */
    vli_modMult_fast(Y2, Y2, X2);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_set(X2, t5);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
   or P => P - Q, Q => P + Q
*/
void XYcZ_addC(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    uint64_t t6[NUM_ECC_DIGITS];
    uint64_t t7[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modAdd(t5, Y2, Y1, curve_p); /* t5 = y2 + y1 */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSub(t6, X2, X1, curve_p); /* t6 = C - B */
    vli_modMult_fast(Y1, Y1, t6);    /* t2 = y1 * (C - B) */
    vli_modAdd(t6, X1, X2, curve_p); /* t6 = B + C */
    vli_modSquare_fast(X2, Y2);      /* t3 = (y2 - y1)^2 */
    vli_modSub(X2, X2, t6, curve_p); /* t3 = x3 */
    vli_modSub(t7, X1, X2, curve_p); /* t7 = B - x3 */
    vli_modMult_fast(Y2, Y2, t7);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_modSquare_fast(t7, t5);      /* t7 = (y2 + y1)^2 = F */
    vli_modSub(t7, t7, t6, curve_p); /* t7 = x3' */
    vli_modSub(t6, t7, X1, curve_p); /* t6 = x3' - B */
    vli_modMult_fast(t6, t6, t5);    /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(Y1, t6, Y1, curve_p); /* t2 = y3' */
    vli_set(X1, t7);
}
void EccPoint_mult(EccPoint *p_result, const EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
{
    /* R0 and R1 */
    uint64_t Rx[2][NUM_ECC_DIGITS];
    uint64_t Ry[2][NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    int i, nb;
    vli_set(Rx[1], p_point->x);
    vli_set(Ry[1], p_point->y);
    XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);
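    /* Montgomery ladder: at each step (Rx[0], Ry[0]) and (Rx[1], Ry[1]) hold two
       points differing by p_point and sharing the same implicit Z, and every
       iteration performs the same addC/add pair regardless of the scalar bit,
       which helps resist timing side channels. */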
    for(i = (int)vli_numBits(p_scalar) - 2; i > 0; --i)
    {
        nb = !vli_testBit(p_scalar, i);
        XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
        XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    }
    nb = !vli_testBit(p_scalar, 0);
    XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
    /* Find final 1/Z value. */
    vli_modSub(z, Rx[1], Rx[0], curve_p); /* X1 - X0 */
    vli_modMult_fast(z, z, Ry[1-nb]);     /* Yb * (X1 - X0) */
    vli_modMult_fast(z, z, p_point->x);   /* xP * Yb * (X1 - X0) */
    vli_modInv(z, z, curve_p);            /* 1 / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, p_point->y);   /* yP / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, Rx[1-nb]);     /* Xb * yP / (xP * Yb * (X1 - X0)) */
    /* End 1/Z calculation */
    XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    apply_z(Rx[0], Ry[0], z);
    vli_set(p_result->x, Rx[0]);
    vli_set(p_result->y, Ry[0]);
}
ZT_INLINE void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS],const uint8_t p_bytes[ECC_BYTES])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        const uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) |
                      ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
    }
}

ZT_INLINE void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES],const uint64_t p_native[NUM_ECC_DIGITS])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_digit[0] = p_native[i] >> 56;
        p_digit[1] = p_native[i] >> 48;
        p_digit[2] = p_native[i] >> 40;
        p_digit[3] = p_native[i] >> 32;
        p_digit[4] = p_native[i] >> 24;
        p_digit[5] = p_native[i] >> 16;
        p_digit[6] = p_native[i] >> 8;
        p_digit[7] = p_native[i];
    }
}
/* Compute a = sqrt(a) (mod curve_p). */
void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
{
    unsigned i;
    uint64_t p1[NUM_ECC_DIGITS] = {1};
    uint64_t l_result[NUM_ECC_DIGITS] = {1};
    /* Since curve_p == 3 (mod 4) for all supported curves, we can
       compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1, curve_p, p1); /* p1 = curve_p + 1 */
    for(i = vli_numBits(p1) - 1; i > 1; --i)
    {
        vli_modSquare_fast(l_result, l_result);
        if(vli_testBit(p1, i))
        {
            vli_modMult_fast(l_result, l_result, a);
        }
    }
    vli_set(a, l_result);
}
void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
{
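    /* SEC1 compressed form: p_compressed[0] is 0x02 or 0x03 and encodes the
       parity of y; the remaining ECC_BYTES bytes are x in big-endian order. */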
    uint64_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
    ecc_bytes2native(p_point->x, p_compressed+1);
    vli_modSquare_fast(p_point->y, p_point->x);           /* y = x^2 */
    vli_modSub(p_point->y, p_point->y, _3, curve_p);      /* y = x^2 - 3 */
    vli_modMult_fast(p_point->y, p_point->y, p_point->x); /* y = x^3 - 3x */
    vli_modAdd(p_point->y, p_point->y, curve_b, curve_p); /* y = x^3 - 3x + b */
    mod_sqrt(p_point->y);
    if((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01))
    {
        vli_sub(p_point->y, curve_p, p_point->y);
    }
}
ZT_INLINE int ecc_make_key(uint8_t p_publicKey[ECC_BYTES + 1],uint8_t p_privateKey[ECC_BYTES])
{
    uint64_t l_private[NUM_ECC_DIGITS];
    EccPoint l_public;
    unsigned l_tries = 0;
    do
    {
        if(!getRandomNumber(l_private) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(l_private))
        {
            continue;
        }
        /* Make sure the private key is in the range [1, n-1].
           For the supported curves, n is always large enough that we only need to subtract once at most. */
        if(vli_cmp(curve_n, l_private) != 1)
        {
            vli_sub(l_private, l_private, curve_n);
        }
        EccPoint_mult(&l_public, &curve_G, l_private, NULL);
    } while(EccPoint_isZero(&l_public));
    ecc_native2bytes(p_privateKey, l_private);
    ecc_native2bytes(p_publicKey + 1, l_public.x);
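    /* SEC1 compressed form: the prefix byte 0x02 or 0x03 encodes the parity of y. */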
    p_publicKey[0] = 2 + (l_public.y[0] & 0x01);
    return 1;
}
ZT_INLINE int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_privateKey[ECC_BYTES],uint8_t p_secret[ECC_BYTES])
{
    EccPoint l_public;
    uint64_t l_private[NUM_ECC_DIGITS];
    uint64_t l_random[NUM_ECC_DIGITS];
    if(!getRandomNumber(l_random))
    {
        return 0;
    }
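    /* l_random is used as a random initial Z coordinate, blinding the projective
       representation of the point during the scalar multiplication as a
       side-channel countermeasure. */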
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_private, p_privateKey);
    EccPoint l_product;
    EccPoint_mult(&l_product, &l_public, l_private, l_random);
    ecc_native2bytes(p_secret, l_product.x);
    return !EccPoint_isZero(&l_product);
}
/* -------- ECDSA code -------- */

/* Computes p_result = (p_left * p_right) % p_mod. */
void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, const uint64_t *p_mod)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    uint64_t l_modMultiple[2 * NUM_ECC_DIGITS];
    uint l_digitShift, l_bitShift;
    uint l_productBits;
    uint l_modBits = vli_numBits(p_mod);
    vli_mult(l_product, p_left, p_right);
    l_productBits = vli_numBits(l_product + NUM_ECC_DIGITS);
    if(l_productBits)
    {
        l_productBits += NUM_ECC_DIGITS * 64;
    }
    else
    {
        l_productBits = vli_numBits(l_product);
    }
    if(l_productBits < l_modBits)
    { /* l_product < p_mod. */
        vli_set(p_result, l_product);
        return;
    }
    /* Shift p_mod by (l_productBits - l_modBits). This multiplies p_mod by the largest
       power of two possible while still resulting in a number less than the product. */
    vli_clear(l_modMultiple);
    vli_clear(l_modMultiple + NUM_ECC_DIGITS);
    l_digitShift = (l_productBits - l_modBits) / 64;
    l_bitShift = (l_productBits - l_modBits) % 64;
    if(l_bitShift)
    {
        l_modMultiple[l_digitShift + NUM_ECC_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
    }
    else
    {
        vli_set(l_modMultiple + l_digitShift, p_mod);
    }
    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result);
    p_result[0] = 1; /* Use p_result as a temp var to store 1 (for subtraction) */
    while(l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0)
    {
        int l_cmp = vli_cmp(l_modMultiple + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS);
        if(l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0))
        {
            if(vli_sub(l_product, l_product, l_modMultiple))
            { /* borrow */
                vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, p_result);
            }
            vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, l_modMultiple + NUM_ECC_DIGITS);
        }
        uint64_t l_carry = (l_modMultiple[NUM_ECC_DIGITS] & 0x01) << 63;
        vli_rshift1(l_modMultiple + NUM_ECC_DIGITS);
        vli_rshift1(l_modMultiple);
        l_modMultiple[NUM_ECC_DIGITS-1] |= l_carry;
        --l_productBits;
    }
    vli_set(p_result, l_product);
}
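// Unlike vli_modMult_fast, this general routine reduces modulo an arbitrary
// modulus by shift-and-subtract long division. It is used below with the group
// order curve_n, to which the curve-specific fast reduction for curve_p does
// not apply.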
ZT_INLINE uint umax(uint a,uint b)
{
    return (a > b ? a : b);
}
ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES],const uint8_t p_hash[ECC_BYTES],uint8_t p_signature[ECC_BYTES * 2])
{
    uint64_t k[NUM_ECC_DIGITS];
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_s[NUM_ECC_DIGITS];
    EccPoint p;
    unsigned l_tries = 0;
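    /* The nonce k must be secret, uniformly random and never reused: reusing k
       (or leaking even a few of its bits) across signatures reveals the private
       key. */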
    do
    {
        if(!getRandomNumber(k) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(k))
        {
            continue;
        }
        if(vli_cmp(curve_n, k) != 1)
        {
            vli_sub(k, k, curve_n);
        }
        /* tmp = k * G */
        EccPoint_mult(&p, &curve_G, k, NULL);
        /* r = x1 (mod n) */
        if(vli_cmp(curve_n, p.x) != 1)
        {
            vli_sub(p.x, p.x, curve_n);
        }
    } while(vli_isZero(p.x));
    ecc_native2bytes(p_signature, p.x);
    ecc_bytes2native(l_tmp, p_privateKey);
    vli_modMult(l_s, p.x, l_tmp, curve_n); /* s = r*d */
    ecc_bytes2native(l_tmp, p_hash);
    vli_modAdd(l_s, l_tmp, l_s, curve_n); /* s = e + r*d */
    vli_modInv(k, k, curve_n);            /* k = 1 / k */
    vli_modMult(l_s, l_s, k, curve_n);    /* s = (e + r*d) / k */
    ecc_native2bytes(p_signature + ECC_BYTES, l_s);
    return 1;
}
ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
{
    uint64_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    EccPoint l_public, l_sum;
    uint64_t rx[NUM_ECC_DIGITS];
    uint64_t ry[NUM_ECC_DIGITS];
    uint64_t tx[NUM_ECC_DIGITS];
    uint64_t ty[NUM_ECC_DIGITS];
    uint64_t tz[NUM_ECC_DIGITS];
    uint64_t l_r[NUM_ECC_DIGITS], l_s[NUM_ECC_DIGITS];
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_r, p_signature);
    ecc_bytes2native(l_s, p_signature + ECC_BYTES);
    if(vli_isZero(l_r) || vli_isZero(l_s))
    { /* r, s must not be 0. */
        return 0;
    }
    if(vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1)
    { /* r, s must be < n. */
        return 0;
    }
    /* Calculate u1 and u2. */
    vli_modInv(z, l_s, curve_n); /* Z = s^-1 */
    ecc_bytes2native(u1, p_hash);
    vli_modMult(u1, u1, z, curve_n); /* u1 = e/s */
    vli_modMult(u2, l_r, z, curve_n); /* u2 = r/s */
    /* Calculate l_sum = G + Q. */
    vli_set(l_sum.x, l_public.x);
    vli_set(l_sum.y, l_public.y);
    vli_set(tx, curve_G.x);
    vli_set(ty, curve_G.y);
    vli_modSub(z, l_sum.x, tx, curve_p); /* Z = x2 - x1 */
    XYcZ_add(tx, ty, l_sum.x, l_sum.y);
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(l_sum.x, l_sum.y, z);
    /* Use Shamir's trick to calculate u1*G + u2*Q */
    const EccPoint *l_points[4] = {NULL, &curve_G, &l_public, &l_sum};
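    /* The table is indexed by the pair of scalar bits (bit of u1, bit of u2):
       0 -> skip, 1 -> add G, 2 -> add Q, 3 -> add G + Q, so u1*G + u2*Q is
       computed in a single double-and-add pass. */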
    uint l_numBits = umax(vli_numBits(u1), vli_numBits(u2));
    const EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits-1)) | ((!!vli_testBit(u2, l_numBits-1)) << 1)];
    vli_set(rx, l_point->x);
    vli_set(ry, l_point->y);
    vli_clear(z);
    z[0] = 1;
    int i;
    for(i = l_numBits - 2; i >= 0; --i)
    {
        EccPoint_double_jacobian(rx, ry, z);
        int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
        const EccPoint *l_point = l_points[l_index];
        if(l_point)
        {
            vli_set(tx, l_point->x);
            vli_set(ty, l_point->y);
            apply_z(tx, ty, z);
            vli_modSub(tz, rx, tx, curve_p); /* Z = x2 - x1 */
            XYcZ_add(tx, ty, rx, ry);
            vli_modMult_fast(z, z, tz);
        }
    }
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(rx, ry, z);
    /* v = x1 (mod n) */
    if(vli_cmp(curve_n, rx) != 1)
    {
        vli_sub(rx, rx, curve_n);
    }
    /* Accept only if v == r. */
    return (vli_cmp(rx, l_r) == 0);
}
} // anonymous namespace

void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
{
    if (!ecc_make_key(pub,priv)) {
        fprintf(stderr,"FATAL: ecc_make_key() failed!" ZT_EOL_S);
        abort();
    }
}

void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    if (!ecdsa_sign(priv,hash,sig)) {
        fprintf(stderr,"FATAL: ecdsa_sign() failed!" ZT_EOL_S);
        abort();
    }
}

bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    return (ecdsa_verify(pub,hash,sig) != 0);
}

bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
{
    return (ecdh_shared_secret(theirPub,ourPriv,secret) != 0);
}
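// Minimal usage sketch (the 48-byte hash would typically be a SHA-384 digest;
// `hash` here stands for a caller-supplied buffer, not part of this API):
//
//   uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], priv[ZT_ECC384_PRIVATE_KEY_SIZE];
//   ECC384GenerateKey(pub, priv);
//   uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
//   ECC384ECDSASign(priv, hash, sig);
//   bool ok = ECC384ECDSAVerify(pub, hash, sig);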
} // namespace ZeroTier