ECC384.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2019 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include "Constants.hpp"
#include "ECC384.hpp"
#include "Utils.hpp"

namespace ZeroTier {
namespace {

//////////////////////////////////////////////////////////////////////////////
// This is EASY-ECC by Kenneth MacKay
// https://github.com/esxgx/easy-ecc
// This code is under the BSD 2-clause license, not ZeroTier's license
//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// ecc.h from easy-ecc
//////////////////////////////////////////////////////////////////////////////

#define secp128r1 16
#define secp192r1 24
#define secp256r1 32
#define secp384r1 48
//#ifndef ECC_CURVE
//    #define ECC_CURVE secp256r1
//#endif
#define ECC_CURVE secp384r1
//#if (ECC_CURVE != secp128r1 && ECC_CURVE != secp192r1 && ECC_CURVE != secp256r1 && ECC_CURVE != secp384r1)
//    #error "Must define ECC_CURVE to one of the available curves"
//#endif
#define ECC_BYTES ECC_CURVE

//////////////////////////////////////////////////////////////////////////////
// ecc.c from easy-ecc
//////////////////////////////////////////////////////////////////////////////

//#include "ecc.h"
//#include <string.h>

#define NUM_ECC_DIGITS (ECC_BYTES/8)
#define MAX_TRIES 1024

typedef unsigned int uint;

#if defined(__SIZEOF_INT128__) || ((__clang_major__ * 100 + __clang_minor__) >= 302)
#define SUPPORTS_INT128 1
#else
#define SUPPORTS_INT128 0
#endif

#if SUPPORTS_INT128
typedef unsigned __int128 uint128_t;
#else
typedef struct
{
    uint64_t m_low;
    uint64_t m_high;
} uint128_t;
#endif

typedef struct EccPoint
{
    uint64_t x[NUM_ECC_DIGITS];
    uint64_t y[NUM_ECC_DIGITS];
} EccPoint;

#define CONCAT1(a, b) a##b
#define CONCAT(a, b) CONCAT1(a, b)

#define Curve_P_16 {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFDFFFFFFFF}
#define Curve_P_24 {0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull}
#define Curve_P_32 {0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, 0x0000000000000000ull, 0xFFFFFFFF00000001ull}
#define Curve_P_48 {0x00000000FFFFFFFF, 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

#define Curve_B_16 {0xD824993C2CEE5ED3, 0xE87579C11079F43D}
#define Curve_B_24 {0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull, 0x64210519E59C80E7ull}
#define Curve_B_32 {0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull, 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull}
#define Curve_B_48 {0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, 0x0314088F5013875A, 0x181D9C6EFE814112, 0x988E056BE3F82D19, 0xB3312FA7E23EE7E4}

#define Curve_G_16 { \
    {0x0C28607CA52C5B86, 0x161FF7528B899B2D}, \
    {0xC02DA292DDED7A83, 0xCF5AC8395BAFEB13}}
#define Curve_G_24 { \
    {0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull, 0x188DA80EB03090F6ull}, \
    {0x73F977A11E794811ull, 0x631011ED6B24CDD5ull, 0x07192B95FFC8DA78ull}}
#define Curve_G_32 { \
    {0xF4A13945D898C296ull, 0x77037D812DEB33A0ull, 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull}, \
    {0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull, 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull}}
#define Curve_G_48 { \
    {0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38, 0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537}, \
    {0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0, 0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F}}

#define Curve_N_16 {0x75A30D1B9038A115, 0xFFFFFFFE00000000}
#define Curve_N_24 {0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull, 0xFFFFFFFFFFFFFFFFull}
#define Curve_N_32 {0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull}
#define Curve_N_48 {0xECEC196ACCC52973, 0x581A0DB248B0A77A, 0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

static uint64_t curve_p[NUM_ECC_DIGITS] = CONCAT(Curve_P_, ECC_CURVE);
static uint64_t curve_b[NUM_ECC_DIGITS] = CONCAT(Curve_B_, ECC_CURVE);
static EccPoint curve_G = CONCAT(Curve_G_, ECC_CURVE);
static uint64_t curve_n[NUM_ECC_DIGITS] = CONCAT(Curve_N_, ECC_CURVE);

// Use ZeroTier's secure PRNG
static inline int getRandomNumber(uint64_t *p_vli)
{
    Utils::getSecureRandom(p_vli,ECC_BYTES);
    return 1;
}

static inline void vli_clear(uint64_t *p_vli)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_vli[i] = 0;
    }
}

/* Returns 1 if p_vli == 0, 0 otherwise. */
static inline int vli_isZero(uint64_t *p_vli)
{
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        if(p_vli[i])
        {
            return 0;
        }
    }
    return 1;
}

/* Returns nonzero if bit p_bit of p_vli is set. */
static inline uint64_t vli_testBit(uint64_t *p_vli, uint p_bit)
{
    return (p_vli[p_bit/64] & ((uint64_t)1 << (p_bit % 64)));
}

/* Counts the number of 64-bit "digits" in p_vli. */
static inline uint vli_numDigits(uint64_t *p_vli)
{
    int i;
    /* Search from the end until we find a non-zero digit.
       We do it in reverse because we expect that most digits will be nonzero. */
    for(i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i)
    {
    }
    return (i + 1);
}

/* Counts the number of bits required for p_vli. */
static inline uint vli_numBits(uint64_t *p_vli)
{
    uint i;
    uint64_t l_digit;
    uint l_numDigits = vli_numDigits(p_vli);
    if(l_numDigits == 0)
    {
        return 0;
    }
    l_digit = p_vli[l_numDigits - 1];
    for(i=0; l_digit; ++i)
    {
        l_digit >>= 1;
    }
    return ((l_numDigits - 1) * 64 + i);
}

/* Sets p_dest = p_src. */
static inline void vli_set(uint64_t *p_dest, uint64_t *p_src)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_dest[i] = p_src[i];
    }
}

/* Returns sign of p_left - p_right. */
static inline int vli_cmp(uint64_t *p_left, uint64_t *p_right)
{
    int i;
    for(i = NUM_ECC_DIGITS-1; i >= 0; --i)
    {
        if(p_left[i] > p_right[i])
        {
            return 1;
        }
        else if(p_left[i] < p_right[i])
        {
            return -1;
        }
    }
    return 0;
}

/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
static inline uint64_t vli_lshift(uint64_t *p_result, uint64_t *p_in, uint p_shift)
{
    uint64_t l_carry = 0;
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_temp = p_in[i];
        p_result[i] = (l_temp << p_shift) | l_carry;
        l_carry = l_temp >> (64 - p_shift);
    }
    return l_carry;
}

/* Computes p_vli = p_vli >> 1. */
static inline void vli_rshift1(uint64_t *p_vli)
{
    uint64_t *l_end = p_vli;
    uint64_t l_carry = 0;
    p_vli += NUM_ECC_DIGITS;
    while(p_vli-- > l_end)
    {
        uint64_t l_temp = *p_vli;
        *p_vli = (l_temp >> 1) | l_carry;
        l_carry = l_temp << 63;
    }
}

/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
static inline uint64_t vli_add(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_carry = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
        if(l_sum != p_left[i])
        {
            l_carry = (l_sum < p_left[i]);
        }
        p_result[i] = l_sum;
    }
    return l_carry;
}

/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
static inline uint64_t vli_sub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_borrow = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
        if(l_diff != p_left[i])
        {
            l_borrow = (l_diff > p_left[i]);
        }
        p_result[i] = l_diff;
    }
    return l_borrow;
}
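
/* Editorial note on the carry/borrow logic above: vli_add and vli_sub only
   update the running carry (or borrow) when the output digit differs from the
   left input. If l_sum == p_left[i], then p_right[i] + l_carry wrapped to
   exactly 0 or 2^64; in either case the outgoing carry equals the incoming
   one, so leaving l_carry untouched is correct. */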

#if SUPPORTS_INT128

/* Computes p_result = p_left * p_right. */
static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_right[k-i];
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}

/* Computes p_result = p_left^2. */
static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_left[k-i];
            if(i < k-i)
            {
                r2 += l_product >> 127;
                l_product *= 2;
            }
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}

#else /* #if SUPPORTS_INT128 */

static inline uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
{
    uint128_t l_result;
    uint64_t a0 = p_left & 0xffffffffull;
    uint64_t a1 = p_left >> 32;
    uint64_t b0 = p_right & 0xffffffffull;
    uint64_t b1 = p_right >> 32;
    uint64_t m0 = a0 * b0;
    uint64_t m1 = a0 * b1;
    uint64_t m2 = a1 * b0;
    uint64_t m3 = a1 * b1;
    m2 += (m0 >> 32);
    m2 += m1;
    if(m2 < m1)
    { // overflow
        m3 += 0x100000000ull;
    }
    l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    l_result.m_high = m3 + (m2 >> 32);
    return l_result;
}

static inline uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t l_result;
    l_result.m_low = a.m_low + b.m_low;
    l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low);
    return l_result;
}

static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_right[k-i]);
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_left[k-i]);
            if(i < k-i)
            {
                r2 += l_product.m_high >> 63;
                l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                l_product.m_low <<= 1;
            }
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

#endif /* SUPPORTS_INT128 */
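
/* Editorial note: both vli_mult variants above use product scanning (Comba's
   method). Output digit k accumulates every partial product
   p_left[i] * p_right[k-i] into a 192-bit accumulator (r2:r01), stores the low
   64 bits, then shifts the accumulator right by one digit. vli_square halves
   the work by computing each off-diagonal product once and doubling it. */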

/* Computes p_result = (p_left + p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modAdd(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_carry = vli_add(p_result, p_left, p_right);
    if(l_carry || vli_cmp(p_result, p_mod) >= 0)
    { /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
        vli_sub(p_result, p_result, p_mod);
    }
}

/* Computes p_result = (p_left - p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modSub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_borrow = vli_sub(p_result, p_left, p_right);
    if(l_borrow)
    { /* In this case, p_result == -diff == (max int) - diff.
         Since -x % d == d - x, we can get the correct result from p_result + p_mod (with overflow). */
        vli_add(p_result, p_result, p_mod);
    }
}

#if ECC_CURVE == secp128r1

/* Computes p_result = p_product % curve_p.
   See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;
    vli_set(p_result, p_product);
    l_tmp[0] = p_product[2];
    l_tmp[1] = (p_product[3] & 0x1FFFFFFFFull) | (p_product[2] << 33);
    l_carry = vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = (p_product[2] >> 31) | (p_product[3] << 33);
    l_tmp[1] = (p_product[3] >> 31) | ((p_product[2] & 0xFFFFFFFF80000000ull) << 2);
    l_carry += vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = (p_product[2] >> 62) | (p_product[3] << 2);
    l_tmp[1] = (p_product[3] >> 62) | ((p_product[2] & 0xC000000000000000ull) >> 29) | (p_product[3] << 35);
    l_carry += vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = (p_product[3] >> 29);
    l_tmp[1] = ((p_product[3] & 0xFFFFFFFFE0000000ull) << 4);
    l_carry += vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = (p_product[3] >> 60);
    l_tmp[1] = (p_product[3] & 0xFFFFFFFE00000000ull);
    l_carry += vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = 0;
    l_tmp[1] = ((p_product[3] & 0xF000000000000000ull) >> 27);
    l_carry += vli_add(p_result, p_result, l_tmp);
    while(l_carry || vli_cmp(curve_p, p_result) != 1)
    {
        l_carry -= vli_sub(p_result, p_result, curve_p);
    }
}

#elif ECC_CURVE == secp192r1

/* Computes p_result = p_product % curve_p.
   See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;
    vli_set(p_result, p_product);
    vli_set(l_tmp, &p_product[3]);
    l_carry = vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = 0;
    l_tmp[1] = p_product[3];
    l_tmp[2] = p_product[4];
    l_carry += vli_add(p_result, p_result, l_tmp);
    l_tmp[0] = l_tmp[1] = p_product[5];
    l_tmp[2] = 0;
    l_carry += vli_add(p_result, p_result, l_tmp);
    while(l_carry || vli_cmp(curve_p, p_result) != 1)
    {
        l_carry -= vli_sub(p_result, p_result, curve_p);
    }
}

#elif ECC_CURVE == secp256r1

/* Computes p_result = p_product % curve_p
   from http://www.nsa.gov/ia/_files/nist-routines.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;
    /* t */
    vli_set(p_result, p_product);
    /* s1 */
    l_tmp[0] = 0;
    l_tmp[1] = p_product[5] & 0xffffffff00000000ull;
    l_tmp[2] = p_product[6];
    l_tmp[3] = p_product[7];
    l_carry = vli_lshift(l_tmp, l_tmp, 1);
    l_carry += vli_add(p_result, p_result, l_tmp);
    /* s2 */
    l_tmp[1] = p_product[6] << 32;
    l_tmp[2] = (p_product[6] >> 32) | (p_product[7] << 32);
    l_tmp[3] = p_product[7] >> 32;
    l_carry += vli_lshift(l_tmp, l_tmp, 1);
    l_carry += vli_add(p_result, p_result, l_tmp);
    /* s3 */
    l_tmp[0] = p_product[4];
    l_tmp[1] = p_product[5] & 0xffffffff;
    l_tmp[2] = 0;
    l_tmp[3] = p_product[7];
    l_carry += vli_add(p_result, p_result, l_tmp);
    /* s4 */
    l_tmp[0] = (p_product[4] >> 32) | (p_product[5] << 32);
    l_tmp[1] = (p_product[5] >> 32) | (p_product[6] & 0xffffffff00000000ull);
    l_tmp[2] = p_product[7];
    l_tmp[3] = (p_product[6] >> 32) | (p_product[4] << 32);
    l_carry += vli_add(p_result, p_result, l_tmp);
    /* d1 */
    l_tmp[0] = (p_product[5] >> 32) | (p_product[6] << 32);
    l_tmp[1] = (p_product[6] >> 32);
    l_tmp[2] = 0;
    l_tmp[3] = (p_product[4] & 0xffffffff) | (p_product[5] << 32);
    l_carry -= vli_sub(p_result, p_result, l_tmp);
    /* d2 */
    l_tmp[0] = p_product[6];
    l_tmp[1] = p_product[7];
    l_tmp[2] = 0;
    l_tmp[3] = (p_product[4] >> 32) | (p_product[5] & 0xffffffff00000000ull);
    l_carry -= vli_sub(p_result, p_result, l_tmp);
    /* d3 */
    l_tmp[0] = (p_product[6] >> 32) | (p_product[7] << 32);
    l_tmp[1] = (p_product[7] >> 32) | (p_product[4] << 32);
    l_tmp[2] = (p_product[4] >> 32) | (p_product[5] << 32);
    l_tmp[3] = (p_product[6] << 32);
    l_carry -= vli_sub(p_result, p_result, l_tmp);
    /* d4 */
    l_tmp[0] = p_product[7];
    l_tmp[1] = p_product[4] & 0xffffffff00000000ull;
    l_tmp[2] = p_product[5];
    l_tmp[3] = p_product[6] & 0xffffffff00000000ull;
    l_carry -= vli_sub(p_result, p_result, l_tmp);
    if(l_carry < 0)
    {
        do
        {
            l_carry += vli_add(p_result, p_result, curve_p);
        } while(l_carry < 0);
    }
    else
    {
        while(l_carry || vli_cmp(curve_p, p_result) != 1)
        {
            l_carry -= vli_sub(p_result, p_result, curve_p);
        }
    }
}

#elif ECC_CURVE == secp384r1

static inline void omega_mult(uint64_t *p_result, uint64_t *p_right)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_carry, l_diff;
    /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
    vli_set(p_result, p_right); /* 1 */
    l_carry = vli_lshift(l_tmp, p_right, 32);
    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); /* 2^96 + 1 */
    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); /* 2^128 + 2^96 + 1 */
    l_carry += vli_sub(p_result, p_result, l_tmp); /* 2^128 + 2^96 - 2^32 + 1 */
    l_diff = p_result[NUM_ECC_DIGITS] - l_carry;
    if(l_diff > p_result[NUM_ECC_DIGITS])
    { /* Propagate borrow if necessary. */
        uint i;
        for(i = 1 + NUM_ECC_DIGITS; ; ++i)
        {
            --p_result[i];
            if(p_result[i] != (uint64_t)-1)
            {
                break;
            }
        }
    }
    p_result[NUM_ECC_DIGITS] = l_diff;
}

/* Computes p_result = p_product % curve_p
   see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
   section "Curve-Specific Optimizations" */
static inline void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[2*NUM_ECC_DIGITS];
    while(!vli_isZero(p_product + NUM_ECC_DIGITS)) /* While c1 != 0 */
    {
        uint64_t l_carry = 0;
        uint i;
        vli_clear(l_tmp);
        vli_clear(l_tmp + NUM_ECC_DIGITS);
        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS); /* tmp = w * c1 */
        vli_clear(p_product + NUM_ECC_DIGITS); /* p = c0 */
        /* (c1, c0) = c0 + w * c1 */
        for(i=0; i<NUM_ECC_DIGITS+3; ++i)
        {
            uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
            if(l_sum != p_product[i])
            {
                l_carry = (l_sum < p_product[i]);
            }
            p_product[i] = l_sum;
        }
    }
    while(vli_cmp(p_product, curve_p) > 0)
    {
        vli_sub(p_product, p_product, curve_p);
    }
    vli_set(p_result, p_product);
}
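
/* Editorial note: the reduction above exploits the special form of the
   secp384r1 prime, p = 2^384 - 2^128 - 2^96 + 2^32 - 1, which gives
   2^384 == 2^128 + 2^96 - 2^32 + 1 (mod p). Writing the 768-bit product as
   c1 * 2^384 + c0, each loop pass replaces it with c0 + omega * c1, where
   omega = 2^128 + 2^96 - 2^32 + 1 is computed by omega_mult. This shrinks the
   value until the high half is zero; a few conditional subtractions of p then
   finish the reduction. */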

#endif

/* Computes p_result = (p_left * p_right) % curve_p. */
static inline void vli_modMult_fast(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_mult(l_product, p_left, p_right);
    vli_mmod_fast(p_result, l_product);
}

/* Computes p_result = p_left^2 % curve_p. */
static inline void vli_modSquare_fast(uint64_t *p_result, uint64_t *p_left)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_square(l_product, p_left);
    vli_mmod_fast(p_result, l_product);
}

#define EVEN(vli) (!(vli[0] & 1))

/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
   See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
   https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
static inline void vli_modInv(uint64_t *p_result, uint64_t *p_input, uint64_t *p_mod)
{
    uint64_t a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS], u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS];
    uint64_t l_carry;
    int l_cmpResult;
    if(vli_isZero(p_input))
    {
        vli_clear(p_result);
        return;
    }
    vli_set(a, p_input);
    vli_set(b, p_mod);
    vli_clear(u);
    u[0] = 1;
    vli_clear(v);
    while((l_cmpResult = vli_cmp(a, b)) != 0)
    {
        l_carry = 0;
        if(EVEN(a))
        {
            vli_rshift1(a);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(EVEN(b))
        {
            vli_rshift1(b);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(l_cmpResult > 0)
        {
            vli_sub(a, a, b);
            vli_rshift1(a);
            if(vli_cmp(u, v) < 0)
            {
                vli_add(u, u, p_mod);
            }
            vli_sub(u, u, v);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else
        {
            vli_sub(b, b, a);
            vli_rshift1(b);
            if(vli_cmp(v, u) < 0)
            {
                vli_add(v, v, p_mod);
            }
            vli_sub(v, v, u);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
    }
    vli_set(p_result, u);
}
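
/* Editorial note: vli_modInv is the binary (right-shift) extended Euclidean
   algorithm from the paper cited above. It maintains the invariants
   u * p_input == a (mod p_mod) and v * p_input == b (mod p_mod); halving u or
   v when it is odd is done by first adding p_mod, which is division by two in
   the residue ring. When a == b == gcd == 1, u holds the inverse. */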

/* ------ Point operations ------ */

/* Returns 1 if p_point is the point at infinity, 0 otherwise. */
static inline int EccPoint_isZero(EccPoint *p_point)
{
    return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
   From http://eprint.iacr.org/2011/338.pdf
*/

/* Double in place */
static inline void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
{
    /* t1 = X, t2 = Y, t3 = Z */
    uint64_t t4[NUM_ECC_DIGITS];
    uint64_t t5[NUM_ECC_DIGITS];
    if(vli_isZero(Z1))
    {
        return;
    }
    vli_modSquare_fast(t4, Y1);      /* t4 = y1^2 */
    vli_modMult_fast(t5, X1, t4);    /* t5 = x1*y1^2 = A */
    vli_modSquare_fast(t4, t4);      /* t4 = y1^4 */
    vli_modMult_fast(Y1, Y1, Z1);    /* t2 = y1*z1 = z3 */
    vli_modSquare_fast(Z1, Z1);      /* t3 = z1^2 */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = x1 + z1^2 */
    vli_modAdd(Z1, Z1, Z1, curve_p); /* t3 = 2*z1^2 */
    vli_modSub(Z1, X1, Z1, curve_p); /* t3 = x1 - z1^2 */
    vli_modMult_fast(X1, X1, Z1);    /* t1 = x1^2 - z1^4 */
    vli_modAdd(Z1, X1, X1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
    if(vli_testBit(X1, 0))
    {
        uint64_t l_carry = vli_add(X1, X1, curve_p);
        vli_rshift1(X1);
        X1[NUM_ECC_DIGITS-1] |= l_carry << 63;
    }
    else
    {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */
    vli_modSquare_fast(Z1, X1);      /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5, t5, Z1, curve_p); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5);    /* t1 = B * (A - x3) */
    vli_modSub(t4, X1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */
    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static inline void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
{
    uint64_t t1[NUM_ECC_DIGITS];
    vli_modSquare_fast(t1, Z);    /* z^2 */
    vli_modMult_fast(X1, X1, t1); /* x1 * z^2 */
    vli_modMult_fast(t1, t1, Z);  /* z^3 */
    vli_modMult_fast(Y1, Y1, t1); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
static inline void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
{
    uint64_t z[NUM_ECC_DIGITS];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    vli_clear(z);
    z[0] = 1;
    if(p_initialZ)
    {
        vli_set(z, p_initialZ);
    }
    apply_z(X1, Y1, z);
    EccPoint_double_jacobian(X1, Y1, z);
    apply_z(X2, Y2, z);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
   or P => P', Q => P + Q
*/
static inline void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSquare_fast(t5, Y2);      /* t5 = (y2 - y1)^2 = D */
    vli_modSub(t5, t5, X1, curve_p); /* t5 = D - B */
    vli_modSub(t5, t5, X2, curve_p); /* t5 = D - B - C = x3 */
    vli_modSub(X2, X2, X1, curve_p); /* t3 = C - B */
    vli_modMult_fast(Y1, Y1, X2);    /* t2 = y1*(C - B) */
    vli_modSub(X2, X1, t5, curve_p); /* t3 = B - x3 */
    vli_modMult_fast(Y2, Y2, X2);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_set(X2, t5);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
   or P => P - Q, Q => P + Q
*/
static inline void XYcZ_addC(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    uint64_t t6[NUM_ECC_DIGITS];
    uint64_t t7[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modAdd(t5, Y2, Y1, curve_p); /* t4 = y2 + y1 */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSub(t6, X2, X1, curve_p); /* t6 = C - B */
    vli_modMult_fast(Y1, Y1, t6);    /* t2 = y1 * (C - B) */
    vli_modAdd(t6, X1, X2, curve_p); /* t6 = B + C */
    vli_modSquare_fast(X2, Y2);      /* t3 = (y2 - y1)^2 */
    vli_modSub(X2, X2, t6, curve_p); /* t3 = x3 */
    vli_modSub(t7, X1, X2, curve_p); /* t7 = B - x3 */
    vli_modMult_fast(Y2, Y2, t7);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_modSquare_fast(t7, t5);      /* t7 = (y2 + y1)^2 = F */
    vli_modSub(t7, t7, t6, curve_p); /* t7 = x3' */
    vli_modSub(t6, t7, X1, curve_p); /* t6 = x3' - B */
    vli_modMult_fast(t6, t6, t5);    /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(Y1, t6, Y1, curve_p); /* t2 = y3' */
    vli_set(X1, t7);
}

static inline void EccPoint_mult(EccPoint *p_result, EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
{
    /* R0 and R1 */
    uint64_t Rx[2][NUM_ECC_DIGITS];
    uint64_t Ry[2][NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    int i, nb;
    vli_set(Rx[1], p_point->x);
    vli_set(Ry[1], p_point->y);
    XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);
    for(i = vli_numBits(p_scalar) - 2; i > 0; --i)
    {
        nb = !vli_testBit(p_scalar, i);
        XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
        XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    }
    nb = !vli_testBit(p_scalar, 0);
    XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
    /* Find final 1/Z value. */
    vli_modSub(z, Rx[1], Rx[0], curve_p); /* X1 - X0 */
    vli_modMult_fast(z, z, Ry[1-nb]);     /* Yb * (X1 - X0) */
    vli_modMult_fast(z, z, p_point->x);   /* xP * Yb * (X1 - X0) */
    vli_modInv(z, z, curve_p);            /* 1 / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, p_point->y);   /* yP / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, Rx[1-nb]);     /* Xb * yP / (xP * Yb * (X1 - X0)) */
    /* End 1/Z calculation */
    XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    apply_z(Rx[0], Ry[0], z);
    vli_set(p_result->x, Rx[0]);
    vli_set(p_result->y, Ry[0]);
}
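
/* Editorial note: EccPoint_mult is the co-Z Montgomery ladder from the paper
   cited above. Every scalar bit triggers exactly one XYcZ_addC and one
   XYcZ_add; the bit value only selects which of R0/R1 plays which role. This
   regular, bit-independent operation sequence is what gives the ladder its
   resistance to simple timing/power analysis. The "final 1/Z" step recovers
   the affine result without Z ever being stored during the ladder itself. */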

static inline void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS], const uint8_t p_bytes[ECC_BYTES])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        const uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) |
                      ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
    }
}

static inline void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES], const uint64_t p_native[NUM_ECC_DIGITS])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_digit[0] = p_native[i] >> 56;
        p_digit[1] = p_native[i] >> 48;
        p_digit[2] = p_native[i] >> 40;
        p_digit[3] = p_native[i] >> 32;
        p_digit[4] = p_native[i] >> 24;
        p_digit[5] = p_native[i] >> 16;
        p_digit[6] = p_native[i] >> 8;
        p_digit[7] = p_native[i];
    }
}

/* Compute a = sqrt(a) (mod curve_p). */
static inline void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
{
    unsigned i;
    uint64_t p1[NUM_ECC_DIGITS] = {1};
    uint64_t l_result[NUM_ECC_DIGITS] = {1};
    /* Since curve_p == 3 (mod 4) for all supported curves, we can
       compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1, curve_p, p1); /* p1 = curve_p + 1 */
    for(i = vli_numBits(p1) - 1; i > 1; --i)
    {
        vli_modSquare_fast(l_result, l_result);
        if(vli_testBit(p1, i))
        {
            vli_modMult_fast(l_result, l_result, a);
        }
    }
    vli_set(a, l_result);
}

static inline void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
{
    uint64_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
    ecc_bytes2native(p_point->x, p_compressed+1);
    vli_modSquare_fast(p_point->y, p_point->x);           /* y = x^2 */
    vli_modSub(p_point->y, p_point->y, _3, curve_p);      /* y = x^2 - 3 */
    vli_modMult_fast(p_point->y, p_point->y, p_point->x); /* y = x^3 - 3x */
    vli_modAdd(p_point->y, p_point->y, curve_b, curve_p); /* y = x^3 - 3x + b */
    mod_sqrt(p_point->y);
    if((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01))
    {
        vli_sub(p_point->y, curve_p, p_point->y);
    }
}

static inline int ecc_make_key(uint8_t p_publicKey[ECC_BYTES+1], uint8_t p_privateKey[ECC_BYTES])
{
    uint64_t l_private[NUM_ECC_DIGITS];
    EccPoint l_public;
    unsigned l_tries = 0;
    do
    {
        if(!getRandomNumber(l_private) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(l_private))
        {
            continue;
        }
        /* Make sure the private key is in the range [1, n-1].
           For the supported curves, n is always large enough that we only need to subtract once at most. */
        if(vli_cmp(curve_n, l_private) != 1)
        {
            vli_sub(l_private, l_private, curve_n);
        }
        EccPoint_mult(&l_public, &curve_G, l_private, NULL);
    } while(EccPoint_isZero(&l_public));
    ecc_native2bytes(p_privateKey, l_private);
    ecc_native2bytes(p_publicKey + 1, l_public.x);
    p_publicKey[0] = 2 + (l_public.y[0] & 0x01);
    return 1;
}

static inline int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_privateKey[ECC_BYTES], uint8_t p_secret[ECC_BYTES])
{
    EccPoint l_public;
    uint64_t l_private[NUM_ECC_DIGITS];
    uint64_t l_random[NUM_ECC_DIGITS];
    if(!getRandomNumber(l_random))
    {
        return 0;
    }
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_private, p_privateKey);
    EccPoint l_product;
    EccPoint_mult(&l_product, &l_public, l_private, l_random);
    ecc_native2bytes(p_secret, l_product.x);
    return !EccPoint_isZero(&l_product);
}
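
/* Editorial note: in ecdh_shared_secret the fresh random value is passed to
   EccPoint_mult as the initial Z coordinate, randomizing the projective
   representation of the public point on every call (a cheap side-channel
   countermeasure). The raw shared secret is just the X coordinate of the
   product point; callers would normally run it through a KDF or hash before
   using it as key material. */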

/* -------- ECDSA code -------- */

/* Computes p_result = (p_left * p_right) % p_mod. */
static inline void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    uint64_t l_modMultiple[2 * NUM_ECC_DIGITS];
    uint l_digitShift, l_bitShift;
    uint l_productBits;
    uint l_modBits = vli_numBits(p_mod);
    vli_mult(l_product, p_left, p_right);
    l_productBits = vli_numBits(l_product + NUM_ECC_DIGITS);
    if(l_productBits)
    {
        l_productBits += NUM_ECC_DIGITS * 64;
    }
    else
    {
        l_productBits = vli_numBits(l_product);
    }
    if(l_productBits < l_modBits)
    { /* l_product < p_mod. */
        vli_set(p_result, l_product);
        return;
    }
    /* Shift p_mod by (l_leftBits - l_modBits). This multiplies p_mod by the largest
       power of two possible while still resulting in a number less than p_left. */
    vli_clear(l_modMultiple);
    vli_clear(l_modMultiple + NUM_ECC_DIGITS);
    l_digitShift = (l_productBits - l_modBits) / 64;
    l_bitShift = (l_productBits - l_modBits) % 64;
    if(l_bitShift)
    {
        l_modMultiple[l_digitShift + NUM_ECC_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
    }
    else
    {
        vli_set(l_modMultiple + l_digitShift, p_mod);
    }
    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result);
    p_result[0] = 1; /* Use p_result as a temp var to store 1 (for subtraction) */
    while(l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0)
    {
        int l_cmp = vli_cmp(l_modMultiple + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS);
        if(l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0))
        {
            if(vli_sub(l_product, l_product, l_modMultiple))
            { /* borrow */
                vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, p_result);
            }
            vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, l_modMultiple + NUM_ECC_DIGITS);
        }
        uint64_t l_carry = (l_modMultiple[NUM_ECC_DIGITS] & 0x01) << 63;
        vli_rshift1(l_modMultiple + NUM_ECC_DIGITS);
        vli_rshift1(l_modMultiple);
        l_modMultiple[NUM_ECC_DIGITS-1] |= l_carry;
        --l_productBits;
    }
    vli_set(p_result, l_product);
}

static inline uint umax(uint a, uint b)
{
    return (a > b ? a : b);
}

static inline int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES], const uint8_t p_hash[ECC_BYTES], uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t k[NUM_ECC_DIGITS];
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_s[NUM_ECC_DIGITS];
    EccPoint p;
    unsigned l_tries = 0;
    do
    {
        if(!getRandomNumber(k) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(k))
        {
            continue;
        }
        if(vli_cmp(curve_n, k) != 1)
        {
            vli_sub(k, k, curve_n);
        }
        /* tmp = k * G */
        EccPoint_mult(&p, &curve_G, k, NULL);
        /* r = x1 (mod n) */
        if(vli_cmp(curve_n, p.x) != 1)
        {
            vli_sub(p.x, p.x, curve_n);
        }
    } while(vli_isZero(p.x));
    ecc_native2bytes(p_signature, p.x);
    ecc_bytes2native(l_tmp, p_privateKey);
    vli_modMult(l_s, p.x, l_tmp, curve_n); /* s = r*d */
    ecc_bytes2native(l_tmp, p_hash);
    vli_modAdd(l_s, l_tmp, l_s, curve_n); /* s = e + r*d */
    vli_modInv(k, k, curve_n); /* k = 1 / k */
    vli_modMult(l_s, l_s, k, curve_n); /* s = (e + r*d) / k */
    ecc_native2bytes(p_signature + ECC_BYTES, l_s);
    return 1;
}
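
/* Editorial note: ecdsa_sign implements the textbook equations
   r = (k*G).x mod n and s = k^-1 * (e + r*d) mod n with a fresh random k per
   signature. The usual ECDSA caveat applies: k must remain secret and must
   never be reused across signatures, since two signatures sharing a k reveal
   the private key d. */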

static inline int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_hash[ECC_BYTES], const uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    EccPoint l_public, l_sum;
    uint64_t rx[NUM_ECC_DIGITS];
    uint64_t ry[NUM_ECC_DIGITS];
    uint64_t tx[NUM_ECC_DIGITS];
    uint64_t ty[NUM_ECC_DIGITS];
    uint64_t tz[NUM_ECC_DIGITS];
    uint64_t l_r[NUM_ECC_DIGITS], l_s[NUM_ECC_DIGITS];
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_r, p_signature);
    ecc_bytes2native(l_s, p_signature + ECC_BYTES);
    if(vli_isZero(l_r) || vli_isZero(l_s))
    { /* r, s must not be 0. */
        return 0;
    }
    if(vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1)
    { /* r, s must be < n. */
        return 0;
    }
    /* Calculate u1 and u2. */
    vli_modInv(z, l_s, curve_n); /* Z = s^-1 */
    ecc_bytes2native(u1, p_hash);
    vli_modMult(u1, u1, z, curve_n); /* u1 = e/s */
    vli_modMult(u2, l_r, z, curve_n); /* u2 = r/s */
    /* Calculate l_sum = G + Q. */
    vli_set(l_sum.x, l_public.x);
    vli_set(l_sum.y, l_public.y);
    vli_set(tx, curve_G.x);
    vli_set(ty, curve_G.y);
    vli_modSub(z, l_sum.x, tx, curve_p); /* Z = x2 - x1 */
    XYcZ_add(tx, ty, l_sum.x, l_sum.y);
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(l_sum.x, l_sum.y, z);
    /* Use Shamir's trick to calculate u1*G + u2*Q */
    EccPoint *l_points[4] = {NULL, &curve_G, &l_public, &l_sum};
    uint l_numBits = umax(vli_numBits(u1), vli_numBits(u2));
    EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits-1)) | ((!!vli_testBit(u2, l_numBits-1)) << 1)];
    vli_set(rx, l_point->x);
    vli_set(ry, l_point->y);
    vli_clear(z);
    z[0] = 1;
    int i;
    for(i = l_numBits - 2; i >= 0; --i)
    {
        EccPoint_double_jacobian(rx, ry, z);
        int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
        EccPoint *l_point = l_points[l_index];
        if(l_point)
        {
            vli_set(tx, l_point->x);
            vli_set(ty, l_point->y);
            apply_z(tx, ty, z);
            vli_modSub(tz, rx, tx, curve_p); /* Z = x2 - x1 */
            XYcZ_add(tx, ty, rx, ry);
            vli_modMult_fast(z, z, tz);
        }
    }
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(rx, ry, z);
    /* v = x1 (mod n) */
    if(vli_cmp(curve_n, rx) != 1)
    {
        vli_sub(rx, rx, curve_n);
    }
    /* Accept only if v == r. */
    return (vli_cmp(rx, l_r) == 0);
}
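
/* Editorial note: ecdsa_verify uses Shamir's trick. Instead of computing u1*G
   and u2*Q with two separate scalar multiplications, it precomputes G + Q and
   walks the bits of u1 and u2 together, doubling once per bit and adding G, Q,
   or G + Q depending on the current bit pair. This roughly halves the cost of
   computing u1*G + u2*Q, whose x coordinate (mod n) is compared against r. */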

//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////

} // anonymous namespace

void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
{
    if (!ecc_make_key(pub,priv)) {
        fprintf(stderr,"FATAL: ecc_make_key() failed!" ZT_EOL_S);
        abort();
    }
}

void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    if (!ecdsa_sign(priv,hash,sig)) {
        fprintf(stderr,"FATAL: ecdsa_sign() failed!" ZT_EOL_S);
        abort();
    }
}

bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    return (ecdsa_verify(pub,hash,sig) != 0);
}

bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
{
    return (ecdh_shared_secret(theirPub,ourPriv,secret) != 0);
}

} // namespace ZeroTier
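
/* Editorial note: a minimal usage sketch for the public API above
   (illustrative only, not part of the original file). It assumes the
   ZT_ECC384_* size constants declared in ECC384.hpp: a 49-byte compressed
   public key (one prefix byte of 0x02 or 0x03 plus the 48-byte X coordinate,
   matching ECC_BYTES+1 above), 48-byte private keys, hashes, and shared
   secrets, and 96-byte signatures. The 48-byte message hash would normally be
   SHA-384, computed elsewhere.

uint8_t alicePub[ZT_ECC384_PUBLIC_KEY_SIZE],alicePriv[ZT_ECC384_PRIVATE_KEY_SIZE];
uint8_t bobPub[ZT_ECC384_PUBLIC_KEY_SIZE],bobPriv[ZT_ECC384_PRIVATE_KEY_SIZE];
ZeroTier::ECC384GenerateKey(alicePub,alicePriv);
ZeroTier::ECC384GenerateKey(bobPub,bobPriv);

// ECDSA: sign a 48-byte hash with Alice's private key, verify with her public key.
uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE] = {0}; // stand-in for SHA-384(message)
uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
ZeroTier::ECC384ECDSASign(alicePriv,hash,sig);
bool sigOk = ZeroTier::ECC384ECDSAVerify(alicePub,hash,sig);

// ECDH: each side combines its own private key with the other's public key
// and should derive the same 48-byte shared secret.
uint8_t s1[ZT_ECC384_SHARED_SECRET_SIZE],s2[ZT_ECC384_SHARED_SECRET_SIZE];
bool ok1 = ZeroTier::ECC384ECDH(bobPub,alicePriv,s1);
bool ok2 = ZeroTier::ECC384ECDH(alicePub,bobPriv,s2);
// Expect sigOk, ok1, ok2 all true and memcmp(s1,s2,sizeof(s1)) == 0.
*/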