//////////////////////////////////////////////////////////////////////////////
// This is EASY-ECC by Kenneth MacKay
// https://github.com/esxgx/easy-ecc
// This code is under the BSD 2-clause license, not ZeroTier's license
//////////////////////////////////////////////////////////////////////////////

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include "Constants.hpp"
#include "ECC384.hpp"
#include "Utils.hpp"

namespace ZeroTier {
namespace {

//////////////////////////////////////////////////////////////////////////////
// ecc.h from easy-ecc
//////////////////////////////////////////////////////////////////////////////

#define secp128r1 16
#define secp192r1 24
#define secp256r1 32
#define secp384r1 48

//#ifndef ECC_CURVE
// #define ECC_CURVE secp256r1
//#endif
#define ECC_CURVE secp384r1
//#if (ECC_CURVE != secp128r1 && ECC_CURVE != secp192r1 && ECC_CURVE != secp256r1 && ECC_CURVE != secp384r1)
// #error "Must define ECC_CURVE to one of the available curves"
//#endif

#define ECC_BYTES ECC_CURVE

//////////////////////////////////////////////////////////////////////////////
// ecc.c from easy-ecc
//////////////////////////////////////////////////////////////////////////////

//#include "ecc.h"
//#include <string.h>

#define NUM_ECC_DIGITS (ECC_BYTES/8)
#define MAX_TRIES 1024

typedef unsigned int uint;

#if defined(__SIZEOF_INT128__) || ((__clang_major__ * 100 + __clang_minor__) >= 302)
#define SUPPORTS_INT128 1
#else
#define SUPPORTS_INT128 0
#endif

#if SUPPORTS_INT128
typedef unsigned __int128 uint128_t;
#else
typedef struct
{
    uint64_t m_low;
    uint64_t m_high;
} uint128_t;
#endif

typedef struct EccPoint
{
    uint64_t x[NUM_ECC_DIGITS];
    uint64_t y[NUM_ECC_DIGITS];
} EccPoint;

#define CONCAT1(a, b) a##b
#define CONCAT(a, b) CONCAT1(a, b)

#define Curve_P_16 {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFDFFFFFFFF}
#define Curve_P_24 {0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull}
#define Curve_P_32 {0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, 0x0000000000000000ull, 0xFFFFFFFF00000001ull}
#define Curve_P_48 {0x00000000FFFFFFFF, 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

#define Curve_B_16 {0xD824993C2CEE5ED3, 0xE87579C11079F43D}
#define Curve_B_24 {0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull, 0x64210519E59C80E7ull}
#define Curve_B_32 {0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull, 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull}
#define Curve_B_48 {0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, 0x0314088F5013875A, 0x181D9C6EFE814112, 0x988E056BE3F82D19, 0xB3312FA7E23EE7E4}

#define Curve_G_16 { \
    {0x0C28607CA52C5B86, 0x161FF7528B899B2D}, \
    {0xC02DA292DDED7A83, 0xCF5AC8395BAFEB13}}
#define Curve_G_24 { \
    {0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull, 0x188DA80EB03090F6ull}, \
    {0x73F977A11E794811ull, 0x631011ED6B24CDD5ull, 0x07192B95FFC8DA78ull}}
#define Curve_G_32 { \
    {0xF4A13945D898C296ull, 0x77037D812DEB33A0ull, 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull}, \
    {0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull, 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull}}
#define Curve_G_48 { \
    {0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38, 0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537}, \
    {0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0, 0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F}}

#define Curve_N_16 {0x75A30D1B9038A115, 0xFFFFFFFE00000000}
#define Curve_N_24 {0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull, 0xFFFFFFFFFFFFFFFFull}
#define Curve_N_32 {0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull}
#define Curve_N_48 {0xECEC196ACCC52973, 0x581A0DB248B0A77A, 0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

static uint64_t curve_p[NUM_ECC_DIGITS] = CONCAT(Curve_P_, ECC_CURVE);
static uint64_t curve_b[NUM_ECC_DIGITS] = CONCAT(Curve_B_, ECC_CURVE);
static EccPoint curve_G = CONCAT(Curve_G_, ECC_CURVE);
static uint64_t curve_n[NUM_ECC_DIGITS] = CONCAT(Curve_N_, ECC_CURVE);
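
/* Worked note on the constants above (an explanatory comment, not upstream
   easy-ecc text): each constant is a little-endian array of 64-bit limbs,
   limb 0 least significant. For example, Curve_P_48 encodes the NIST P-384
   prime
       p = 2^384 - 2^128 - 2^96 + 2^32 - 1,
   whose low two limbs work out to 0x00000000FFFFFFFF and 0xFFFFFFFF00000000,
   matching the macro. Equivalently 2^384 mod p = 2^128 + 2^96 - 2^32 + 1,
   which is exactly the "omega" constant used by omega_mult() and
   vli_mmod_fast() further below. */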

// Use ZeroTier's secure PRNG
static inline int getRandomNumber(uint64_t *p_vli)
{
    Utils::getSecureRandom(p_vli,ECC_BYTES);
    return 1;
}

static inline void vli_clear(uint64_t *p_vli)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_vli[i] = 0;
    }
}

/* Returns 1 if p_vli == 0, 0 otherwise. */
static inline int vli_isZero(uint64_t *p_vli)
{
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        if(p_vli[i])
        {
            return 0;
        }
    }
    return 1;
}

/* Returns nonzero if bit p_bit of p_vli is set. */
static inline uint64_t vli_testBit(uint64_t *p_vli, uint p_bit)
{
    return (p_vli[p_bit/64] & ((uint64_t)1 << (p_bit % 64)));
}

/* Counts the number of 64-bit "digits" in p_vli. */
static inline uint vli_numDigits(uint64_t *p_vli)
{
    int i;
    /* Search from the end until we find a non-zero digit.
       We do it in reverse because we expect that most digits will be nonzero. */
    for(i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i)
    {
    }
    return (i + 1);
}

/* Counts the number of bits required for p_vli. */
static inline uint vli_numBits(uint64_t *p_vli)
{
    uint i;
    uint64_t l_digit;
    uint l_numDigits = vli_numDigits(p_vli);
    if(l_numDigits == 0)
    {
        return 0;
    }
    l_digit = p_vli[l_numDigits - 1];
    for(i=0; l_digit; ++i)
    {
        l_digit >>= 1;
    }
    return ((l_numDigits - 1) * 64 + i);
}

/* Sets p_dest = p_src. */
static inline void vli_set(uint64_t *p_dest, uint64_t *p_src)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_dest[i] = p_src[i];
    }
}

/* Returns sign of p_left - p_right. */
static inline int vli_cmp(uint64_t *p_left, uint64_t *p_right)
{
    int i;
    for(i = NUM_ECC_DIGITS-1; i >= 0; --i)
    {
        if(p_left[i] > p_right[i])
        {
            return 1;
        }
        else if(p_left[i] < p_right[i])
        {
            return -1;
        }
    }
    return 0;
}

/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
static inline uint64_t vli_lshift(uint64_t *p_result, uint64_t *p_in, uint p_shift)
{
    uint64_t l_carry = 0;
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_temp = p_in[i];
        p_result[i] = (l_temp << p_shift) | l_carry;
        l_carry = l_temp >> (64 - p_shift);
    }
    return l_carry;
}

/* Computes p_vli = p_vli >> 1. */
static inline void vli_rshift1(uint64_t *p_vli)
{
    uint64_t *l_end = p_vli;
    uint64_t l_carry = 0;
    p_vli += NUM_ECC_DIGITS;
    while(p_vli-- > l_end)
    {
        uint64_t l_temp = *p_vli;
        *p_vli = (l_temp >> 1) | l_carry;
        l_carry = l_temp << 63;
    }
}

/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
static inline uint64_t vli_add(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_carry = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
        if(l_sum != p_left[i])
        {
            l_carry = (l_sum < p_left[i]);
        }
        p_result[i] = l_sum;
    }
    return l_carry;
}
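
/* Added note on the carry logic in vli_add (the borrow logic in vli_sub
   below mirrors it): when l_sum == p_left[i], then p_right[i] + l_carry
   wrapped to exactly 0 mod 2^64. That can only happen if p_right[i] == 0 and
   l_carry == 0 (the carry stays 0), or p_right[i] == 0xFFFFFFFFFFFFFFFF and
   l_carry == 1 (the carry must keep propagating), so leaving l_carry
   unchanged is correct in both cases. Worked example: p_left[i] = 5,
   p_right[i] = 2^64 - 1, incoming carry 1 gives l_sum == 5 again, and the
   carry of 1 is correctly retained. */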

/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
static inline uint64_t vli_sub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_borrow = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
        if(l_diff != p_left[i])
        {
            l_borrow = (l_diff > p_left[i]);
        }
        p_result[i] = l_diff;
    }
    return l_borrow;
}

#if SUPPORTS_INT128

/* Computes p_result = p_left * p_right. */
static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_right[k-i];
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}

/* Computes p_result = p_left^2. */
static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_left[k-i];
            if(i < k-i)
            {
                r2 += l_product >> 127;
                l_product *= 2;
            }
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}

#else /* #if SUPPORTS_INT128 */

static inline uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
{
    uint128_t l_result;
    uint64_t a0 = p_left & 0xffffffffull;
    uint64_t a1 = p_left >> 32;
    uint64_t b0 = p_right & 0xffffffffull;
    uint64_t b1 = p_right >> 32;
    uint64_t m0 = a0 * b0;
    uint64_t m1 = a0 * b1;
    uint64_t m2 = a1 * b0;
    uint64_t m3 = a1 * b1;
    m2 += (m0 >> 32);
    m2 += m1;
    if(m2 < m1)
    { // overflow
        m3 += 0x100000000ull;
    }
    l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    l_result.m_high = m3 + (m2 >> 32);
    return l_result;
}

static inline uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t l_result;
    l_result.m_low = a.m_low + b.m_low;
    l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low);
    return l_result;
}

static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_right[k-i]);
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_left[k-i]);
            if(i < k-i)
            {
                r2 += l_product.m_high >> 63;
                l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                l_product.m_low <<= 1;
            }
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

#endif /* SUPPORTS_INT128 */
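
/* Added note: both vli_mult/vli_square variants implement the same
   column-wise ("schoolbook") multiplication. Output digit k accumulates
   every partial product p_left[i] * p_right[k-i] whose indices sum to k,
   with (r2:r01) acting as a 192-bit column accumulator. Worked example with
   two digits: (a1*2^64 + a0) * (b1*2^64 + b0) yields column 0 = a0*b0,
   column 1 = a0*b1 + a1*b0, column 2 = a1*b1, each column absorbing the
   previous column's carry. vli_square additionally exploits symmetry by
   computing each off-diagonal product once and doubling it. */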

/* Computes p_result = (p_left + p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modAdd(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_carry = vli_add(p_result, p_left, p_right);
    if(l_carry || vli_cmp(p_result, p_mod) >= 0)
    { /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
        vli_sub(p_result, p_result, p_mod);
    }
}

/* Computes p_result = (p_left - p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modSub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_borrow = vli_sub(p_result, p_left, p_right);
    if(l_borrow)
    { /* In this case, p_result == -diff == (max int) - diff.
         Since -x % d == d - x, we can get the correct result from p_result + p_mod (with overflow). */
        vli_add(p_result, p_result, p_mod);
    }
}

#if ECC_CURVE == secp128r1

/* Computes p_result = p_product % curve_p.
   See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;

    vli_set(p_result, p_product);

    l_tmp[0] = p_product[2];
    l_tmp[1] = (p_product[3] & 0x1FFFFFFFFull) | (p_product[2] << 33);
    l_carry = vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = (p_product[2] >> 31) | (p_product[3] << 33);
    l_tmp[1] = (p_product[3] >> 31) | ((p_product[2] & 0xFFFFFFFF80000000ull) << 2);
    l_carry += vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = (p_product[2] >> 62) | (p_product[3] << 2);
    l_tmp[1] = (p_product[3] >> 62) | ((p_product[2] & 0xC000000000000000ull) >> 29) | (p_product[3] << 35);
    l_carry += vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = (p_product[3] >> 29);
    l_tmp[1] = ((p_product[3] & 0xFFFFFFFFE0000000ull) << 4);
    l_carry += vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = (p_product[3] >> 60);
    l_tmp[1] = (p_product[3] & 0xFFFFFFFE00000000ull);
    l_carry += vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = 0;
    l_tmp[1] = ((p_product[3] & 0xF000000000000000ull) >> 27);
    l_carry += vli_add(p_result, p_result, l_tmp);

    while(l_carry || vli_cmp(curve_p, p_result) != 1)
    {
        l_carry -= vli_sub(p_result, p_result, curve_p);
    }
}

#elif ECC_CURVE == secp192r1

/* Computes p_result = p_product % curve_p.
   See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;

    vli_set(p_result, p_product);

    vli_set(l_tmp, &p_product[3]);
    l_carry = vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = 0;
    l_tmp[1] = p_product[3];
    l_tmp[2] = p_product[4];
    l_carry += vli_add(p_result, p_result, l_tmp);

    l_tmp[0] = l_tmp[1] = p_product[5];
    l_tmp[2] = 0;
    l_carry += vli_add(p_result, p_result, l_tmp);

    while(l_carry || vli_cmp(curve_p, p_result) != 1)
    {
        l_carry -= vli_sub(p_result, p_result, curve_p);
    }
}

#elif ECC_CURVE == secp256r1

/* Computes p_result = p_product % curve_p
   from http://www.nsa.gov/ia/_files/nist-routines.pdf */
static void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    int l_carry;

    /* t */
    vli_set(p_result, p_product);

    /* s1 */
    l_tmp[0] = 0;
    l_tmp[1] = p_product[5] & 0xffffffff00000000ull;
    l_tmp[2] = p_product[6];
    l_tmp[3] = p_product[7];
    l_carry = vli_lshift(l_tmp, l_tmp, 1);
    l_carry += vli_add(p_result, p_result, l_tmp);

    /* s2 */
    l_tmp[1] = p_product[6] << 32;
    l_tmp[2] = (p_product[6] >> 32) | (p_product[7] << 32);
    l_tmp[3] = p_product[7] >> 32;
    l_carry += vli_lshift(l_tmp, l_tmp, 1);
    l_carry += vli_add(p_result, p_result, l_tmp);

    /* s3 */
    l_tmp[0] = p_product[4];
    l_tmp[1] = p_product[5] & 0xffffffff;
    l_tmp[2] = 0;
    l_tmp[3] = p_product[7];
    l_carry += vli_add(p_result, p_result, l_tmp);

    /* s4 */
    l_tmp[0] = (p_product[4] >> 32) | (p_product[5] << 32);
    l_tmp[1] = (p_product[5] >> 32) | (p_product[6] & 0xffffffff00000000ull);
    l_tmp[2] = p_product[7];
    l_tmp[3] = (p_product[6] >> 32) | (p_product[4] << 32);
    l_carry += vli_add(p_result, p_result, l_tmp);

    /* d1 */
    l_tmp[0] = (p_product[5] >> 32) | (p_product[6] << 32);
    l_tmp[1] = (p_product[6] >> 32);
    l_tmp[2] = 0;
    l_tmp[3] = (p_product[4] & 0xffffffff) | (p_product[5] << 32);
    l_carry -= vli_sub(p_result, p_result, l_tmp);

    /* d2 */
    l_tmp[0] = p_product[6];
    l_tmp[1] = p_product[7];
    l_tmp[2] = 0;
    l_tmp[3] = (p_product[4] >> 32) | (p_product[5] & 0xffffffff00000000ull);
    l_carry -= vli_sub(p_result, p_result, l_tmp);

    /* d3 */
    l_tmp[0] = (p_product[6] >> 32) | (p_product[7] << 32);
    l_tmp[1] = (p_product[7] >> 32) | (p_product[4] << 32);
    l_tmp[2] = (p_product[4] >> 32) | (p_product[5] << 32);
    l_tmp[3] = (p_product[6] << 32);
    l_carry -= vli_sub(p_result, p_result, l_tmp);

    /* d4 */
    l_tmp[0] = p_product[7];
    l_tmp[1] = p_product[4] & 0xffffffff00000000ull;
    l_tmp[2] = p_product[5];
    l_tmp[3] = p_product[6] & 0xffffffff00000000ull;
    l_carry -= vli_sub(p_result, p_result, l_tmp);

    if(l_carry < 0)
    {
        do
        {
            l_carry += vli_add(p_result, p_result, curve_p);
        } while(l_carry < 0);
    }
    else
    {
        while(l_carry || vli_cmp(curve_p, p_result) != 1)
        {
            l_carry -= vli_sub(p_result, p_result, curve_p);
        }
    }
}

#elif ECC_CURVE == secp384r1

static inline void omega_mult(uint64_t *p_result, uint64_t *p_right)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_carry, l_diff;

    /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
    vli_set(p_result, p_right); /* 1 */
    l_carry = vli_lshift(l_tmp, p_right, 32);
    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); /* 2^96 + 1 */
    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); /* 2^128 + 2^96 + 1 */
    l_carry += vli_sub(p_result, p_result, l_tmp); /* 2^128 + 2^96 - 2^32 + 1 */

    l_diff = p_result[NUM_ECC_DIGITS] - l_carry;
    if(l_diff > p_result[NUM_ECC_DIGITS])
    { /* Propagate borrow if necessary. */
        uint i;
        for(i = 1 + NUM_ECC_DIGITS; ; ++i)
        {
            --p_result[i];
            if(p_result[i] != (uint64_t)-1)
            {
                break;
            }
        }
    }
    p_result[NUM_ECC_DIGITS] = l_diff;
}
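
/* Added note on why this works for P-384: since
   p = 2^384 - 2^128 - 2^96 + 2^32 - 1, we have
       2^384 mod p = 2^128 + 2^96 - 2^32 + 1   (the "omega" above),
   so a double-width product split as c1 * 2^384 + c0 satisfies
       c1 * 2^384 + c0 = c0 + omega * c1 (mod p),
   and repeating that fold rapidly drives the high half to zero, which is
   exactly the loop in vli_mmod_fast() below. */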

/* Computes p_result = p_product % curve_p
   see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
   section "Curve-Specific Optimizations" */
static inline void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[2*NUM_ECC_DIGITS];

    while(!vli_isZero(p_product + NUM_ECC_DIGITS)) /* While c1 != 0 */
    {
        uint64_t l_carry = 0;
        uint i;

        vli_clear(l_tmp);
        vli_clear(l_tmp + NUM_ECC_DIGITS);
        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS); /* tmp = w * c1 */
        vli_clear(p_product + NUM_ECC_DIGITS); /* p = c0 */

        /* (c1, c0) = c0 + w * c1 */
        for(i=0; i<NUM_ECC_DIGITS+3; ++i)
        {
            uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
            if(l_sum != p_product[i])
            {
                l_carry = (l_sum < p_product[i]);
            }
            p_product[i] = l_sum;
        }
    }

    while(vli_cmp(p_product, curve_p) > 0)
    {
        vli_sub(p_product, p_product, curve_p);
    }
    vli_set(p_result, p_product);
}

#endif

/* Computes p_result = (p_left * p_right) % curve_p. */
static inline void vli_modMult_fast(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_mult(l_product, p_left, p_right);
    vli_mmod_fast(p_result, l_product);
}

/* Computes p_result = p_left^2 % curve_p. */
static inline void vli_modSquare_fast(uint64_t *p_result, uint64_t *p_left)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_square(l_product, p_left);
    vli_mmod_fast(p_result, l_product);
}

#define EVEN(vli) (!(vli[0] & 1))

/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
   See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
   https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
static inline void vli_modInv(uint64_t *p_result, uint64_t *p_input, uint64_t *p_mod)
{
    uint64_t a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS], u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS];
    uint64_t l_carry;
    int l_cmpResult;

    if(vli_isZero(p_input))
    {
        vli_clear(p_result);
        return;
    }

    vli_set(a, p_input);
    vli_set(b, p_mod);
    vli_clear(u);
    u[0] = 1;
    vli_clear(v);

    while((l_cmpResult = vli_cmp(a, b)) != 0)
    {
        l_carry = 0;
        if(EVEN(a))
        {
            vli_rshift1(a);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(EVEN(b))
        {
            vli_rshift1(b);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(l_cmpResult > 0)
        {
            vli_sub(a, a, b);
            vli_rshift1(a);
            if(vli_cmp(u, v) < 0)
            {
                vli_add(u, u, p_mod);
            }
            vli_sub(u, u, v);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else
        {
            vli_sub(b, b, a);
            vli_rshift1(b);
            if(vli_cmp(v, u) < 0)
            {
                vli_add(v, v, p_mod);
            }
            vli_sub(v, v, u);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
    }

    vli_set(p_result, u);
}
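
/* Added note: vli_modInv is the binary extended Euclidean algorithm from the
   paper cited above. It maintains the invariants
       a == u * p_input (mod p_mod)  and  b == v * p_input (mod p_mod)
   while halving even values and subtracting the smaller from the larger, so
   when a == b (== 1 for a prime modulus and nonzero input) u holds the
   inverse. Tiny worked example mod 7: inverting 3 yields 5, since
   3 * 5 == 15 == 2 * 7 + 1. */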

/* ------ Point operations ------ */

/* Returns 1 if p_point is the point at infinity, 0 otherwise. */
static inline int EccPoint_isZero(EccPoint *p_point)
{
    return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
   From http://eprint.iacr.org/2011/338.pdf
*/

/* Double in place */
static inline void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
{
    /* t1 = X, t2 = Y, t3 = Z */
    uint64_t t4[NUM_ECC_DIGITS];
    uint64_t t5[NUM_ECC_DIGITS];

    if(vli_isZero(Z1))
    {
        return;
    }

    vli_modSquare_fast(t4, Y1); /* t4 = y1^2 */
    vli_modMult_fast(t5, X1, t4); /* t5 = x1*y1^2 = A */
    vli_modSquare_fast(t4, t4); /* t4 = y1^4 */
    vli_modMult_fast(Y1, Y1, Z1); /* t2 = y1*z1 = z3 */
    vli_modSquare_fast(Z1, Z1); /* t3 = z1^2 */

    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = x1 + z1^2 */
    vli_modAdd(Z1, Z1, Z1, curve_p); /* t3 = 2*z1^2 */
    vli_modSub(Z1, X1, Z1, curve_p); /* t3 = x1 - z1^2 */
    vli_modMult_fast(X1, X1, Z1); /* t1 = x1^2 - z1^4 */

    vli_modAdd(Z1, X1, X1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
    if(vli_testBit(X1, 0))
    {
        uint64_t l_carry = vli_add(X1, X1, curve_p);
        vli_rshift1(X1);
        X1[NUM_ECC_DIGITS-1] |= l_carry << 63;
    }
    else
    {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */

    vli_modSquare_fast(Z1, X1); /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5, t5, Z1, curve_p); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5); /* t1 = B * (A - x3) */
    vli_modSub(t4, X1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */

    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static inline void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
{
    uint64_t t1[NUM_ECC_DIGITS];
    vli_modSquare_fast(t1, Z); /* z^2 */
    vli_modMult_fast(X1, X1, t1); /* x1 * z^2 */
    vli_modMult_fast(t1, t1, Z); /* z^3 */
    vli_modMult_fast(Y1, Y1, t1); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
static inline void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
{
    uint64_t z[NUM_ECC_DIGITS];

    vli_set(X2, X1);
    vli_set(Y2, Y1);

    vli_clear(z);
    z[0] = 1;
    if(p_initialZ)
    {
        vli_set(z, p_initialZ);
    }
    apply_z(X1, Y1, z);

    EccPoint_double_jacobian(X1, Y1, z);

    apply_z(X2, Y2, z);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
   or P => P', Q => P + Q
*/
static inline void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];

    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5); /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5); /* t3 = x2*A = C */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSquare_fast(t5, Y2); /* t5 = (y2 - y1)^2 = D */

    vli_modSub(t5, t5, X1, curve_p); /* t5 = D - B */
    vli_modSub(t5, t5, X2, curve_p); /* t5 = D - B - C = x3 */
    vli_modSub(X2, X2, X1, curve_p); /* t3 = C - B */
    vli_modMult_fast(Y1, Y1, X2); /* t2 = y1*(C - B) */
    vli_modSub(X2, X1, t5, curve_p); /* t3 = B - x3 */
    vli_modMult_fast(Y2, Y2, X2); /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */

    vli_set(X2, t5);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
   or P => P - Q, Q => P + Q
*/
static inline void XYcZ_addC(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    uint64_t t6[NUM_ECC_DIGITS];
    uint64_t t7[NUM_ECC_DIGITS];

    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5); /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5); /* t3 = x2*A = C */
    vli_modAdd(t5, Y2, Y1, curve_p); /* t4 = y2 + y1 */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */

    vli_modSub(t6, X2, X1, curve_p); /* t6 = C - B */
    vli_modMult_fast(Y1, Y1, t6); /* t2 = y1 * (C - B) */
    vli_modAdd(t6, X1, X2, curve_p); /* t6 = B + C */
    vli_modSquare_fast(X2, Y2); /* t3 = (y2 - y1)^2 */
    vli_modSub(X2, X2, t6, curve_p); /* t3 = x3 */

    vli_modSub(t7, X1, X2, curve_p); /* t7 = B - x3 */
    vli_modMult_fast(Y2, Y2, t7); /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */

    vli_modSquare_fast(t7, t5); /* t7 = (y2 + y1)^2 = F */
    vli_modSub(t7, t7, t6, curve_p); /* t7 = x3' */
    vli_modSub(t6, t7, X1, curve_p); /* t6 = x3' - B */
    vli_modMult_fast(t6, t6, t5); /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(Y1, t6, Y1, curve_p); /* t2 = y3' */

    vli_set(X1, t7);
}

static inline void EccPoint_mult(EccPoint *p_result, EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
{
    /* R0 and R1 */
    uint64_t Rx[2][NUM_ECC_DIGITS];
    uint64_t Ry[2][NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    int i, nb;

    vli_set(Rx[1], p_point->x);
    vli_set(Ry[1], p_point->y);

    XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);

    for(i = vli_numBits(p_scalar) - 2; i > 0; --i)
    {
        nb = !vli_testBit(p_scalar, i);
        XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
        XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    }

    nb = !vli_testBit(p_scalar, 0);
    XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);

    /* Find final 1/Z value. */
    vli_modSub(z, Rx[1], Rx[0], curve_p); /* X1 - X0 */
    vli_modMult_fast(z, z, Ry[1-nb]); /* Yb * (X1 - X0) */
    vli_modMult_fast(z, z, p_point->x); /* xP * Yb * (X1 - X0) */
    vli_modInv(z, z, curve_p); /* 1 / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, p_point->y); /* yP / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, Rx[1-nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */
    /* End 1/Z calculation */

    XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);

    apply_z(Rx[0], Ry[0], z);

    vli_set(p_result->x, Rx[0]);
    vli_set(p_result->y, Ry[0]);
}
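
/* Added note: EccPoint_mult is a Montgomery ladder. (Rx[0],Ry[0]) and
   (Rx[1],Ry[1]) always differ by the input point P, and every scalar bit
   performs exactly one conjugate co-Z addition (XYcZ_addC) plus one co-Z
   addition (XYcZ_add) regardless of the bit's value, a regular structure
   that helps resist simple timing/power analysis. The "Find final 1/Z
   value" block recovers the final Z (as 1/Z) from the known affine
   coordinates of the input point, following the co-Z paper cited above. */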

static inline void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS], const uint8_t p_bytes[ECC_BYTES])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        const uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) |
                      ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
    }
}

static inline void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES], const uint64_t p_native[NUM_ECC_DIGITS])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_digit[0] = p_native[i] >> 56;
        p_digit[1] = p_native[i] >> 48;
        p_digit[2] = p_native[i] >> 40;
        p_digit[3] = p_native[i] >> 32;
        p_digit[4] = p_native[i] >> 24;
        p_digit[5] = p_native[i] >> 16;
        p_digit[6] = p_native[i] >> 8;
        p_digit[7] = p_native[i];
    }
}

/* Compute a = sqrt(a) (mod curve_p). */
static inline void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
{
    unsigned i;
    uint64_t p1[NUM_ECC_DIGITS] = {1};
    uint64_t l_result[NUM_ECC_DIGITS] = {1};

    /* Since curve_p == 3 (mod 4) for all supported curves, we can
       compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1, curve_p, p1); /* p1 = curve_p + 1 */
    for(i = vli_numBits(p1) - 1; i > 1; --i)
    {
        vli_modSquare_fast(l_result, l_result);
        if(vli_testBit(p1, i))
        {
            vli_modMult_fast(l_result, l_result, a);
        }
    }
    vli_set(a, l_result);
}
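
/* Added note on why mod_sqrt works: every supported curve prime satisfies
   p == 3 (mod 4), so (p + 1) / 4 is an integer and, for a quadratic
   residue a,
       (a^((p+1)/4))^2 == a^((p+1)/2) == a * a^((p-1)/2) == a (mod p)
   by Euler's criterion (a^((p-1)/2) == 1 for residues). The loop is plain
   square-and-multiply exponentiation by (p + 1) / 4: it scans p1 = p + 1
   from its top bit down to bit 2; stopping at i > 1 drops the two low zero
   bits of p + 1, i.e. divides the exponent by 4. */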

static inline void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
{
    uint64_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
    ecc_bytes2native(p_point->x, p_compressed+1);

    vli_modSquare_fast(p_point->y, p_point->x); /* y = x^2 */
    vli_modSub(p_point->y, p_point->y, _3, curve_p); /* y = x^2 - 3 */
    vli_modMult_fast(p_point->y, p_point->y, p_point->x); /* y = x^3 - 3x */
    vli_modAdd(p_point->y, p_point->y, curve_b, curve_p); /* y = x^3 - 3x + b */

    mod_sqrt(p_point->y);

    if((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01))
    {
        vli_sub(p_point->y, curve_p, p_point->y);
    }
}
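
/* Added note on the compressed-point encoding (SEC1 style): byte 0 is 0x02
   if Y is even or 0x03 if Y is odd, followed by the big-endian X coordinate,
   for a total of ECC_BYTES + 1 == 49 bytes on P-384. The curve equation
   y^2 = x^3 - 3x + b yields y^2; mod_sqrt returns one root, and the parity
   byte selects between y and p - y. ecc_make_key() below writes the same
   format via 2 + (y & 1). */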

static inline int ecc_make_key(uint8_t p_publicKey[ECC_BYTES+1], uint8_t p_privateKey[ECC_BYTES])
{
    uint64_t l_private[NUM_ECC_DIGITS];
    EccPoint l_public;
    unsigned l_tries = 0;

    do
    {
        if(!getRandomNumber(l_private) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(l_private))
        {
            continue;
        }

        /* Make sure the private key is in the range [1, n-1].
           For the supported curves, n is always large enough that we only need to subtract once at most. */
        if(vli_cmp(curve_n, l_private) != 1)
        {
            vli_sub(l_private, l_private, curve_n);
        }

        EccPoint_mult(&l_public, &curve_G, l_private, NULL);
    } while(EccPoint_isZero(&l_public));

    ecc_native2bytes(p_privateKey, l_private);
    ecc_native2bytes(p_publicKey + 1, l_public.x);
    p_publicKey[0] = 2 + (l_public.y[0] & 0x01);
    return 1;
}

static inline int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_privateKey[ECC_BYTES], uint8_t p_secret[ECC_BYTES])
{
    EccPoint l_public;
    uint64_t l_private[NUM_ECC_DIGITS];
    uint64_t l_random[NUM_ECC_DIGITS];

    if(!getRandomNumber(l_random))
    {
        return 0;
    }

    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_private, p_privateKey);

    EccPoint l_product;
    EccPoint_mult(&l_product, &l_public, l_private, l_random);

    ecc_native2bytes(p_secret, l_product.x);

    return !EccPoint_isZero(&l_product);
}
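
/* Added ECDH note: the shared secret is the X coordinate of
   ourPriv * theirPub, and both sides agree because
   d1 * (d2 * G) == d2 * (d1 * G). The random l_random value is passed to
   EccPoint_mult as the initial Z coordinate; this appears to be projective
   coordinate randomization, a side-channel countermeasure rather than a
   functional input (any nonzero Z yields the same affine result). */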

/* -------- ECDSA code -------- */

/* Computes p_result = (p_left * p_right) % p_mod. */
static inline void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    uint64_t l_modMultiple[2 * NUM_ECC_DIGITS];
    uint l_digitShift, l_bitShift;
    uint l_productBits;
    uint l_modBits = vli_numBits(p_mod);

    vli_mult(l_product, p_left, p_right);
    l_productBits = vli_numBits(l_product + NUM_ECC_DIGITS);
    if(l_productBits)
    {
        l_productBits += NUM_ECC_DIGITS * 64;
    }
    else
    {
        l_productBits = vli_numBits(l_product);
    }

    if(l_productBits < l_modBits)
    { /* l_product < p_mod. */
        vli_set(p_result, l_product);
        return;
    }

    /* Shift p_mod by (l_productBits - l_modBits). This multiplies p_mod by the largest
       power of two possible while still resulting in a number less than l_product. */
    vli_clear(l_modMultiple);
    vli_clear(l_modMultiple + NUM_ECC_DIGITS);
    l_digitShift = (l_productBits - l_modBits) / 64;
    l_bitShift = (l_productBits - l_modBits) % 64;
    if(l_bitShift)
    {
        l_modMultiple[l_digitShift + NUM_ECC_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
    }
    else
    {
        vli_set(l_modMultiple + l_digitShift, p_mod);
    }

    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result);
    p_result[0] = 1; /* Use p_result as a temp var to store 1 (for subtraction) */
    while(l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0)
    {
        int l_cmp = vli_cmp(l_modMultiple + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS);
        if(l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0))
        {
            if(vli_sub(l_product, l_product, l_modMultiple))
            { /* borrow */
                vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, p_result);
            }
            vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, l_modMultiple + NUM_ECC_DIGITS);
        }
        uint64_t l_carry = (l_modMultiple[NUM_ECC_DIGITS] & 0x01) << 63;
        vli_rshift1(l_modMultiple + NUM_ECC_DIGITS);
        vli_rshift1(l_modMultiple);
        l_modMultiple[NUM_ECC_DIGITS-1] |= l_carry;

        --l_productBits;
    }
    vli_set(p_result, l_product);
}

static inline uint umax(uint a, uint b)
{
    return (a > b ? a : b);
}

static inline int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES], const uint8_t p_hash[ECC_BYTES], uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t k[NUM_ECC_DIGITS];
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_s[NUM_ECC_DIGITS];
    EccPoint p;
    unsigned l_tries = 0;

    do
    {
        if(!getRandomNumber(k) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(k))
        {
            continue;
        }

        if(vli_cmp(curve_n, k) != 1)
        {
            vli_sub(k, k, curve_n);
        }

        /* tmp = k * G */
        EccPoint_mult(&p, &curve_G, k, NULL);

        /* r = x1 (mod n) */
        if(vli_cmp(curve_n, p.x) != 1)
        {
            vli_sub(p.x, p.x, curve_n);
        }
    } while(vli_isZero(p.x));

    ecc_native2bytes(p_signature, p.x);

    ecc_bytes2native(l_tmp, p_privateKey);
    vli_modMult(l_s, p.x, l_tmp, curve_n); /* s = r*d */
    ecc_bytes2native(l_tmp, p_hash);
    vli_modAdd(l_s, l_tmp, l_s, curve_n); /* s = e + r*d */
    vli_modInv(k, k, curve_n); /* k = 1 / k */
    vli_modMult(l_s, l_s, k, curve_n); /* s = (e + r*d) / k */
    ecc_native2bytes(p_signature + ECC_BYTES, l_s);

    return 1;
}
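
/* Added summary of the ECDSA math implemented above and checked in
   ecdsa_verify below: with nonce k, (x1, y1) = k * G, r = x1 mod n, and
       s = k^-1 * (e + r*d) mod n    (e = hash, d = private key).
   Verification computes u1 = e/s and u2 = r/s, then accepts iff the x
   coordinate of u1*G + u2*Q equals r, which holds because
       u1*G + u2*Q = (e/s)*G + (r/s)*(d*G) = ((e + r*d)/s)*G = k*G. */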

static inline int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_hash[ECC_BYTES], const uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    EccPoint l_public, l_sum;
    uint64_t rx[NUM_ECC_DIGITS];
    uint64_t ry[NUM_ECC_DIGITS];
    uint64_t tx[NUM_ECC_DIGITS];
    uint64_t ty[NUM_ECC_DIGITS];
    uint64_t tz[NUM_ECC_DIGITS];
    uint64_t l_r[NUM_ECC_DIGITS], l_s[NUM_ECC_DIGITS];

    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_r, p_signature);
    ecc_bytes2native(l_s, p_signature + ECC_BYTES);

    if(vli_isZero(l_r) || vli_isZero(l_s))
    { /* r, s must not be 0. */
        return 0;
    }
    if(vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1)
    { /* r, s must be < n. */
        return 0;
    }

    /* Calculate u1 and u2. */
    vli_modInv(z, l_s, curve_n); /* Z = s^-1 */
    ecc_bytes2native(u1, p_hash);
    vli_modMult(u1, u1, z, curve_n); /* u1 = e/s */
    vli_modMult(u2, l_r, z, curve_n); /* u2 = r/s */

    /* Calculate l_sum = G + Q. */
    vli_set(l_sum.x, l_public.x);
    vli_set(l_sum.y, l_public.y);
    vli_set(tx, curve_G.x);
    vli_set(ty, curve_G.y);
    vli_modSub(z, l_sum.x, tx, curve_p); /* Z = x2 - x1 */
    XYcZ_add(tx, ty, l_sum.x, l_sum.y);
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(l_sum.x, l_sum.y, z);

    /* Use Shamir's trick to calculate u1*G + u2*Q */
    EccPoint *l_points[4] = {NULL, &curve_G, &l_public, &l_sum};
    uint l_numBits = umax(vli_numBits(u1), vli_numBits(u2));

    EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits-1)) | ((!!vli_testBit(u2, l_numBits-1)) << 1)];
    vli_set(rx, l_point->x);
    vli_set(ry, l_point->y);
    vli_clear(z);
    z[0] = 1;

    int i;
    for(i = l_numBits - 2; i >= 0; --i)
    {
        EccPoint_double_jacobian(rx, ry, z);

        int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
        EccPoint *l_point = l_points[l_index];
        if(l_point)
        {
            vli_set(tx, l_point->x);
            vli_set(ty, l_point->y);
            apply_z(tx, ty, z);
            vli_modSub(tz, rx, tx, curve_p); /* Z = x2 - x1 */
            XYcZ_add(tx, ty, rx, ry);
            vli_modMult_fast(z, z, tz);
        }
    }

    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(rx, ry, z);

    /* v = x1 (mod n) */
    if(vli_cmp(curve_n, rx) != 1)
    {
        vli_sub(rx, rx, curve_n);
    }

    /* Accept only if v == r. */
    return (vli_cmp(rx, l_r) == 0);
}
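
/* Added note: the verifier's Shamir's-trick loop evaluates u1*G + u2*Q in a
   single double-and-add pass. At each bit position it doubles once and then
   adds one of {nothing, G, Q, G+Q} according to the pair of scalar bits
   (bit i of u1 and bit i of u2), roughly halving the cost of two
   independent scalar multiplications. */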

//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////

} // anonymous namespace

void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
{
    if (!ecc_make_key(pub,priv)) {
        fprintf(stderr,"FATAL: ecc_make_key() failed!" ZT_EOL_S);
        abort();
    }
}

void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    if (!ecdsa_sign(priv,hash,sig)) {
        fprintf(stderr,"FATAL: ecdsa_sign() failed!" ZT_EOL_S);
        abort();
    }
}

bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    return (ecdsa_verify(pub,hash,sig) != 0);
}

bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
{
    return (ecdh_shared_secret(theirPub,ourPriv,secret) != 0);
}

} // namespace ZeroTier
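
//////////////////////////////////////////////////////////////////////////////
// Usage sketch for the public API above (added illustration, kept inside
// #if 0 so it is never compiled; it assumes the ZT_ECC384_* size constants
// and these four functions are declared in ECC384.hpp as included above):
//////////////////////////////////////////////////////////////////////////////
#if 0
#include <cstdio>
#include <cstring>
#include "ECC384.hpp"

int main()
{
    using namespace ZeroTier;

    // Generate two P-384 key pairs (compressed public keys).
    uint8_t alicePub[ZT_ECC384_PUBLIC_KEY_SIZE], alicePriv[ZT_ECC384_PRIVATE_KEY_SIZE];
    uint8_t bobPub[ZT_ECC384_PUBLIC_KEY_SIZE], bobPriv[ZT_ECC384_PRIVATE_KEY_SIZE];
    ECC384GenerateKey(alicePub, alicePriv);
    ECC384GenerateKey(bobPub, bobPriv);

    // ECDH: both sides should derive the same shared secret.
    uint8_t s1[ZT_ECC384_SHARED_SECRET_SIZE], s2[ZT_ECC384_SHARED_SECRET_SIZE];
    if (!ECC384ECDH(bobPub, alicePriv, s1) || !ECC384ECDH(alicePub, bobPriv, s2))
        return 1;
    printf("ECDH agree: %s\n", (memcmp(s1, s2, sizeof(s1)) == 0) ? "yes" : "no");

    // ECDSA: sign a 48-byte message digest (normally a SHA-384 hash; an
    // all-zero placeholder is used here purely for illustration).
    uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE] = {0};
    uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
    ECC384ECDSASign(alicePriv, hash, sig);
    printf("verify: %s\n", ECC384ECDSAVerify(alicePub, hash, sig) ? "ok" : "FAIL");

    return 0;
}
#endif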