/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2025-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Trace.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "SelfAwareness.hpp"
#include "InetAddress.hpp"
#include "Protocol.hpp"
#include "Endpoint.hpp"
#include "Expect.hpp"

namespace ZeroTier {

Peer::Peer(const RuntimeEnvironment *renv) :
    RR(renv),
    m_ephemeralPairTimestamp(0),
    m_lastReceive(0),
    m_lastSend(0),
    m_lastSentHello(0),
    m_lastWhoisRequestReceived(0),
    m_lastEchoRequestReceived(0),
    m_lastPrioritizedPaths(0),
    m_lastProbeReceived(0),
    m_alivePathCount(0),
    m_tryQueue(),
    m_vProto(0),
    m_vMajor(0),
    m_vMinor(0),
    m_vRevision(0)
{}

Peer::~Peer()
{ Utils::burn(m_helloMacKey, sizeof(m_helloMacKey)); }

bool Peer::init(const Identity &peerIdentity)
{
    RWMutex::Lock l(m_lock);

    if (m_id) // sanity check: already initialized
        return false;
    m_id = peerIdentity;

    uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
    if (!RR->identity.agree(peerIdentity, k))
        return false;
    m_identityKey.set(new SymmetricKey(RR->node->now(), k));
    Utils::burn(k, sizeof(k));

    m_deriveSecondaryIdentityKeys();

    return true;
}
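
// Illustrative sketch (added commentary, not part of the original file): a
// Peer is constructed empty and bound to an identity exactly once via init(),
// which performs the key agreement with our own identity to produce the
// long-lived identity key. A hypothetical caller might look like:
//
//   SharedPtr< Peer > p(new Peer(RR));
//   if (!p->init(remoteIdentity)) // remoteIdentity is a placeholder name
//       return; // agreement failed or the peer was already initialized
//
// init() returning false on a second call makes the "already initialized"
// check above a hard invariant rather than a soft warning.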

void Peer::received(
    void *tPtr,
    const SharedPtr< Path > &path,
    const unsigned int hops,
    const uint64_t packetId,
    const unsigned int payloadLength,
    const Protocol::Verb verb,
    const Protocol::Verb inReVerb)
{
    const int64_t now = RR->node->now();

    m_lastReceive = now;
    m_inMeter.log(now, payloadLength);

    if (hops == 0) {
        RWMutex::RMaybeWLock l(m_lock);

        // If this matches an existing path, skip the path learning stuff. For the
        // small number of paths a peer will have, a linear scan is the fastest way
        // to do the lookup.
        for (unsigned int i = 0; i < m_alivePathCount; ++i) {
            if (m_paths[i] == path)
                return;
        }

        // If we made it here, we don't already know this path.
        if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, path->localSocket(), path->address())) {
            // SECURITY: note that if we've made it here we expected this OK, see Expect.hpp.
            // There is replay protection in effect for OK responses.
            if (verb == Protocol::VERB_OK) {
                // If we're learning a new path, convert the lock to an exclusive write lock.
                l.writing();

                // If the path list is full, replace the least recently active path. Otherwise append the new path.
                unsigned int newPathIdx = 0;
                if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
                    int64_t lastReceiveTimeMax = 0;
                    for (unsigned int i = 0; i < m_alivePathCount; ++i) {
                        if ((m_paths[i]->address().family() == path->address().family()) &&
                            (m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
                            (m_paths[i]->address().ipsEqual2(path->address()))) {
                            // Replace the older path if everything is the same except the port number,
                            // since NAT/firewall reboots and other wacky stuff can change port number
                            // assignments.
                            m_paths[i] = path;
                            return;
                        } else if (m_paths[i]->lastIn() >= lastReceiveTimeMax) {
                            lastReceiveTimeMax = m_paths[i]->lastIn();
                            newPathIdx = i;
                        }
                    }
                } else {
                    newPathIdx = m_alivePathCount++;
                }

                InetAddress old;
                if (m_paths[newPathIdx])
                    old = m_paths[newPathIdx]->address();
                m_paths[newPathIdx] = path;

                // Re-prioritize paths to include the new one.
                m_prioritizePaths(now);

                // Add or update an entry in the endpoint cache. If this endpoint is
                // already present, its lastSeen time is refreshed. Otherwise it
                // replaces the lowest ranked entry.
                std::sort(m_endpointCache, m_endpointCache + ZT_PEER_ENDPOINT_CACHE_SIZE);
                Endpoint thisEndpoint(path->address());
                for (unsigned int i = 0;; ++i) {
                    if (i == (ZT_PEER_ENDPOINT_CACHE_SIZE - 1)) {
                        m_endpointCache[i].target = thisEndpoint;
                        m_endpointCache[i].lastSeen = now;
                        break;
                    } else if (m_endpointCache[i].target == thisEndpoint) {
                        m_endpointCache[i].lastSeen = now;
                        break;
                    }
                }

                RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
            } else {
                path->sent(now, hello(tPtr, path->localSocket(), path->address(), now));
                RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
            }
        }
    }
}
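
// Added commentary (not in the original): new paths are only learned from OK
// replies because Expect.hpp tracks the packet IDs of our own outbound
// requests, so an OK arriving on an unknown path proves bidirectional
// reachability and cannot be replayed. Anything else arriving on an unknown
// path merely triggers a HELLO to that address; the path is learned later
// only if the authenticated OK comes back on it.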

void Peer::send(void *tPtr, int64_t now, const void *data, unsigned int len) noexcept
{
    SharedPtr< Path > via(this->path(now));
    if (via) {
        via->send(RR, tPtr, data, len, now);
    } else {
        const SharedPtr< Peer > root(RR->topology->root(now));
        if ((root) && (root.ptr() != this)) {
            via = root->path(now);
            if (via) {
                via->send(RR, tPtr, data, len, now);
                root->relayed(now, len);
            } else {
                return;
            }
        } else {
            return;
        }
    }
    sent(now, len);
}
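
// Added commentary: a sketch of the fallback logic above, not original text.
//
//   direct path alive?   -> send directly
//   else root reachable? -> send via the root, crediting root->relayed()
//   else                 -> drop silently (sent() is never called)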

unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atAddress, const int64_t now)
{
    Buf outp;

    const uint64_t packetId = m_identityKey->nextMessage(RR->identity.address(), m_id.address());
    int ii = Protocol::newPacket(outp, packetId, m_id.address(), RR->identity.address(), Protocol::VERB_HELLO);

    outp.wI8(ii, ZT_PROTO_VERSION);
    outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
    outp.wI8(ii, ZEROTIER_VERSION_MINOR);
    outp.wI16(ii, ZEROTIER_VERSION_REVISION);
    outp.wI64(ii, (uint64_t)now);
    outp.wO(ii, RR->identity);
    outp.wO(ii, atAddress);

    const int ivStart = ii;
    outp.wR(ii, 12);

    // LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
    // Once those are dead they'll become just reserved bytes for future use as flags, etc.
    outp.wI32(ii, 0); // reserved bytes
    void *const legacyMoonCountStart = outp.unsafeData + ii;
    outp.wI16(ii, 0);
    const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
    Salsa20(m_identityKey->secret, &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);

    const int cryptSectionStart = ii;
    FCV< uint8_t, 4096 > md;
    Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, RR->instanceId);
    outp.wI16(ii, (uint16_t)md.size());
    outp.wB(ii, md.data(), (unsigned int)md.size());

    if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check: should be impossible
        return 0;

    // Encrypt the dictionary section in place. Note that the output pointer must be
    // the start of the crypt section, not the current write position.
    AES::CTR ctr(m_helloCipher);
    void *const cryptSection = outp.unsafeData + cryptSectionStart;
    ctr.init(outp.unsafeData + ivStart, 0, cryptSection);
    ctr.crypt(cryptSection, ii - cryptSectionStart);
    ctr.finish();

    HMACSHA384(m_helloMacKey, outp.unsafeData, ii, outp.unsafeData + ii);
    ii += ZT_HMACSHA384_LEN;

    // LEGACY: we also need Poly1305 for v1.x peers.
    uint8_t polyKey[ZT_POLY1305_KEY_SIZE], perPacketKey[ZT_SALSA20_KEY_SIZE];
    Protocol::salsa2012DeriveKey(m_identityKey->secret, perPacketKey, outp, ii);
    Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, polyKey, sizeof(polyKey));
    Poly1305 p1305(polyKey);
    p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
    uint64_t polyMac[2];
    p1305.finish(polyMac);
    Utils::storeMachineEndian< uint64_t >(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);

    return (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii))) ? ii : 0;
}
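
// Added commentary: rough wire layout of the HELLO assembled above, as a
// reading aid inferred from this function (not normative protocol docs):
//
//   [standard header: packetId, destination, source, flags, legacy MAC field, verb]
//   [u8 proto version][u8 major][u8 minor][u16 revision][u64 timestamp]
//   [identity][atAddress]
//   [12 random bytes: AES-CTR IV]
//   [6 legacy bytes, Salsa20/12-scrambled for v1.x compatibility]
//   [u16 dictionary size][dictionary, AES-CTR encrypted]
//   [48-byte HMAC-SHA384 over everything prior]
//
// The Poly1305 MAC written into the legacy MAC field at the end exists only
// so that v1.x peers will accept the packet.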

void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
{
    RWMutex::Lock l(m_lock);

    // Determine if we need a new ephemeral key pair and if a new HELLO needs
    // to be sent. The latter happens every ZT_PEER_HELLO_INTERVAL or if a new
    // ephemeral key pair is generated.
    bool needHello = false;
    if ((m_vProto >= 11) && (((now - m_ephemeralPairTimestamp) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || ((m_ephemeralKeys[0]) && (m_ephemeralKeys[0]->odometer() >= (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2))))) {
        m_ephemeralPair.generate();
        needHello = true;
    } else if ((now - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
        needHello = true;
    }

    // Prioritize paths and, more importantly here, forget dead ones.
    m_prioritizePaths(now);

    if (m_tryQueue.empty()) {
        if (m_alivePathCount == 0) {
            // If there are no living paths and nothing in the try queue, try addresses
            // from any locator we have on file or that are fetched via the external API
            // callback (if one was supplied).

            if (m_locator) {
                for (Vector< std::pair< Endpoint, SharedPtr< const Locator::EndpointAttributes > > >::const_iterator ep(m_locator->endpoints().begin()); ep != m_locator->endpoints().end(); ++ep) {
                    if (ep->first.type == ZT_ENDPOINT_TYPE_IP_UDP) {
                        if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, ep->first.ip())) {
                            int64_t &lt = m_lastTried[ep->first];
                            if ((now - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
                                lt = now;
                                RR->t->tryingNewPath(tPtr, 0x84b22322, m_id, ep->first.ip(), InetAddress::NIL, 0, 0, Identity::NIL);
                                sent(now, m_sendProbe(tPtr, -1, ep->first.ip(), nullptr, 0, now));
                            }
                        }
                    }
                }
            }

            for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
                if ((m_endpointCache[i].lastSeen > 0) && (m_endpointCache[i].target.type == ZT_ENDPOINT_TYPE_IP_UDP)) {
                    if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, m_endpointCache[i].target.ip())) {
                        int64_t &lt = m_lastTried[m_endpointCache[i].target];
                        if ((now - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
                            lt = now;
                            RR->t->tryingNewPath(tPtr, 0x84b22343, m_id, m_endpointCache[i].target.ip(), InetAddress::NIL, 0, 0, Identity::NIL);
                            sent(now, m_sendProbe(tPtr, -1, m_endpointCache[i].target.ip(), nullptr, 0, now));
                        }
                    }
                }
            }

            InetAddress addr;
            if (RR->node->externalPathLookup(tPtr, m_id, -1, addr)) {
                if ((addr) && RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, addr)) {
                    int64_t &lt = m_lastTried[Endpoint(addr)];
                    if ((now - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
                        lt = now;
                        RR->t->tryingNewPath(tPtr, 0x84a10000, m_id, addr, InetAddress::NIL, 0, 0, Identity::NIL);
                        sent(now, m_sendProbe(tPtr, -1, addr, nullptr, 0, now));
                    }
                }
            }
        }
    } else {
        // Attempt up to ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE queued addresses.
        // Note that m_lastTried is checked when contact() is called and something
        // is added to the try queue, not here.

        unsigned int attempts = 0;
        for (;;) {
            p_TryQueueItem &qi = m_tryQueue.front();

            if (qi.target.isInetAddr()) {
                // Skip the entry if it overlaps with any currently active IP.
                for (unsigned int i = 0; i < m_alivePathCount; ++i) {
                    if (m_paths[i]->address().ipsEqual(qi.target.ip()))
                        goto discard_queue_item;
                }
            }

            if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
                ++attempts;
                if (qi.iteration < 0) {
                    // If iteration is less than zero, try to contact the original address.
                    // It may be set to a larger negative value to try multiple times,
                    // e.g. -3 to try 3 times.
                    sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), nullptr, 0, now));
                    ++qi.iteration;
                    goto requeue_item;
                } else if (qi.target.ip().isV4() && (m_alivePathCount == 0)) {
                    // When iteration reaches zero the queue item is dropped unless it's
                    // IPv4 and we have no direct paths. In that case some heavier NAT-t
                    // strategies are attempted.
                    if (qi.target.ip().port() < 1024) {
                        // If the source port is privileged, we actually scan every possible
                        // privileged port in random order, slowly over multiple iterations
                        // of pulse(). This is done in batches of ZT_NAT_T_PORT_SCAN_MAX.
                        uint16_t ports[ZT_NAT_T_PORT_SCAN_MAX];
                        unsigned int pn = 0;
                        while ((pn < ZT_NAT_T_PORT_SCAN_MAX) && (qi.iteration < 1023)) {
                            const uint16_t p = RR->randomPrivilegedPortOrder[qi.iteration++];
                            if ((unsigned int)p != qi.target.ip().port())
                                ports[pn++] = p;
                        }
                        if (pn > 0)
                            sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), ports, pn, now));
                        if (qi.iteration < 1023)
                            goto requeue_item;
                    } else {
                        // For unprivileged ports we try ZT_NAT_T_PORT_SCAN_MAX ports beyond
                        // the one we were sent, to catch some sequentially assigning
                        // symmetric NATs.
                        InetAddress tmp(qi.target.ip());
                        unsigned int p = tmp.port() + 1 + (unsigned int)qi.iteration++;
                        if (p > 65535)
                            p -= 64512; // wrap back to 1024
                        tmp.setPort(p);
                        sent(now, m_sendProbe(tPtr, -1, tmp, nullptr, 0, now));
                        if (qi.iteration < ZT_NAT_T_PORT_SCAN_MAX)
                            goto requeue_item;
                    }
                }
            }

            // Discard the front item unless the code skips to requeue_item.
            discard_queue_item:
            m_tryQueue.pop_front();
            if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
                break;
            else continue;

            // If the code jumps here, the front item is instead moved to the back.
            requeue_item:
            if (m_tryQueue.size() > 1) // no point in the splice if there's only one item
                m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
            if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
                break;
            else continue;
        }
    }

    // Do keepalive on all currently active paths, sending HELLO to the first
    // if needHello is true and sending small keepalives to the others.
    uint64_t randomJunk = Utils::random();
    for (unsigned int i = 0; i < m_alivePathCount; ++i) {
        if (needHello) {
            needHello = false;
            const unsigned int bytes = hello(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
            m_paths[i]->sent(now, bytes);
            sent(now, bytes);
            m_lastSentHello = now;
        } else if ((now - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
            m_paths[i]->send(RR, tPtr, reinterpret_cast<uint8_t *>(&randomJunk) + (i & 7U), 1, now);
            sent(now, 1);
        }
    }

    // Send a HELLO indirectly if we were not able to send one via any direct path.
    if (needHello) {
        const SharedPtr< Peer > root(RR->topology->root(now));
        if (root) {
            const SharedPtr< Path > via(root->path(now));
            if (via) {
                const unsigned int bytes = hello(tPtr, via->localSocket(), via->address(), now);
                via->sent(now, bytes);
                root->relayed(now, bytes);
                sent(now, bytes);
                m_lastSentHello = now;
            }
        }
    }

    // Clean m_lastTried.
    for (Map< Endpoint, int64_t >::iterator i(m_lastTried.begin()); i != m_lastTried.end();) {
        if ((now - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 4))
            m_lastTried.erase(i++);
        else ++i;
    }
}
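
// Added commentary: the unprivileged-port walk above is plain sequential
// probing with a wrap back into the unprivileged range. For example, starting
// from a mapped port of 65534 (an assumed value for illustration):
//
//   iteration 0 -> 65535
//   iteration 1 -> 65536 -> wraps to 1024 (p -= 64512)
//   iteration 2 -> 65537 -> wraps to 1025
//
// so the scan never re-enters the privileged range 1-1023, which is handled
// by the separate randomPrivilegedPortOrder branch.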

void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep, int tries)
{
    static uint8_t foo = 0;
    RWMutex::Lock l(m_lock);

    // See if there's already a path to this endpoint; if so, ignore it.
    if (ep.isInetAddr()) {
        if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL)
            m_prioritizePaths(now);
        for (unsigned int i = 0; i < m_alivePathCount; ++i) {
            if (m_paths[i]->address().ipsEqual(ep.ip()))
                return;
        }
    }

    // Check the underlying path attempt rate limit.
    int64_t &lt = m_lastTried[ep];
    if ((now - lt) < ZT_PATH_MIN_TRY_INTERVAL)
        return;
    lt = now;

    // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
    // traverse some NAT types. It has no effect otherwise.
    if (ep.isInetAddr() && ep.ip().isV4()) {
        ++foo;
        RR->node->putPacket(tPtr, -1, ep.ip(), &foo, 1, 2);
    }

    // Make sure the address is not already in the try queue. If it is, just update it.
    for (List< p_TryQueueItem >::iterator i(m_tryQueue.begin()); i != m_tryQueue.end(); ++i) {
        if (i->target.isSameAddress(ep)) {
            i->target = ep;
            i->iteration = -tries;
            return;
        }
    }

    m_tryQueue.push_back(p_TryQueueItem(ep, -tries));
}
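
// Added commentary: tries is stored negated, so contact(..., ep, 3) enqueues
// an item with iteration == -3. Each pulse() that services the item sends one
// probe to the original address and increments iteration; only after it
// reaches zero (here, after 3 direct attempts) do the heavier IPv4 NAT
// traversal strategies in pulse() kick in.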

void Peer::resetWithinScope(void *tPtr, InetAddress::IpScope scope, int inetAddressFamily, int64_t now)
{
    RWMutex::Lock l(m_lock);
    unsigned int pc = 0;
    for (unsigned int i = 0; i < m_alivePathCount; ++i) {
        if ((m_paths[i]) && ((m_paths[i]->address().family() == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
            const unsigned int bytes = m_sendProbe(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), nullptr, 0, now);
            m_paths[i]->sent(now, bytes);
            sent(now, bytes);
        } else {
            // Keep (and compact) paths not in the affected scope. Note that pc must
            // advance even when the path is already in position, or kept paths would
            // be dropped from the count.
            if (pc != i)
                m_paths[pc] = m_paths[i];
            ++pc;
        }
    }
    m_alivePathCount = pc;
    while (pc < ZT_MAX_PEER_NETWORK_PATHS)
        m_paths[pc++].zero();
}
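
// Added commentary: resetWithinScope() is the "my external address in this
// scope changed" hook. Every live path within the affected scope and family
// gets one last probe (if it still works, the authenticated reply lets
// received() re-learn it) and is dropped from the live set, while the
// remaining paths are compacted to the front of m_paths.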

bool Peer::directlyConnected(int64_t now)
{
    if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
        RWMutex::Lock l(m_lock);
        m_prioritizePaths(now);
        return m_alivePathCount > 0;
    } else {
        RWMutex::RLock l(m_lock);
        return m_alivePathCount > 0;
    }
}

void Peer::getAllPaths(Vector< SharedPtr< Path > > &paths)
{
    RWMutex::RLock l(m_lock);
    paths.clear();
    paths.reserve(m_alivePathCount);
    paths.assign(m_paths, m_paths + m_alivePathCount);
}

void Peer::save(void *tPtr) const
{
    uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];

    // Prefix each saved peer with the current timestamp.
    Utils::storeBigEndian< uint64_t >(buf, (uint64_t)RR->node->now());

    const int len = marshal(buf + 8);
    if (len > 0) {
        uint64_t id[2];
        id[0] = m_id.address().toInt();
        id[1] = 0;
        RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_PEER, id, buf, (unsigned int)len + 8);
    }
}

int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
    RWMutex::RLock l(m_lock);

    if (!m_identityKey)
        return -1;

    data[0] = 16; // serialized peer version

    // Include our identity's address to detect if this changes and require
    // recomputation of m_identityKey.
    RR->identity.address().copyTo(data + 1);

    // SECURITY: encryption in place is only to protect secrets if they are
    // cached to local storage. It's not used over the wire. Dumb ECB is fine
    // because secret keys are random and have no structure to reveal.
    RR->localCacheSymmetric.encrypt(m_identityKey->secret, data + 1 + ZT_ADDRESS_LENGTH);
    RR->localCacheSymmetric.encrypt(m_identityKey->secret + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
    RR->localCacheSymmetric.encrypt(m_identityKey->secret + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);

    int p = 1 + ZT_ADDRESS_LENGTH + 48;

    int s = m_id.marshal(data + p, false);
    if (s < 0)
        return -1;
    p += s;

    if (m_locator) {
        data[p++] = 1;
        s = m_locator->marshal(data + p);
        if (s <= 0)
            return s;
        p += s;
    } else {
        data[p++] = 0;
    }

    unsigned int cachedEndpointCount = 0;
    for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
        if (m_endpointCache[i].lastSeen > 0)
            ++cachedEndpointCount;
    }
    Utils::storeBigEndian(data + p, (uint16_t)cachedEndpointCount);
    p += 2;
    // Write only the populated entries so the count written above matches what
    // unmarshal() will read, advancing past the 8-byte timestamp before each endpoint.
    for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
        if (m_endpointCache[i].lastSeen > 0) {
            Utils::storeBigEndian(data + p, (uint64_t)m_endpointCache[i].lastSeen);
            p += 8;
            s = m_endpointCache[i].target.marshal(data + p);
            if (s <= 0)
                return -1;
            p += s;
        }
    }

    Utils::storeBigEndian(data + p, (uint16_t)m_vProto);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vMajor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vMinor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vRevision);
    p += 2;

    data[p++] = 0;
    data[p++] = 0;

    return p;
}
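
// Added commentary: the version-16 on-disk layout produced above, as a reading
// aid (field sizes inferred from this function, not a separate spec):
//
//   [u8 format version = 16]
//   [5-byte local ZeroTier address, to invalidate the cached key if our identity changes]
//   [48-byte identity key, AES-ECB encrypted with the local cache key]
//   [identity][u8 locator-present flag][locator, if flag == 1]
//   [u16 cached endpoint count][(u64 lastSeen + endpoint) * count]
//   [u16 vProto][u16 vMajor][u16 vMinor][u16 vRevision]
//   [u16 length of extended field block, currently 0]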

int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
{
    RWMutex::Lock l(m_lock);

    if ((len <= (1 + ZT_ADDRESS_LENGTH + 48)) || (data[0] != 16))
        return -1;

    m_identityKey.zero();
    m_ephemeralKeys[0].zero();
    m_ephemeralKeys[1].zero();

    if (Address(data + 1) == RR->identity.address()) {
        uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
        static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
        RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
        RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
        RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
        m_identityKey.set(new SymmetricKey(RR->node->now(), k));
        Utils::burn(k, sizeof(k));
    }

    int p = 1 + ZT_ADDRESS_LENGTH + 48;

    int s = m_id.unmarshal(data + p, len - p);
    if (s < 0)
        return s;
    p += s;

    if (!m_identityKey) {
        uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
        if (!RR->identity.agree(m_id, k))
            return -1;
        m_identityKey.set(new SymmetricKey(RR->node->now(), k));
        Utils::burn(k, sizeof(k));
    }

    if (p >= len)
        return -1;
    if (data[p] == 0) {
        ++p;
        m_locator.zero();
    } else if (data[p] == 1) {
        ++p;
        Locator *const loc = new Locator();
        s = loc->unmarshal(data + p, len - p);
        m_locator.set(loc);
        if (s < 0)
            return s;
        p += s;
    } else {
        return -1;
    }

    const unsigned int cachedEndpointCount = Utils::loadBigEndian< uint16_t >(data + p);
    p += 2;
    for (unsigned int i = 0; i < cachedEndpointCount; ++i) {
        if (i < ZT_PEER_ENDPOINT_CACHE_SIZE) {
            if ((p + 8) >= len)
                return -1;
            m_endpointCache[i].lastSeen = (int64_t)Utils::loadBigEndian< uint64_t >(data + p);
            p += 8;
            s = m_endpointCache[i].target.unmarshal(data + p, len - p);
            if (s <= 0)
                return -1;
            p += s;
        }
    }

    if ((p + 10) > len)
        return -1;
    m_vProto = Utils::loadBigEndian< uint16_t >(data + p);
    p += 2;
    m_vMajor = Utils::loadBigEndian< uint16_t >(data + p);
    p += 2;
    m_vMinor = Utils::loadBigEndian< uint16_t >(data + p);
    p += 2;
    m_vRevision = Utils::loadBigEndian< uint16_t >(data + p);
    p += 2;
    p += 2 + (int)Utils::loadBigEndian< uint16_t >(data + p);

    m_deriveSecondaryIdentityKeys();

    return (p > len) ? -1 : p;
}

struct _PathPriorityComparisonOperator
{
    ZT_INLINE bool operator()(const SharedPtr< Path > &a, const SharedPtr< Path > &b) const noexcept
    {
        // Sort in descending order of most recent receive time.
        return (a->lastIn() > b->lastIn());
    }
};

void Peer::m_prioritizePaths(int64_t now)
{
    // assumes m_lock is locked for writing
    m_lastPrioritizedPaths = now;

    if (m_alivePathCount > 0) {
        // Sort paths in descending order of priority.
        std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());

        // Let go of paths that have expired.
        for (unsigned int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
            if ((!m_paths[i]) || (!m_paths[i]->alive(now))) {
                m_alivePathCount = i;
                for (; i < ZT_MAX_PEER_NETWORK_PATHS; ++i)
                    m_paths[i].zero();
                break;
            }
        }
    }
}
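
// Added commentary: after this runs, m_paths[0] is always the most recently
// active path (the preferred route), and the first null or dead slot
// truncates the alive list. For example, if three paths have last-receive
// times 900, 100, and 500 and the one at 100 is too stale to count as alive,
// the sort yields [900, 500, 100] and the scan truncates m_alivePathCount
// to 2, zeroing the remaining slots.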

unsigned int Peer::m_sendProbe(void *tPtr, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, const unsigned int numPorts, int64_t now)
{
    // Assumes m_lock is locked
    const SharedPtr< SymmetricKey > k(m_key());
    const uint64_t packetId = k->nextMessage(RR->identity.address(), m_id.address());

    uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
    Utils::storeMachineEndian< uint64_t >(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
    m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
    RR->identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
    p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
    p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_ECHO;

    Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, k, cipher());

    RR->expect->sending(packetId, now);

    if (numPorts > 0) {
        InetAddress tmp(atAddress);
        for (unsigned int i = 0; i < numPorts; ++i) {
            tmp.setPort(ports[i]);
            RR->node->putPacket(tPtr, -1, tmp, p, ZT_PROTO_MIN_PACKET_LENGTH);
        }
        return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
    } else {
        RR->node->putPacket(tPtr, -1, atAddress, p, ZT_PROTO_MIN_PACKET_LENGTH);
        return ZT_PROTO_MIN_PACKET_LENGTH;
    }
}
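
// Added commentary: a probe is just a minimum-size, armored VERB_ECHO.
// Registering its packetId with RR->expect before sending is what later lets
// received() trust the VERB_OK reply enough to learn the path it arrived on
// (see the SECURITY note there). The return value is the total bytes sent,
// i.e. ZT_PROTO_MIN_PACKET_LENGTH per port probed, which callers feed to
// sent() for rate accounting.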

void Peer::m_deriveSecondaryIdentityKeys() noexcept
{
    // Derive the key used to encrypt the dictionary portion of HELLO.
    uint8_t hk[ZT_SYMMETRIC_KEY_SIZE];
    KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0, hk);
    m_helloCipher.init(hk);
    Utils::burn(hk, sizeof(hk));

    // Derive the key used to HMAC whole HELLO packets.
    KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_PACKET_HMAC, 0, 0, m_helloMacKey);
}

} // namespace ZeroTier