Peer.cpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Trace.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "SelfAwareness.hpp"
#include "InetAddress.hpp"
#include "Protocol.hpp"
#include "Endpoint.hpp"
#include "Expect.hpp"

namespace ZeroTier {

Peer::Peer(const RuntimeEnvironment *renv) :
    RR(renv),
    m_ephemeralPairTimestamp(0),
    m_lastReceive(0),
    m_lastSend(0),
    m_lastSentHello(),
    m_lastWhoisRequestReceived(0),
    m_lastEchoRequestReceived(0),
    m_lastPrioritizedPaths(0),
    m_lastProbeReceived(0),
    m_alivePathCount(0),
    m_tryQueue(),
    m_vProto(0),
    m_vMajor(0),
    m_vMinor(0),
    m_vRevision(0)
{
}

Peer::~Peer()
{
    Utils::burn(m_helloMacKey, sizeof(m_helloMacKey));
}

bool Peer::init(const Identity &peerIdentity)
{
    RWMutex::Lock l(m_lock);

    if (m_id) // sanity check: already initialized
        return false;
    m_id = peerIdentity;

    uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
    if (!RR->identity.agree(peerIdentity, k))
        return false;
    m_identityKey.set(new SymmetricKey(RR->node->now(), k));
    Utils::burn(k, sizeof(k));

    m_deriveSecondaryIdentityKeys();

    return true;
}
void Peer::received(
    void *tPtr,
    const SharedPtr<Path> &path,
    const unsigned int hops,
    const uint64_t packetId,
    const unsigned int payloadLength,
    const Protocol::Verb verb,
    const Protocol::Verb inReVerb)
{
    const int64_t now = RR->node->now();

    m_lastReceive = now;
    m_inMeter.log(now, payloadLength);

    if (hops == 0) {
        RWMutex::RMaybeWLock l(m_lock);

        // If this matches an existing path, skip the path learning logic. For the small
        // number of paths a peer will have, a linear scan is the fastest way to do the lookup.
        for (unsigned int i = 0;i < m_alivePathCount;++i) {
            if (m_paths[i] == path)
                return;
        }

        // If we made it here, we don't already know this path.
        if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, path->localSocket(), path->address())) {
            // SECURITY: if we've made it here, this OK was expected, meaning it matches a
            // request we actually sent (see Expect.hpp). Replay protection is in effect
            // for OK responses.
            if (verb == Protocol::VERB_OK) {
                // We're learning a new path, so upgrade the lock to an exclusive write lock.
                l.writing();

                // If the path list is full, replace the least recently active path. Otherwise append the new path.
                unsigned int newPathIdx = 0;
                if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
                    int64_t lastReceiveTimeMin = m_paths[0]->lastIn();
                    for (unsigned int i = 0;i < m_alivePathCount;++i) {
                        if ((m_paths[i]->address().family() == path->address().family()) &&
                            (m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
                            (m_paths[i]->address().ipsEqual2(path->address()))) {
                            // Replace the older path if everything is the same except the port number, since
                            // NAT/firewall reboots and other wacky stuff can change port number assignments.
                            m_paths[i] = path;
                            return;
                        } else if (m_paths[i]->lastIn() <= lastReceiveTimeMin) {
                            // Otherwise track the least recently active path as the replacement candidate.
                            lastReceiveTimeMin = m_paths[i]->lastIn();
                            newPathIdx = i;
                        }
                    }
                } else {
                    newPathIdx = m_alivePathCount++;
                }

                InetAddress old;
                if (m_paths[newPathIdx])
                    old = m_paths[newPathIdx]->address();
                m_paths[newPathIdx] = path;

                // Re-prioritize paths to include the new one.
                m_prioritizePaths(now);

                RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
            } else {
                // Otherwise send a HELLO to validate the path before learning it.
                path->sent(now, hello(tPtr, path->localSocket(), path->address(), now));
                RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t) verb, m_id);
            }
        }
    }
}
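// Send a packet to this peer over the best available direct path, falling back
// to relaying through a root if no direct path is currently alive.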
void Peer::send(void *tPtr, int64_t now, const void *data, unsigned int len) noexcept
{
    SharedPtr<Path> via(this->path(now));
    if (via) {
        via->send(RR, tPtr, data, len, now);
    } else {
        const SharedPtr<Peer> root(RR->topology->root());
        if ((root) && (root.ptr() != this)) {
            via = root->path(now);
            if (via) {
                via->send(RR, tPtr, data, len, now);
                root->relayed(now, len);
            } else {
                return;
            }
        } else {
            return;
        }
    }
    sent(now, len);
}
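// Assemble and send a HELLO to this peer at the given address. The packet carries
// our protocol and software versions, our full identity, the destination address
// as we see it, and a metadata dictionary encrypted with AES-CTR. It is
// authenticated with HMAC-SHA384, plus a legacy Poly1305 MAC so v1.x nodes will
// accept it. Returns the number of bytes sent, or 0 on failure.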
unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atAddress, const int64_t now)
{
    Buf outp;

    const uint64_t packetId = m_identityKey->nextMessage(RR->identity.address(), m_id.address());
    int ii = Protocol::newPacket(outp, packetId, m_id.address(), RR->identity.address(), Protocol::VERB_HELLO);

    outp.wI8(ii, ZT_PROTO_VERSION);
    outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
    outp.wI8(ii, ZEROTIER_VERSION_MINOR);
    outp.wI16(ii, ZEROTIER_VERSION_REVISION);
    outp.wI64(ii, (uint64_t) now);
    outp.wO(ii, RR->identity);
    outp.wO(ii, atAddress);

    const int ivStart = ii;
    outp.wR(ii, 12);

    // LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
    // Once those are dead they'll become just reserved bytes for future use as flags etc.
    outp.wI32(ii, 0); // reserved bytes
    void *const legacyMoonCountStart = outp.unsafeData + ii;
    outp.wI16(ii, 0);
    const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
    Salsa20(m_identityKey->secret, &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);

    const int cryptSectionStart = ii;
    FCV<uint8_t, 4096> md;
    Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, RR->instanceId);
    outp.wI16(ii, (uint16_t) md.size());
    outp.wB(ii, md.data(), (unsigned int) md.size());

    if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check: should be impossible
        return 0;

    AES::CTR ctr(m_helloCipher);
    void *const cryptSection = outp.unsafeData + cryptSectionStart;
    ctr.init(outp.unsafeData + ivStart, 0, cryptSection);
    ctr.crypt(cryptSection, ii - cryptSectionStart);
    ctr.finish();

    HMACSHA384(m_helloMacKey, outp.unsafeData, ii, outp.unsafeData + ii);
    ii += ZT_HMACSHA384_LEN;

    // LEGACY: we also need a Poly1305 MAC for v1.x peers.
    uint8_t polyKey[ZT_POLY1305_KEY_SIZE], perPacketKey[ZT_SALSA20_KEY_SIZE];
    Protocol::salsa2012DeriveKey(m_identityKey->secret, perPacketKey, outp, ii);
    Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, polyKey, sizeof(polyKey));
    Poly1305 p1305(polyKey);
    p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
    uint64_t polyMac[2];
    p1305.finish(polyMac);
    Utils::storeAsIsEndian<uint64_t>(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);

    if (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii)))
        return ii;
    return 0;
}
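// Periodic per-peer housekeeping: regenerate ephemeral keys and send HELLOs as
// needed, re-prioritize paths, work through the NAT traversal try queue, and
// send keepalives on all live paths.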
void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
{
    RWMutex::Lock l(m_lock);

    // Determine if we need a new ephemeral key pair and if a new HELLO needs
    // to be sent. The latter happens every ZT_PEER_HELLO_INTERVAL or if a new
    // ephemeral key pair is generated.
    bool needHello = false;
    if ((m_vProto >= 11) && (((now - m_ephemeralPairTimestamp) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || ((m_ephemeralKeys[0]) && (m_ephemeralKeys[0]->odometer() >= (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2))))) {
        m_ephemeralPair.generate();
        needHello = true;
    } else if ((now - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
        needHello = true;
    }

    // Prioritize paths and, more importantly here, forget dead ones.
    m_prioritizePaths(now);

    if (m_tryQueue.empty()) {
        if (m_alivePathCount == 0) {
            // If there are no living paths and nothing in the try queue, try addresses
            // from any locator we have on file or that are fetched via the external API
            // callback (if one was supplied).

            if (m_locator) {
                for (Vector<Endpoint>::const_iterator ep(m_locator->endpoints().begin());ep != m_locator->endpoints().end();++ep) {
                    if (ep->type == ZT_ENDPOINT_TYPE_IP_UDP) {
                        RR->t->tryingNewPath(tPtr, 0x84b22322, m_id, ep->ip(), InetAddress::NIL, 0, 0, Identity::NIL);
                        sent(now, m_sendProbe(tPtr, -1, ep->ip(), nullptr, 0, now));
                    }
                }
            }

            InetAddress addr;
            if (RR->node->externalPathLookup(tPtr, m_id, -1, addr)) {
                if ((addr) && (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, addr))) {
                    RR->t->tryingNewPath(tPtr, 0x84a10000, m_id, addr, InetAddress::NIL, 0, 0, Identity::NIL);
                    sent(now, m_sendProbe(tPtr, -1, addr, nullptr, 0, now));
                }
            }
        }
    } else {
        // Attempt up to ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE queued addresses.
        unsigned int attempts = 0;
        do {
            p_TryQueueItem &qi = m_tryQueue.front();

            if (qi.target.isInetAddr()) {
                // Skip entry if it overlaps with any currently active IP.
                for (unsigned int i = 0;i < m_alivePathCount;++i) {
                    if (m_paths[i]->address().ipsEqual(qi.target.ip()))
                        goto next_tryQueue_item;
                }
            }

            if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
                ++attempts;
                if (qi.privilegedPortTrialIteration < 0) {
                    sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), nullptr, 0, now));
                    if ((qi.target.ip().isV4()) && (qi.target.ip().port() < 1024)) {
                        // Schedule a sweep of the privileged port range and rotate this
                        // entry to the back of the queue.
                        qi.privilegedPortTrialIteration = 0;
                        if (m_tryQueue.size() > 1)
                            m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
                        continue;
                    } // else goto next_tryQueue_item
                } else if (qi.privilegedPortTrialIteration < 1023) {
                    // Probe a batch of ports from the randomized privileged port list (1-1023).
                    uint16_t ports[ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE];
                    unsigned int pn = 0;
                    while ((pn < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (qi.privilegedPortTrialIteration < 1023)) {
                        const uint16_t p = RR->randomPrivilegedPortOrder[qi.privilegedPortTrialIteration++];
                        if ((unsigned int) p != qi.target.ip().port())
                            ports[pn++] = p;
                    }
                    sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), ports, pn, now));
                    if (qi.privilegedPortTrialIteration < 1023) {
                        if (m_tryQueue.size() > 1)
                            m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
                        continue;
                    } // else goto next_tryQueue_item
                }
            }

            next_tryQueue_item:
            m_tryQueue.pop_front();
        } while ((attempts < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (!m_tryQueue.empty()));
    }

    // Do keepalive on all currently active paths, sending a HELLO to the first
    // if needHello is true and small keepalives to the others.
    uint64_t randomJunk = Utils::random();
    for (unsigned int i = 0;i < m_alivePathCount;++i) {
        if (needHello) {
            needHello = false;
            const unsigned int bytes = hello(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
            m_paths[i]->sent(now, bytes);
            sent(now, bytes);
            m_lastSentHello = now;
        } else if ((now - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
            m_paths[i]->send(RR, tPtr, reinterpret_cast<uint8_t *>(&randomJunk) + (i & 7U), 1, now);
            sent(now, 1);
        }
    }

    // Send a HELLO indirectly if we were not able to send one via any direct path.
    if (needHello) {
        const SharedPtr<Peer> root(RR->topology->root());
        if (root) {
            const SharedPtr<Path> via(root->path(now));
            if (via) {
                const unsigned int bytes = hello(tPtr, via->localSocket(), via->address(), now);
                via->sent(now, bytes);
                root->relayed(now, bytes);
                sent(now, bytes);
                m_lastSentHello = now;
            }
        }
    }
}
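// Queue an endpoint to be tried for NAT traversal / direct path establishment,
// immediately sending a tiny low-TTL "tickle" packet for IPv4 targets.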
void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep)
{
    static uint8_t foo = 0;
    RWMutex::Lock l(m_lock);

    // See if there's already a path to this endpoint and if so, ignore it.
    if (ep.isInetAddr()) {
        if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL)
            m_prioritizePaths(now);
        for (unsigned int i = 0;i < m_alivePathCount;++i) {
            if (m_paths[i]->address().ipsEqual(ep.ip()))
                return;
        }
    }

    // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
    // traverse some NAT types. It has no effect otherwise.
    if (ep.isInetAddr() && ep.ip().isV4()) {
        ++foo;
        RR->node->putPacket(tPtr, -1, ep.ip(), &foo, 1, 2);
    }

    // Make sure the address is not already in the try queue. If it is, just update it.
    for (List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i != m_tryQueue.end();++i) {
        if (i->target.isSameAddress(ep)) {
            i->target = ep;
            i->privilegedPortTrialIteration = -1;
            return;
        }
    }

    m_tryQueue.push_back(p_TryQueueItem(ep));
}
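// Probe every path in the given IP scope and family and drop it from the active
// list until it is confirmed again, compacting the remaining paths; used when our
// external address within that scope appears to have changed (see SelfAwareness).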
void Peer::resetWithinScope(void *tPtr, InetAddress::IpScope scope, int inetAddressFamily, int64_t now)
{
    RWMutex::Lock l(m_lock);
    unsigned int pc = 0;
    for (unsigned int i = 0;i < m_alivePathCount;++i) {
        if ((m_paths[i]) && ((m_paths[i]->address().family() == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
            const unsigned int bytes = m_sendProbe(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), nullptr, 0, now);
            m_paths[i]->sent(now, bytes);
            sent(now, bytes);
        } else {
            if (pc != i)
                m_paths[pc] = m_paths[i];
            ++pc;
        }
    }
    m_alivePathCount = pc;
    while (pc < ZT_MAX_PEER_NETWORK_PATHS)
        m_paths[pc++].zero();
}
bool Peer::directlyConnected(int64_t now)
{
    if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
        RWMutex::Lock l(m_lock);
        m_prioritizePaths(now);
        return m_alivePathCount > 0;
    } else {
        RWMutex::RLock l(m_lock);
        return m_alivePathCount > 0;
    }
}

void Peer::getAllPaths(Vector<SharedPtr<Path> > &paths)
{
    RWMutex::RLock l(m_lock);
    paths.clear();
    paths.reserve(m_alivePathCount);
    paths.assign(m_paths, m_paths + m_alivePathCount);
}
void Peer::save(void *tPtr) const
{
    uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];

    // Prefix each saved peer with the current timestamp.
    Utils::storeBigEndian<uint64_t>(buf, (uint64_t) RR->node->now());

    const int len = marshal(buf + 8);
    if (len > 0) {
        uint64_t id[2];
        id[0] = m_id.address().toInt();
        id[1] = 0;
        RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_PEER, id, buf, (unsigned int) len + 8);
    }
}
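// Serialized peer record layout (version 0), as written by marshal() and read
// back by unmarshal():
//   [0]     format version byte (currently 0)
//   [1-5]   our own 40-bit address, to detect local identity changes
//   [6-53]  48-byte identity key secret, encrypted as three AES blocks
//   [54-]   peer identity, optional locator (1-byte flag), four 16-bit version
//           fields, and a 16-bit length-prefixed extension area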
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
    RWMutex::RLock l(m_lock);

    if (!m_identityKey)
        return -1;

    data[0] = 0; // serialized peer version

    // Include our identity's address to detect if this changes and require
    // recomputation of m_identityKey.
    RR->identity.address().copyTo(data + 1);

    // SECURITY: encryption in place is only to protect secrets if they are
    // cached to local storage. It's not used over the wire. Dumb ECB is fine
    // because secret keys are random and have no structure to reveal.
    RR->localCacheSymmetric.encrypt(m_identityKey->secret, data + 6);
    RR->localCacheSymmetric.encrypt(m_identityKey->secret + 16, data + 22);
    RR->localCacheSymmetric.encrypt(m_identityKey->secret + 32, data + 38);

    int p = 54;

    int s = m_id.marshal(data + p, false);
    if (s < 0)
        return -1;
    p += s;

    if (m_locator) {
        data[p++] = 1;
        s = m_locator->marshal(data + p);
        if (s <= 0)
            return s;
        p += s;
    } else {
        data[p++] = 0;
    }

    Utils::storeBigEndian(data + p, (uint16_t) m_vProto);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t) m_vMajor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t) m_vMinor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t) m_vRevision);
    p += 2;

    data[p++] = 0; // length of additional fields (high byte)
    data[p++] = 0; // length of additional fields (low byte)

    return p;
}
int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
{
    RWMutex::Lock l(m_lock);

    if ((len <= 54) || (data[0] != 0))
        return -1;

    m_identityKey.zero();
    m_ephemeralKeys[0].zero();
    m_ephemeralKeys[1].zero();

    if (Address(data + 1) == RR->identity.address()) {
        uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
        static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
        RR->localCacheSymmetric.decrypt(data + 6, k);
        RR->localCacheSymmetric.decrypt(data + 22, k + 16);
        RR->localCacheSymmetric.decrypt(data + 38, k + 32);
        m_identityKey.set(new SymmetricKey(RR->node->now(), k));
        Utils::burn(k, sizeof(k));
    }

    int p = 54;
    int s = m_id.unmarshal(data + p, len - p);
    if (s < 0)
        return s;
    p += s;

    if (!m_identityKey) {
        // Our identity changed since this peer was cached, so re-derive the key by agreement.
        uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
        if (!RR->identity.agree(m_id, k))
            return -1;
        m_identityKey.set(new SymmetricKey(RR->node->now(), k));
        Utils::burn(k, sizeof(k));
    }

    if (data[p] == 0) {
        ++p;
        m_locator.zero();
    } else if (data[p] == 1) {
        ++p;
        Locator *const loc = new Locator();
        s = loc->unmarshal(data + p, len - p);
        m_locator.set(loc);
        if (s < 0)
            return s;
        p += s;
    } else {
        return -1;
    }

    if ((p + 10) > len)
        return -1;

    m_vProto = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    m_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    m_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    m_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    p += 2 + (int) Utils::loadBigEndian<uint16_t>(data + p); // skip additional fields

    m_deriveSecondaryIdentityKeys();

    return (p > len) ? -1 : p;
}
struct _PathPriorityComparisonOperator
{
    ZT_INLINE bool operator()(const SharedPtr<Path> &a, const SharedPtr<Path> &b) const noexcept
    {
        // Sort in descending order of most recent receive time.
        return (a->lastIn() > b->lastIn());
    }
};

void Peer::m_prioritizePaths(int64_t now)
{
    // assumes m_lock is locked for writing
    m_lastPrioritizedPaths = now;

    if (m_alivePathCount > 0) {
        // Sort paths in descending order of priority.
        std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());

        // Let go of paths that have expired.
        for (unsigned int i = 0;i < ZT_MAX_PEER_NETWORK_PATHS;++i) {
            if ((!m_paths[i]) || (!m_paths[i]->alive(now))) {
                m_alivePathCount = i;
                for (;i < ZT_MAX_PEER_NETWORK_PATHS;++i)
                    m_paths[i].zero();
                break;
            }
        }
    }
}
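// Send a minimal ECHO to an address (or to the same address at several alternate
// ports when ports[] is given) to probe potential paths. The packet ID is
// registered with Expect so the OK reply will be recognized and the path learned
// in received(). Returns the total number of bytes sent.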
unsigned int Peer::m_sendProbe(void *tPtr, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, const unsigned int numPorts, int64_t now)
{
    // Assumes m_lock is locked
    const SharedPtr<SymmetricKey> k(m_key());
    const uint64_t packetId = k->nextMessage(RR->identity.address(), m_id.address());

    uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
    Utils::storeAsIsEndian<uint64_t>(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
    m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
    RR->identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
    p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
    p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_ECHO;

    Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, k, cipher());

    RR->expect->sending(packetId, now);

    if (numPorts > 0) {
        InetAddress tmp(atAddress);
        for (unsigned int i = 0;i < numPorts;++i) {
            tmp.setPort(ports[i]);
            RR->node->putPacket(tPtr, -1, tmp, p, ZT_PROTO_MIN_PACKET_LENGTH);
        }
        return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
    } else {
        RR->node->putPacket(tPtr, -1, atAddress, p, ZT_PROTO_MIN_PACKET_LENGTH);
        return ZT_PROTO_MIN_PACKET_LENGTH;
    }
}
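// Derive the keys used to protect HELLO packets (the AES cipher for the metadata
// dictionary and the HMAC-SHA384 key) from the long-lived identity key via KBKDF.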
void Peer::m_deriveSecondaryIdentityKeys() noexcept
{
    uint8_t hk[ZT_SYMMETRIC_KEY_SIZE];
    KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0, hk);
    m_helloCipher.init(hk);
    Utils::burn(hk, sizeof(hk));
    KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_PACKET_HMAC, 0, 0, m_helloMacKey);
}

} // namespace ZeroTier