// Peer.cpp
/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
  13. #include "Constants.hpp"
  14. #include "RuntimeEnvironment.hpp"
  15. #include "Trace.hpp"
  16. #include "Peer.hpp"
  17. #include "Topology.hpp"
  18. #include "SelfAwareness.hpp"
  19. #include "InetAddress.hpp"
  20. #include "Protocol.hpp"
  21. #include "Endpoint.hpp"
  22. #include "Expect.hpp"
  23. namespace ZeroTier {
// Construct an empty Peer bound to the given runtime environment.
// The object is not usable until init() succeeds (which sets the identity
// and negotiates the long-lived identity key).
Peer::Peer(const RuntimeEnvironment *renv) :
	RR(renv),
	m_ephemeralPairTimestamp(0), // no ephemeral key pair generated yet
	m_lastReceive(0),
	m_lastSend(0),
	m_lastSentHello(),
	m_lastWhoisRequestReceived(0),
	m_lastEchoRequestReceived(0),
	m_lastPrioritizedPaths(0),
	m_lastProbeReceived(0),
	m_alivePathCount(0), // no live direct paths yet
	m_tryQueue(),
	m_tryQueuePtr(m_tryQueue.end()), // circular cursor parked at end() until queue is non-empty
	m_vProto(0), // remote version fields are zero until learned from the peer
	m_vMajor(0),
	m_vMinor(0),
	m_vRevision(0)
{
}
// Destructor: scrub the HELLO HMAC key so secret material does not
// linger in freed memory. (Other keys are held via SymmetricKey objects,
// which presumably handle their own cleanup — confirm in SymmetricKey.)
Peer::~Peer()
{
	Utils::burn(m_helloMacKey,sizeof(m_helloMacKey));
}
// Initialize this peer with its identity and derive the long-lived shared
// identity key via key agreement with our own identity.
//
// @param peerIdentity the remote peer's full identity
// @return false if already initialized or if key agreement fails
bool Peer::init(const Identity &peerIdentity)
{
	RWMutex::Lock l(m_lock);
	if (m_id) // already initialized sanity check
		return false;
	m_id = peerIdentity;
	uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
	if (!RR->identity.agree(peerIdentity,k))
		return false;
	m_identityKey.set(new SymmetricKey(RR->node->now(),k));
	Utils::burn(k,sizeof(k)); // wipe the stack copy of the raw secret
	m_deriveSecondaryIdentityKeys(); // derive HELLO cipher/MAC keys from the identity key
	return true;
}
// Called for every packet received from this peer. Updates receive stats
// and, for direct (hops == 0) packets, performs path learning: a new path
// is adopted only on an authenticated OK (replay-protected, see Expect.hpp);
// otherwise we challenge the new path with a HELLO.
//
// @param tPtr thread pointer passed through to callbacks
// @param path path the packet arrived on
// @param hops ZeroTier hop count (0 == direct)
// @param packetId packet ID (used for tracing)
// @param payloadLength payload size for rate accounting
// @param verb packet verb
// @param inReVerb verb this packet is in response to, if any
void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Protocol::Verb verb,
	const Protocol::Verb inReVerb)
{
	const int64_t now = RR->node->now();
	m_lastReceive = now;
	m_inMeter.log(now,payloadLength);
	if (hops == 0) {
		// Acquired as read; upgraded to write only if we actually learn a path.
		RWMutex::RMaybeWLock l(m_lock);
		// If this matches an existing path, skip path learning stuff. For the small number
		// of paths a peer will have linear scan is the fastest way to do lookup.
		for (unsigned int i=0;i < m_alivePathCount;++i) {
			if (m_paths[i] == path)
				return;
		}
		// If we made it here, we don't already know this path.
		if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, path->localSocket(), path->address())) {
			// SECURITY: note that if we've made it here we expected this OK, see Expect.hpp.
			// There is replay protection in effect for OK responses.
			if (verb == Protocol::VERB_OK) {
				// If we're learning a new path convert the lock to an exclusive write lock.
				l.writing();
				// If the path list is full, replace the least recently active path. Otherwise append new path.
				unsigned int newPathIdx = 0;
				if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
					int64_t lastReceiveTimeMax = 0;
					for (unsigned int i=0;i<m_alivePathCount;++i) {
						if ((m_paths[i]->address().family() == path->address().family()) &&
						    (m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
						    (m_paths[i]->address().ipsEqual2(path->address()))) {
							// Replace older path if everything is the same except the port number, since NAT/firewall reboots
							// and other wacky stuff can change port number assignments.
							m_paths[i] = path;
							return;
						} else if (m_paths[i]->lastIn() >= lastReceiveTimeMax) {
							lastReceiveTimeMax = m_paths[i]->lastIn();
							newPathIdx = i;
						}
					}
				} else {
					newPathIdx = m_alivePathCount++;
				}
				InetAddress old; // previous address at this slot, for trace reporting
				if (m_paths[newPathIdx])
					old = m_paths[newPathIdx]->address();
				m_paths[newPathIdx] = path;
				// Re-prioritize paths to include the new one.
				m_prioritizePaths(now);
				// Remember most recently learned paths for future bootstrap attempts on restart.
				Endpoint pathEndpoint(path->address());
				m_bootstrap[pathEndpoint.type()] = pathEndpoint;
				RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
			} else {
				// Not an OK: challenge the unknown path with a HELLO instead of adopting it.
				path->sent(now,hello(tPtr,path->localSocket(),path->address(),now));
				RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
			}
		}
	}
}
  125. void Peer::send(void *tPtr,int64_t now,const void *data,unsigned int len) noexcept
  126. {
  127. SharedPtr<Path> via(this->path(now));
  128. if (via) {
  129. via->send(RR,tPtr,data,len,now);
  130. } else {
  131. const SharedPtr<Peer> root(RR->topology->root());
  132. if ((root)&&(root.ptr() != this)) {
  133. via = root->path(now);
  134. if (via) {
  135. via->send(RR,tPtr,data,len,now);
  136. root->relayed(now,len);
  137. } else {
  138. return;
  139. }
  140. } else {
  141. return;
  142. }
  143. }
  144. sent(now,len);
  145. }
// Build and send a HELLO packet to the given address over the given socket.
// The packet carries our identity, protocol/software versions, the address
// we see for the peer, and an encrypted metadata dictionary; it is
// authenticated with both HMAC-SHA384 (v2) and Poly1305 (legacy v1.x).
//
// @param tPtr thread pointer passed through to putPacket
// @param localSocket local socket to send from (-1 for any)
// @param atAddress destination physical address
// @param now current time
// @return number of bytes sent, or 0 on failure
unsigned int Peer::hello(void *tPtr,int64_t localSocket,const InetAddress &atAddress,const int64_t now)
{
	Buf outp;
	const uint64_t packetId = m_identityKey->nextMessage(RR->identity.address(),m_id.address());
	int ii = Protocol::newPacket(outp,packetId,m_id.address(),RR->identity.address(),Protocol::VERB_HELLO);
	// Fixed plaintext header: versions, timestamp, our identity, and the
	// address at which we see the destination peer.
	outp.wI8(ii,ZT_PROTO_VERSION);
	outp.wI8(ii,ZEROTIER_VERSION_MAJOR);
	outp.wI8(ii,ZEROTIER_VERSION_MINOR);
	outp.wI16(ii,ZEROTIER_VERSION_REVISION);
	outp.wI64(ii,(uint64_t)now);
	outp.wO(ii,RR->identity);
	outp.wO(ii,atAddress);
	// 12 random bytes used below as the CTR IV for the encrypted section.
	const int ivStart = ii;
	outp.wR(ii,12);
	// LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
	// Once those are dead they'll become just reserved bytes for future use as flags etc.
	outp.wI32(ii,0); // reserved bytes
	void *const legacyMoonCountStart = outp.unsafeData + ii;
	outp.wI16(ii,0);
	// Legacy Salsa20/12 encryption of the old "moon count" field, keyed as v1.x expects.
	const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
	Salsa20(m_identityKey->secret,&legacySalsaIv).crypt12(legacyMoonCountStart,legacyMoonCountStart,2);
	// Encrypted section: a dictionary of node metadata (currently the instance ID).
	const int cryptSectionStart = ii;
	FCV<uint8_t,4096> md;
	Dictionary::append(md,ZT_PROTO_HELLO_NODE_META_INSTANCE_ID,RR->instanceId);
	outp.wI16(ii,(uint16_t)md.size());
	outp.wB(ii,md.data(),(unsigned int)md.size());
	if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check: should be impossible
		return 0;
	// Encrypt the dictionary section in place with AES-CTR using the IV written above.
	AES::CTR ctr(m_helloCipher);
	void *const cryptSection = outp.unsafeData + ii;
	ctr.init(outp.unsafeData + ivStart,0,cryptSection);
	ctr.crypt(cryptSection,ii - cryptSectionStart);
	ctr.finish();
	// Authenticate the whole packet so far with HMAC-SHA384, appended at the end.
	HMACSHA384(m_helloMacKey,outp.unsafeData,ii,outp.unsafeData + ii);
	ii += ZT_HMACSHA384_LEN;
	// LEGACY: we also need Poly1305 for v1.x peers.
	uint8_t polyKey[ZT_POLY1305_KEY_SIZE],perPacketKey[ZT_SALSA20_KEY_SIZE];
	Protocol::salsa2012DeriveKey(m_identityKey->secret,perPacketKey,outp,ii);
	Salsa20(perPacketKey,&packetId).crypt12(Utils::ZERO256,polyKey,sizeof(polyKey));
	Poly1305 p1305(polyKey);
	p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
	uint64_t polyMac[2];
	p1305.finish(polyMac);
	// Only the first 64 bits of the Poly1305 MAC fit in the legacy MAC field.
	Utils::storeAsIsEndian<uint64_t>(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX,polyMac[0]);
	if (likely(RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,ii)))
		return ii;
	return 0;
}
// Periodic maintenance for this peer: refresh ephemeral keys / send HELLOs,
// bootstrap connection attempts when we have no paths, attempt queued
// contact endpoints (including aggressive NAT traversal), and keep live
// paths alive with keepalives.
//
// @param tPtr thread pointer passed through to callbacks
// @param now current time
// @param isRoot whether this peer is currently our root (unused in this
//               body as visible here — TODO confirm against header/callers)
void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
{
	RWMutex::Lock l(m_lock);
	// Determine if we need to send a full HELLO because we are refreshing ephemeral
	// keys or it's simply been too long.
	bool needHello = false;
	if ( (m_vProto >= 11) && ( ((now - m_ephemeralPairTimestamp) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || ((m_ephemeralKeys[0])&&(m_ephemeralKeys[0]->odometer() >= (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2))) ) ) {
		// Peer supports ephemeral keys (protocol >= 11) and the current pair is
		// past half its TTL in time or messages: generate a fresh pair and HELLO.
		m_ephemeralPair.generate();
		needHello = true;
	} else if ((now - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
		needHello = true;
	}
	// If we have no active paths and none queued to try, attempt any
	// old paths we have cached in m_bootstrap or that external code
	// supplies to the core via the optional API callback.
	if (m_tryQueue.empty()&&(m_alivePathCount == 0)) {
		InetAddress addr;
		if (RR->node->externalPathLookup(tPtr, m_id, -1, addr)) {
			if ((addr)&&(RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, addr))) {
				RR->t->tryingNewPath(tPtr, 0x84a10000, m_id, addr, InetAddress::NIL, 0, 0, Identity::NIL);
				sent(now,m_sendProbe(tPtr,-1,addr,nullptr,0,now));
			}
		}
		if (!m_bootstrap.empty()) {
			// Pick one cached bootstrap endpoint at random (skipping any that
			// duplicates the externally supplied address) and probe it.
			unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)m_bootstrap.size();
			for(SortedMap< Endpoint::Type,Endpoint >::const_iterator i(m_bootstrap.begin());i != m_bootstrap.end();++i) {
				if (tryAtIndex > 0) {
					--tryAtIndex;
				} else {
					if ((i->second.isInetAddr())&&(!i->second.ip().ipsEqual(addr))) {
						RR->t->tryingNewPath(tPtr, 0x0a009444, m_id, i->second.ip(), InetAddress::NIL, 0, 0, Identity::NIL);
						sent(now,m_sendProbe(tPtr,-1,i->second.ip(),nullptr,0,now));
						break;
					}
				}
			}
		}
	}
	// Sort paths and forget expired ones.
	m_prioritizePaths(now);
	// Attempt queued endpoints if they don't overlap with paths.
	if (!m_tryQueue.empty()) {
		for(int k=0;k<ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE;++k) {
			// This is a global circular pointer that iterates through the list of
			// endpoints to attempt.
			if (m_tryQueuePtr == m_tryQueue.end()) {
				if (m_tryQueue.empty())
					break;
				m_tryQueuePtr = m_tryQueue.begin();
			}
			if (likely((now - m_tryQueuePtr->ts) < ZT_PATH_ALIVE_TIMEOUT)) {
				if (m_tryQueuePtr->target.isInetAddr()) {
					// Skip this queued endpoint if we already have a live path to the same IP.
					for(unsigned int i=0;i<m_alivePathCount;++i) {
						if (m_paths[i]->address().ipsEqual(m_tryQueuePtr->target.ip()))
							goto skip_tryQueue_item;
					}
					if ((m_alivePathCount == 0) && (m_tryQueuePtr->breakSymmetricBFG1024) && (RR->node->natMustDie())) {
						// Attempt aggressive NAT traversal if both requested and enabled. This sends a probe
						// to all ports under 1024, which assumes that the peer has bound to such a port and
						// has attempted to initiate a connection through it. This can traverse a decent number
						// of symmetric NATs at the cost of 32KiB per attempt and the potential to trigger IDS
						// systems by looking like a port scan (because it is).
						uint16_t ports[1023];
						for (unsigned int i=0;i<1023;++i)
							ports[i] = (uint64_t)(i + 1);
						// Partial Fisher-Yates-style shuffle: 512 random swaps to
						// randomize probe order without needing a full shuffle.
						for (unsigned int i=0;i<512;++i) {
							const uint64_t rn = Utils::random();
							const unsigned int a = (unsigned int)rn % 1023;
							const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
							if (a != b) {
								const uint16_t tmp = ports[a];
								ports[a] = ports[b];
								ports[b] = tmp;
							}
						}
						sent(now,m_sendProbe(tPtr, -1, m_tryQueuePtr->target.ip(), ports, 1023, now));
					} else {
						sent(now,m_sendProbe(tPtr, -1, m_tryQueuePtr->target.ip(), nullptr, 0, now));
					}
				}
			}
skip_tryQueue_item:
			// erase(it++) idiom: erase the item just attempted (or expired/skipped)
			// while advancing the circular cursor past it.
			m_tryQueue.erase(m_tryQueuePtr++);
		}
	}
	// Do keepalive on all currently active paths, sending HELLO to the first
	// if needHello is true and sending small keepalives to others.
	uint64_t randomJunk = Utils::random();
	for(unsigned int i=0;i<m_alivePathCount;++i) {
		if (needHello) {
			needHello = false; // only one HELLO per pulse, on the top-priority path
			const unsigned int bytes = hello(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
			m_paths[i]->sent(now, bytes);
			sent(now,bytes);
			m_lastSentHello = now;
		} else if ((now - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
			// One random junk byte is enough to keep NAT/firewall state alive.
			m_paths[i]->send(RR, tPtr, reinterpret_cast<uint8_t *>(&randomJunk) + (i & 7U), 1, now);
			sent(now,1);
		}
	}
	// Send a HELLO indirectly if we were not able to send one via any direct path.
	if (needHello) {
		const SharedPtr<Peer> root(RR->topology->root());
		if (root) {
			const SharedPtr<Path> via(root->path(now));
			if (via) {
				const unsigned int bytes = hello(tPtr,via->localSocket(),via->address(),now);
				via->sent(now,bytes);
				root->relayed(now,bytes);
				sent(now,bytes);
				m_lastSentHello = now;
			}
		}
	}
}
  309. void Peer::contact(void *tPtr,const int64_t now,const Endpoint &ep,const bool breakSymmetricBFG1024)
  310. {
  311. static uint8_t foo = 0;
  312. RWMutex::Lock l(m_lock);
  313. if (ep.isInetAddr()&&ep.ip().isV4()) {
  314. // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
  315. // traverse some NAT types. It has no effect otherwise. It's important to
  316. // send this right away in case this is a coordinated attempt via RENDEZVOUS.
  317. RR->node->putPacket(tPtr,-1,ep.ip(),&foo,1,2);
  318. ++foo;
  319. }
  320. const bool wasEmpty = m_tryQueue.empty();
  321. if (!wasEmpty) {
  322. for(List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i!=m_tryQueue.end();++i) {
  323. if (i->target == ep) {
  324. i->ts = now;
  325. i->breakSymmetricBFG1024 = breakSymmetricBFG1024;
  326. return;
  327. }
  328. }
  329. }
  330. #ifdef __CPP11__
  331. m_tryQueue.emplace_back(now, ep, breakSymmetricBFG1024);
  332. #else
  333. _tryQueue.push_back(_TryQueueItem(now,ep,breakSymmetricBFG1024));
  334. #endif
  335. if (wasEmpty)
  336. m_tryQueuePtr = m_tryQueue.begin();
  337. }
  338. void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
  339. {
  340. RWMutex::Lock l(m_lock);
  341. unsigned int pc = 0;
  342. for(unsigned int i=0;i<m_alivePathCount;++i) {
  343. if ((m_paths[i]) && ((m_paths[i]->address().family() == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
  344. const unsigned int bytes = m_sendProbe(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), nullptr, 0, now);
  345. m_paths[i]->sent(now, bytes);
  346. sent(now,bytes);
  347. } else if (pc != i) {
  348. m_paths[pc++] = m_paths[i];
  349. }
  350. }
  351. m_alivePathCount = pc;
  352. while (pc < ZT_MAX_PEER_NETWORK_PATHS)
  353. m_paths[pc].zero();
  354. }
  355. bool Peer::directlyConnected(int64_t now)
  356. {
  357. if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
  358. RWMutex::Lock l(m_lock);
  359. m_prioritizePaths(now);
  360. return m_alivePathCount > 0;
  361. } else {
  362. RWMutex::RLock l(m_lock);
  363. return m_alivePathCount > 0;
  364. }
  365. }
  366. void Peer::getAllPaths(Vector< SharedPtr<Path> > &paths)
  367. {
  368. RWMutex::RLock l(m_lock);
  369. paths.clear();
  370. paths.reserve(m_alivePathCount);
  371. paths.assign(m_paths, m_paths + m_alivePathCount);
  372. }
// Persist this peer's state (identity, keys, bootstrap endpoints, version)
// to the node's state object store, prefixed with the current timestamp.
// Serialization failures are silently ignored (best-effort cache).
void Peer::save(void *tPtr) const
{
	uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
	// Prefix each saved peer with the current timestamp.
	Utils::storeBigEndian<uint64_t>(buf,(uint64_t)RR->node->now());
	const int len = marshal(buf + 8);
	if (len > 0) {
		// State object key: peer address in id[0], id[1] reserved/zero.
		uint64_t id[2];
		id[0] = m_id.address().toInt();
		id[1] = 0;
		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
	}
}
  386. int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
  387. {
  388. RWMutex::RLock l(m_lock);
  389. if (!m_identityKey)
  390. return -1;
  391. data[0] = 0; // serialized peer version
  392. // Include our identity's address to detect if this changes and require
  393. // recomputation of m_identityKey.
  394. RR->identity.address().copyTo(data + 1);
  395. // SECURITY: encryption in place is only to protect secrets if they are
  396. // cached to local storage. It's not used over the wire. Dumb ECB is fine
  397. // because secret keys are random and have no structure to reveal.
  398. RR->localCacheSymmetric.encrypt(m_identityKey->secret,data + 6);
  399. RR->localCacheSymmetric.encrypt(m_identityKey->secret + 22,data + 17);
  400. RR->localCacheSymmetric.encrypt(m_identityKey->secret + 38,data + 33);
  401. int p = 54;
  402. int s = m_id.marshal(data + p, false);
  403. if (s < 0)
  404. return -1;
  405. p += s;
  406. s = m_locator.marshal(data + p);
  407. if (s <= 0)
  408. return s;
  409. p += s;
  410. data[p++] = (uint8_t)m_bootstrap.size();
  411. for(std::map< Endpoint::Type,Endpoint >::const_iterator i(m_bootstrap.begin());i != m_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
  412. s = i->second.marshal(data + p);
  413. if (s <= 0)
  414. return -1;
  415. p += s;
  416. }
  417. Utils::storeBigEndian(data + p,(uint16_t)m_vProto);
  418. p += 2;
  419. Utils::storeBigEndian(data + p,(uint16_t)m_vMajor);
  420. p += 2;
  421. Utils::storeBigEndian(data + p,(uint16_t)m_vMinor);
  422. p += 2;
  423. Utils::storeBigEndian(data + p,(uint16_t)m_vRevision);
  424. p += 2;
  425. data[p++] = 0;
  426. data[p++] = 0;
  427. return p;
  428. }
  429. int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
  430. {
  431. RWMutex::Lock l(m_lock);
  432. if ((len <= 54) || (data[0] != 0))
  433. return -1;
  434. m_identityKey.zero();
  435. m_ephemeralKeys[0].zero();
  436. m_ephemeralKeys[1].zero();
  437. if (Address(data + 1) == RR->identity.address()) {
  438. uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
  439. static_assert(ZT_SYMMETRIC_KEY_SIZE == 48,"marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
  440. RR->localCacheSymmetric.decrypt(data + 1,k);
  441. RR->localCacheSymmetric.decrypt(data + 17,k + 16);
  442. RR->localCacheSymmetric.decrypt(data + 33,k + 32);
  443. m_identityKey.set(new SymmetricKey(RR->node->now(),k));
  444. Utils::burn(k,sizeof(k));
  445. }
  446. int p = 49;
  447. int s = m_id.unmarshal(data + 38, len - 38);
  448. if (s < 0)
  449. return s;
  450. p += s;
  451. if (!m_identityKey) {
  452. uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
  453. if (!RR->identity.agree(m_id,k))
  454. return -1;
  455. m_identityKey.set(new SymmetricKey(RR->node->now(),k));
  456. Utils::burn(k,sizeof(k));
  457. }
  458. s = m_locator.unmarshal(data + p, len - p);
  459. if (s < 0)
  460. return s;
  461. p += s;
  462. if (p >= len)
  463. return -1;
  464. const unsigned int bootstrapCount = data[p++];
  465. if (bootstrapCount > ZT_MAX_PEER_NETWORK_PATHS)
  466. return -1;
  467. m_bootstrap.clear();
  468. for(unsigned int i=0;i<bootstrapCount;++i) {
  469. Endpoint tmp;
  470. s = tmp.unmarshal(data + p,len - p);
  471. if (s < 0)
  472. return s;
  473. p += s;
  474. m_bootstrap[tmp.type()] = tmp;
  475. }
  476. if ((p + 10) > len)
  477. return -1;
  478. m_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
  479. m_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
  480. m_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
  481. m_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
  482. p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
  483. m_deriveSecondaryIdentityKeys();
  484. return (p > len) ? -1 : p;
  485. }
  486. struct _PathPriorityComparisonOperator
  487. {
  488. ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const noexcept
  489. {
  490. // Sort in descending order of most recent receive time.
  491. return (a->lastIn() > b->lastIn());
  492. }
  493. };
// Sort paths by descending last-receive time and truncate the alive set at
// the first null or expired entry, zeroing every slot after it.
// Caller must hold m_lock for writing.
void Peer::m_prioritizePaths(int64_t now)
{
	// assumes _lock is locked for writing
	m_lastPrioritizedPaths = now;
	if (m_alivePathCount > 0) {
		// Sort paths in descending order of priority.
		std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());
		// Let go of paths that have expired.
		// After the sort, dead/null paths cluster at the tail, so the first
		// one found marks the new alive count.
		for (unsigned int i = 0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if ((!m_paths[i]) || (!m_paths[i]->alive(now))) {
				m_alivePathCount = i;
				for (;i < ZT_MAX_PEER_NETWORK_PATHS;++i)
					m_paths[i].zero();
				break;
			}
		}
	}
}
  512. unsigned int Peer::m_sendProbe(void *tPtr,int64_t localSocket,const InetAddress &atAddress,const uint16_t *ports,const unsigned int numPorts,int64_t now)
  513. {
  514. // Assumes m_lock is locked
  515. const SharedPtr<SymmetricKey> k(m_key());
  516. const uint64_t packetId = k->nextMessage(RR->identity.address(),m_id.address());
  517. uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH + 1];
  518. Utils::storeAsIsEndian<uint64_t>(p + ZT_PROTO_PACKET_ID_INDEX,packetId);
  519. m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
  520. RR->identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
  521. p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
  522. p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_ECHO;
  523. p[ZT_PROTO_PACKET_VERB_INDEX + 1] = 0; // arbitrary payload
  524. Protocol::armor(p,ZT_PROTO_MIN_PACKET_LENGTH + 1,k,cipher());
  525. RR->expect->sending(packetId,now);
  526. if (numPorts > 0) {
  527. InetAddress tmp(atAddress);
  528. for(unsigned int i=0;i<numPorts;++i) {
  529. tmp.setPort(ports[i]);
  530. RR->node->putPacket(tPtr,-1,tmp,p,ZT_PROTO_MIN_PACKET_LENGTH + 1);
  531. }
  532. return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
  533. } else {
  534. RR->node->putPacket(tPtr,-1,atAddress,p,ZT_PROTO_MIN_PACKET_LENGTH + 1);
  535. return ZT_PROTO_MIN_PACKET_LENGTH;
  536. }
  537. }
// Derive the HELLO dictionary cipher key and the packet HMAC key from the
// long-lived identity key using KBKDF (HMAC-SHA384) with distinct labels.
// Called after m_identityKey is (re)established; assumes it is non-null.
void Peer::m_deriveSecondaryIdentityKeys() noexcept
{
	uint8_t hk[ZT_SYMMETRIC_KEY_SIZE];
	KBKDFHMACSHA384(m_identityKey->secret,ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT,0,0,hk);
	m_helloCipher.init(hk);
	Utils::burn(hk,sizeof(hk)); // wipe the intermediate key material
	// The HMAC key is written directly into its long-lived buffer (burned in ~Peer()).
	KBKDFHMACSHA384(m_identityKey->secret,ZT_KBKDF_LABEL_PACKET_HMAC,0,0,m_helloMacKey);
}
  546. } // namespace ZeroTier