Peer.cpp 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592
  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include "Constants.hpp"
  14. #include "RuntimeEnvironment.hpp"
  15. #include "Trace.hpp"
  16. #include "Peer.hpp"
  17. #include "Topology.hpp"
  18. #include "Node.hpp"
  19. #include "SelfAwareness.hpp"
  20. #include "InetAddress.hpp"
  21. #include "Protocol.hpp"
  22. #include "Endpoint.hpp"
  23. namespace ZeroTier {
  24. Peer::Peer(const RuntimeEnvironment *renv) : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
  25. RR(renv),
  26. _lastReceive(0),
  27. _lastSend(0),
  28. _lastSentHello(),
  29. _lastWhoisRequestReceived(0),
  30. _lastEchoRequestReceived(0),
  31. _lastProbeReceived(0),
  32. _lastAttemptedP2PInit(0),
  33. _lastPrioritizedPaths(0),
  34. _lastAttemptedAggressiveNATTraversal(0),
  35. _alivePathCount(0),
  36. _probe(0),
  37. _vProto(0),
  38. _vMajor(0),
  39. _vMinor(0),
  40. _vRevision(0)
  41. {
  42. }
  43. Peer::~Peer() // NOLINT(hicpp-use-equals-default,modernize-use-equals-default)
  44. {
  45. }
  46. bool Peer::init(const Identity &peerIdentity)
  47. {
  48. RWMutex::Lock l(_lock);
  49. if (_id == peerIdentity)
  50. return true;
  51. _id = peerIdentity;
  52. uint8_t ktmp[ZT_SYMMETRIC_KEY_SIZE];
  53. if (!RR->identity.agree(peerIdentity,ktmp))
  54. return false;
  55. _identityKey.init(RR->node->now(),ktmp);
  56. Utils::burn(ktmp,sizeof(ktmp));
  57. return true;
  58. }
// Called for every packet received from this peer. Updates receive
// bookkeeping and, for direct (zero-hop) packets arriving over an unknown
// path, decides whether to learn that path or to probe it with a HELLO.
//
// tPtr          - thread pointer passed through to callbacks/trace
// path          - physical path the packet arrived on
// hops          - ZeroTier hop count (0 == direct from the peer)
// packetId      - ID of the packet (used for trace correlation)
// payloadLength - bytes logged into the inbound meter
// verb          - packet verb; only VERB_OK may cause a path to be learned
// inReVerb      - in-re verb for OK/ERROR packets (currently unused here)
void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Protocol::Verb verb,
	const Protocol::Verb inReVerb)
{
	const int64_t now = RR->node->now();
	_lastReceive = now;
	_inMeter.log(now,payloadLength);

	// Only direct packets can teach us a new physical path.
	if (hops == 0) {
		// Taken in read mode first; upgraded via writing() below only if the
		// path list actually needs to be modified.
		RWMutex::RMaybeWLock l(_lock);

		// If this matches an existing path, skip path learning stuff.
		for (unsigned int i=0;i<_alivePathCount;++i) {
			if (_paths[i] == path) {
				// NOTE(review): manual runlock() ahead of the guard's
				// destructor — presumably RMaybeWLock tracks its own state so
				// this does not double-unlock; verify against RWMutex.hpp.
				_lock.runlock();
				return;
			}
		}

		// If we made it here, we don't already know this path.
		if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
			if (verb == Protocol::VERB_OK) {
				// Upgrade to a write lock before touching _paths/_bootstrap.
				l.writing();

				// SECURITY: in the future we may not accept anything but OK(HELLO) to learn paths,
				// but right now we accept any OK for backward compatibility. Note that OK will
				// have been checked against expected packet IDs (see Expect.hpp) before we get here,
				// and this guards against replay attacks.

				// If the path list is full, replace the least recently active path. Otherwise append new path.
				unsigned int newPathIdx = 0;
				if (_alivePathCount >= ZT_MAX_PEER_NETWORK_PATHS) {
					int64_t lastReceiveTimeMax = 0;
					for (unsigned int i=0;i<_alivePathCount;++i) {
						if ((_paths[i]->address().family() == path->address().family()) &&
						    (_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
						    (_paths[i]->address().ipsEqual2(path->address()))) {
							// Replace older path if everything is the same except the port number, since NAT/firewall reboots
							// and other wacky stuff can change port number assignments.
							_paths[i] = path;
							return;
						} else if (_paths[i]->lastIn() > lastReceiveTimeMax) {
							// NOTE(review): this tracks the path with the
							// LARGEST (most recent) lastIn() as the
							// replacement candidate, while the comment above
							// says "least recently active" — confirm the
							// intended comparison direction.
							lastReceiveTimeMax = _paths[i]->lastIn();
							newPathIdx = i;
						}
					}
				} else {
					newPathIdx = _alivePathCount++;
				}

				// Remember the displaced address (if any) for the trace event.
				InetAddress old;
				if (_paths[newPathIdx])
					old = _paths[newPathIdx]->address();
				_paths[newPathIdx] = path;

				// Re-prioritize paths to include the new one.
				_prioritizePaths(now);

				// Remember most recently learned paths for future bootstrap attempts on restart.
				Endpoint pathEndpoint(path->address());
				_bootstrap[pathEndpoint.type()] = pathEndpoint;

				RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
			} else {
				// Not an OK: don't learn the path yet, but send a HELLO over
				// it so a future OK(HELLO) can confirm and learn it.
				path->sent(now,hello(tPtr,path->localSocket(),path->address(),now));
				RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id,ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
			}
		}
	}
}
  125. void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len,const SharedPtr<Path> &via) noexcept
  126. {
  127. via->send(RR,tPtr,data,len,now);
  128. sent(now,len);
  129. }
  130. void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len) noexcept
  131. {
  132. SharedPtr<Path> via(this->path(now));
  133. if (via) {
  134. via->send(RR,tPtr,data,len,now);
  135. } else {
  136. const SharedPtr<Peer> root(RR->topology->root());
  137. if ((root)&&(root.ptr() != this)) {
  138. via = root->path(now);
  139. if (via) {
  140. via->send(RR,tPtr,data,len,now);
  141. root->relayed(now,len);
  142. } else {
  143. return;
  144. }
  145. } else {
  146. return;
  147. }
  148. }
  149. sent(now,len);
  150. }
  151. unsigned int Peer::hello(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now)
  152. {
  153. #if 0
  154. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
  155. outp.append((unsigned char)ZT_PROTO_VERSION);
  156. outp.append((unsigned char)ZEROTIER_VERSION_MAJOR);
  157. outp.append((unsigned char)ZEROTIER_VERSION_MINOR);
  158. outp.append((uint16_t)ZEROTIER_VERSION_REVISION);
  159. outp.append(now);
  160. RR->identity.serialize(outp,false);
  161. atAddress.serialize(outp);
  162. RR->node->expectReplyTo(outp.packetId());
  163. if (atAddress) {
  164. outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
  165. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  166. } else {
  167. RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
  168. }
  169. #endif
  170. }
  171. unsigned int Peer::sendNOP(void *const tPtr,const int64_t localSocket,const InetAddress &atAddress,const int64_t now)
  172. {
  173. Buf outp;
  174. Protocol::Header &ph = outp.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
  175. ph.packetId = Protocol::getPacketId();
  176. _id.address().copyTo(ph.destination);
  177. RR->identity.address().copyTo(ph.source);
  178. ph.flags = 0;
  179. ph.verb = Protocol::VERB_NOP;
  180. Protocol::armor(outp,sizeof(Protocol::Header),_identityKey.key(),this->cipher());
  181. RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
  182. return sizeof(Protocol::Header);
  183. }
// Periodic maintenance for this peer: re-prioritize paths, send HELLOs at
// ZT_PEER_HELLO_INTERVAL, keep paths alive, and when we owe a HELLO but have
// no live path, try externally suggested and remembered bootstrap addresses.
//
// tPtr   - thread pointer passed through to callbacks
// now    - current time
// isRoot - true if this peer is a root (all paths are kept open and all
//          bootstrap addresses are tried, rather than just one)
void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
{
	RWMutex::Lock l(_lock);

	// Decide whether a HELLO is due; the flag is consumed by the first live
	// path below, or by the bootstrap logic if no live path exists.
	bool needHello = false;
	if ((now - _lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
		_lastSentHello = now;
		needHello = true;
	}

	_prioritizePaths(now);

	for(unsigned int i=0;i<_alivePathCount;++i) {
		if (needHello) {
			// Send the periodic HELLO over the best (first) path only.
			needHello = false;
			const unsigned int bytes = hello(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
			_paths[i]->sent(now,bytes);
			sent(now,bytes);
		} else if ((now - _paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
			// Single throwaway byte keepalive to hold NAT/firewall state.
			_paths[i]->send(RR,tPtr,&now,1,now);
			sent(now,1);
		}
		// TODO: when we merge multipath we'll keep one open per interface to non-roots.
		// For roots we try to keep every path open.
		// NOTE: for a non-root this returns after servicing only the first
		// (highest priority) path; since needHello was consumed on that
		// iteration, the bootstrap block below would not have run anyway.
		if (!isRoot)
			return;
	}

	// Reached with needHello still set only when there are no live paths:
	// attempt to (re)establish contact.
	if (needHello) {
		// Try any statically configured addresses.
		InetAddress addr;
		if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
			if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
				RR->t->tryingNewPath(tPtr,0x84a10000,_id,addr,InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
				hello(tPtr,-1,addr,now);
			}
		}

		// Fall back to addresses remembered from previously learned paths.
		// Note: 'addr' may still be default-constructed here if the external
		// lookup failed; the ipsEqual() checks below compare against it to
		// avoid re-trying the address we just tried.
		if (!_bootstrap.empty()) {
			if (isRoot) {
				// Try all bootstrap addresses if this is a root.
				for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
					if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
						RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
						hello(tPtr,-1,i->second.inetAddr(),now);
					}
				}
			} else {
				// Otherwise try a random bootstrap address.
				unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)_bootstrap.size();
				for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
					if (tryAtIndex > 0) {
						--tryAtIndex;
					} else {
						// NOTE: if the randomly selected entry is not an IP
						// endpoint (or equals 'addr'), nothing is sent on
						// this pulse.
						if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
							RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
							hello(tPtr,-1,i->second.inetAddr(),now);
						}
					}
				}
			}
		}
	}
}
  243. void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
  244. {
  245. RWMutex::RLock l(_lock);
  246. for(unsigned int i=0;i<_alivePathCount;++i) {
  247. if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope)))
  248. _paths[i]->sent(now,sendNOP(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
  249. }
  250. }
  251. bool Peer::directlyConnected(int64_t now)
  252. {
  253. if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
  254. RWMutex::Lock l(_lock);
  255. _prioritizePaths(now);
  256. return _alivePathCount > 0;
  257. } else {
  258. RWMutex::RLock l(_lock);
  259. return _alivePathCount > 0;
  260. }
  261. }
  262. void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
  263. {
  264. RWMutex::RLock l(_lock);
  265. paths.clear();
  266. paths.assign(_paths,_paths + _alivePathCount);
  267. }
  268. void Peer::save(void *tPtr) const
  269. {
  270. uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
  271. // Prefix each saved peer with the current timestamp.
  272. Utils::storeBigEndian<uint64_t>(buf,(uint64_t)RR->node->now());
  273. const int len = marshal(buf + 8);
  274. if (len > 0) {
  275. uint64_t id[2];
  276. id[0] = _id.address().toInt();
  277. id[1] = 0;
  278. RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
  279. }
  280. }
// Attempt to establish a direct path to this peer at the given endpoint,
// including multi-step NAT traversal tricks. The actual contact packets are
// queued and sent from alarm(); this method only primes the firewall and
// schedules the work.
//
// tPtr    - thread pointer passed through to callbacks
// ep      - endpoint to try (only IP endpoints are implemented)
// now     - current time
// bfg1024 - if true and conditions allow, brute-force scan ports < 1024
void Peer::tryToContactAt(void *const tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
{
	// Shared junk byte for the low-TTL priming packet. NOTE(review): the
	// increment below is not atomic; presumably harmless since the value is
	// meaningless, but confirm this is never called concurrently if that
	// matters.
	static uint8_t junk = 0;

	if (ep.inetAddr()) { // only this endpoint type is currently implemented
		if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,ep.inetAddr()))
			return;

		// Sending a packet with a low TTL before the real message assists traversal with some
		// stateful firewalls and is harmless otherwise AFAIK.
		++junk;
		RR->node->putPacket(tPtr,-1,ep.inetAddr(),&junk,1,2); // trailing 2 == TTL

		// In a few hundred milliseconds we'll send the real packet.
		{
			RWMutex::Lock l(_lock);
			_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ZT_MAX_PEER_NETWORK_PATHS)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
		}

		// If the peer indicates that they may be behind a symmetric NAT and there are no
		// living direct paths, try a few more aggressive things.
		if ((ep.inetAddr().family() == AF_INET) && (!directlyConnected(now))) {
			unsigned int port = ep.inetAddr().port();
			if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
				// If the other side is using a low-numbered port and has elected to
				// have this done, we can try scanning every port below 1024. The search
				// space here is small enough that we have a very good chance of punching.

				// Generate a random order list of all <1024 ports except 0 and the original sending port.
				// ctr walks 1..1023 and skips 'port' exactly once, yielding
				// the 1022 remaining candidate ports.
				uint16_t ports[1022];
				uint16_t ctr = 1;
				for (int i=0;i<1022;++i) { // NOLINT(modernize-loop-convert)
					if (ctr == port) ++ctr;
					ports[i] = ctr++;
				}
				// Shuffle via 512 random pair swaps (not a full Fisher-Yates,
				// but adequate randomization for this purpose).
				for (int i=0;i<512;++i) {
					uint64_t rn = Utils::random();
					unsigned int a = ((unsigned int)rn) % 1022;
					unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
					if (a != b) {
						uint16_t tmp = ports[a];
						ports[a] = ports[b];
						ports[b] = tmp;
					}
				}

				// Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
				// abandoning attempts once there is at least one direct path.
				{
					static_assert((896 % ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE) == 0,"port scan chunk size doesn't evenly divide port list");
					static_assert((1022 - 896) <= ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,"port scan chunk size needs to be adjusted");
					RWMutex::Lock l(_lock);
					// Full chunks first, then the final partial chunk
					// (ports 896..1021). alivePathThreshold of 1 means each
					// queued item is dropped once any direct path exists.
					for (int i=0;i<896;i+=ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE)
						_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + i,ports + i + ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
					_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + 896,ports + 1022,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
				}
			} else {
				// Otherwise use the simpler sequential port attempt method in intervals.
				// Try the next three sequential ports, stopping at 65535.
				RWMutex::Lock l(_lock);
				for (int k=0;k<3;++k) {
					if (++port > 65535) break;
					InetAddress tryNext(ep.inetAddr());
					tryNext.setPort(port);
					_contactQueue.push_back(_ContactQueueItem(tryNext,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
				}
			}
		}

		// Start alarms going off to actually send these...
		RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
	}
}
// Alarm callback used to drive multi-step NAT traversal: pops one queued
// contact attempt, prunes attempts made obsolete by the current live path
// count, sends the probe/NOP packets for the popped item, and re-arms the
// alarm if more work remains.
void Peer::alarm(void *tPtr,const int64_t now)
{
	// Right now alarms are only used for multi-phase or multi-step NAT traversal operations.

	// Pop one contact queue item and also clean the queue of any that are no
	// longer applicable because the alive path count has exceeded their threshold.
	bool stillHaveContactQueueItems;
	_ContactQueueItem qi;
	{
		RWMutex::Lock l(_lock);

		if (_contactQueue.empty())
			return;

		// Discard leading items whose alive-path threshold is already met.
		while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
			_contactQueue.pop_front();
			if (_contactQueue.empty())
				return;
		}

		// Copy the front item out so the packets can be sent outside the lock.
		_ContactQueueItem &qi2 = _contactQueue.front();
		qi.address = qi2.address;
		qi.ports = qi2.ports;
		qi.alivePathThreshold = qi2.alivePathThreshold;
		_contactQueue.pop_front();

		// Prune the remainder of the queue by the same threshold test.
		// (erase(q++) is the standard idiom for erasing from a std::list
		// while iterating: q is advanced before the erase invalidates it.)
		for(std::list< _ContactQueueItem,Utils::Mallocator<_ContactQueueItem> >::iterator q(_contactQueue.begin());q!=_contactQueue.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
			if (_alivePathCount >= q->alivePathThreshold)
				_contactQueue.erase(q++);
			else ++q;
		}

		stillHaveContactQueueItems = !_contactQueue.empty();
	}

	// NOTE(review): _vProto and _probe are read here without holding _lock —
	// presumably safe because they change only on HELLO processing/unmarshal;
	// verify.
	if ((_vProto >= 11) && (_probe != 0)) {
		// Peer speaks protocol >= 11: send the short probe packet, either to
		// the single queued address or to each queued port at that address.
		if (qi.ports.empty()) {
			RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
		} else {
			for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
				qi.address.setPort(*p);
				RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
			}
		}
	} else {
		// Older peer: fall back to full NOP packets as the traversal probe.
		if (qi.ports.empty()) {
			this->sendNOP(tPtr,-1,qi.address,now);
		} else {
			for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
				qi.address.setPort(*p);
				this->sendNOP(tPtr,-1,qi.address,now);
			}
		}
	}

	// Re-arm the alarm if there is more queued traversal work.
	if (stillHaveContactQueueItems)
		RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
// Serialize this peer into 'data' for persistent storage. Returns the number
// of bytes written, or a non-positive value on failure.
//
// Layout: [0] format version (0), identity key (cached form), identity,
// locator, bootstrap endpoint count byte + endpoints, four big-endian u16
// version fields (proto/major/minor/revision), then a two-byte zero field
// (length prefix reserved for future extension data; unmarshal skips it).
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
	data[0] = 0; // serialized peer version

	RWMutex::RLock l(_lock);

	int s = _identityKey.marshal(RR->localCacheSymmetric,data + 1);
	if (s < 0)
		return -1;
	int p = 1 + s;

	s = _id.marshal(data + p,false);
	if (s < 0)
		return -1;
	p += s;

	// NOTE(review): unlike the other fields this propagates s == 0 to the
	// caller (save() skips writing when the result is not > 0) — presumably
	// intentional for a nil locator; confirm against Locator::marshal().
	s = _locator.marshal(data + p);
	if (s <= 0)
		return s;
	p += s;

	// Bootstrap endpoints, prefixed by a one-byte count.
	data[p++] = (uint8_t)_bootstrap.size();
	for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
		s = i->second.marshal(data + p);
		if (s <= 0)
			return -1;
		p += s;
	}

	// Remote software version as reported in HELLO.
	Utils::storeBigEndian(data + p,(uint16_t)_vProto);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
	p += 2;

	// Reserved extension field: zero length, no payload.
	data[p++] = 0;
	data[p++] = 0;

	return p;
}
// Deserialize a peer previously written by marshal(). Returns the number of
// bytes consumed or a negative value on error. Ephemeral state (ephemeral
// keys, probe token) is reset rather than restored.
int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
	RWMutex::Lock l(_lock);

	// Byte 0 is the serialization format version; only 0 is understood.
	if ((len <= 1) || (data[0] != 0))
		return -1;

	int s = _identityKey.unmarshal(RR->localCacheSymmetric,data + 1,len);
	if (s < 0)
		return -1;
	int p = 1 + s;

	// If the identity key did not pass verification, it may mean that our local
	// identity has changed. In this case we do not have to forget everything about
	// the peer but we must generate a new identity key by key agreement with our
	// new identity.
	// NOTE(review): this agrees with whatever _id currently holds, but _id is
	// not unmarshaled until below — this appears to rely on the caller having
	// already set _id (e.g. via init()); verify the call order, otherwise a
	// freshly constructed Peer would fail here.
	if (!_identityKey) {
		uint8_t tmp[ZT_SYMMETRIC_KEY_SIZE];
		if (!RR->identity.agree(_id,tmp))
			return -1;
		_identityKey.init(RR->node->now(),tmp);
		Utils::burn(tmp,sizeof(tmp));
	}

	// These are ephemeral and start out as NIL after unmarshal.
	_ephemeralKeys[0].clear();
	_ephemeralKeys[1].clear();

	// NOTE(review): hard-coded offset 38 assumes the marshaled identity key
	// is always exactly 37 bytes (so p == 38 here); 'data + p' would be the
	// self-consistent form — confirm against SymmetricKey marshal size.
	s = _id.unmarshal(data + 38,len - 38);
	if (s < 0)
		return s;
	p += s;

	s = _locator.unmarshal(data + p,len - p);
	if (s < 0)
		return s;
	p += s;

	// One count byte followed by that many bootstrap endpoints.
	if (p >= len)
		return -1;
	const unsigned int bootstrapCount = data[p++];
	if (bootstrapCount > ZT_MAX_PEER_NETWORK_PATHS)
		return -1;
	_bootstrap.clear();
	for(unsigned int i=0;i<bootstrapCount;++i) {
		Endpoint tmp;
		s = tmp.unmarshal(data + p,len - p);
		if (s < 0)
			return s;
		p += s;
		_bootstrap[tmp.type()] = tmp;
	}

	_probe = 0; // ephemeral token, reset on unmarshal

	// Four u16 version fields plus the u16 extension-length field == 10 bytes.
	if ((p + 10) > len)
		return -1;
	_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
	_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
	_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
	_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;

	// Skip the reserved extension field (length-prefixed, currently zero).
	p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);

	return (p > len) ? -1 : p;
}
// Strict-weak-ordering comparator used by _prioritizePaths(): paths that are
// non-null and have received something (lastIn() > 0) sort ahead of null or
// never-received paths.
struct _PathPriorityComparisonOperator
{
	ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const noexcept
	{
		// Sort in order of last received time for receipt of anything over path, which prioritizes
		// paths by aliveness. This will go away when we merge in multipath in favor of something
		// much smarter.
		// NOTE(review): among two live paths this orders the SMALLER (older)
		// lastIn() first, which seems to contradict "prioritizes by
		// aliveness" — confirm the intended direction before changing.
		return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
	}
};
  496. void Peer::_prioritizePaths(const int64_t now)
  497. {
  498. // assumes _lock is locked for writing
  499. _lastPrioritizedPaths = now;
  500. std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
  501. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  502. if ((!_paths[i]) || (!_paths[i]->alive(now))) {
  503. _alivePathCount = i;
  504. for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
  505. _paths[i].zero();
  506. break;
  507. }
  508. }
  509. }
  510. } // namespace ZeroTier