// Peer.cpp
/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Trace.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "Node.hpp"
#include "SelfAwareness.hpp"
#include "InetAddress.hpp"
#include "Protocol.hpp"
#include "Endpoint.hpp"

#include <algorithm> // std::sort (used by _prioritizePaths)
#include <set>

namespace ZeroTier {
  25. struct _PathPriorityComparisonOperator
  26. {
  27. ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const
  28. {
  29. return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
  30. }
  31. };
// Construct an empty Peer attached to a runtime environment. The peer is not
// usable until init() has been called to set its identity and derive the
// shared secret key.
Peer::Peer(const RuntimeEnvironment *renv) :
    RR(renv),
    _lastReceive(0),
    _lastWhoisRequestReceived(0),
    _lastEchoRequestReceived(0),
    _lastPushDirectPathsReceived(0),
    _lastProbeReceived(0),
    _lastAttemptedP2PInit(0),
    _lastTriedStaticPath(0),
    _lastPrioritizedPaths(0),
    _lastAttemptedAggressiveNATTraversal(0),
    _latency(0xffff), // 0xffff == latency unknown (see updateLatency())
    _alivePathCount(0),
    _vProto(0),
    _vMajor(0),
    _vMinor(0),
    _vRevision(0)
{
    // Pin the long-lived shared secret's memory (presumably mlock()-style;
    // see Utils::memoryLock) so it is not paged out to disk.
    Utils::memoryLock(_key,sizeof(_key));
}
// Set this peer's identity and derive the shared secret key from it and our
// own identity. Returns true on success. Returns false, leaving the peer
// unusable, if key agreement fails. Calling again with the same identity is
// a no-op that returns true.
bool Peer::init(const Identity &peerIdentity)
{
    RWMutex::Lock l(_lock);

    // Already initialized with this identity: nothing to do.
    if (_id == peerIdentity)
        return true;
    _id = peerIdentity;

    // Key agreement between our identity and the peer's yields the
    // long-lived symmetric key stored in _key.
    if (!RR->identity.agree(peerIdentity,_key))
        return false;

    // Precompute the probe value we expect to receive from this peer.
    _incomingProbe = Protocol::createProbe(_id,RR->identity,_key);

    return true;
}
  63. void Peer::received(
  64. void *tPtr,
  65. const SharedPtr<Path> &path,
  66. const unsigned int hops,
  67. const uint64_t packetId,
  68. const unsigned int payloadLength,
  69. const Protocol::Verb verb,
  70. const Protocol::Verb inReVerb)
  71. {
  72. const int64_t now = RR->node->now();
  73. _lastReceive = now;
  74. if (hops == 0) {
  75. _lock.rlock();
  76. for(int i=0;i<(int)_alivePathCount;++i) {
  77. if (_paths[i] == path) {
  78. _lock.runlock();
  79. goto path_check_done;
  80. }
  81. }
  82. _lock.runlock();
  83. if (verb == Protocol::VERB_OK) {
  84. RWMutex::Lock l(_lock);
  85. int64_t lastReceiveTimeMax = 0;
  86. int lastReceiveTimeMaxAt = 0;
  87. for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  88. if ((_paths[i]->address().family() == path->address().family()) &&
  89. (_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
  90. (_paths[i]->address().ipsEqual2(path->address()))) {
  91. // Replace older path if everything is the same except the port number.
  92. _paths[i] = path;
  93. goto path_check_done;
  94. } else {
  95. if (_paths[i]) {
  96. if (_paths[i]->lastIn() > lastReceiveTimeMax) {
  97. lastReceiveTimeMax = _paths[i]->lastIn();
  98. lastReceiveTimeMaxAt = i;
  99. }
  100. } else {
  101. lastReceiveTimeMax = 0x7fffffffffffffffLL;
  102. lastReceiveTimeMaxAt = i;
  103. }
  104. }
  105. }
  106. _lastPrioritizedPaths = now;
  107. InetAddress old;
  108. if (_paths[lastReceiveTimeMaxAt])
  109. old = _paths[lastReceiveTimeMaxAt]->address();
  110. _paths[lastReceiveTimeMaxAt] = path;
  111. _bootstrap = Endpoint(path->address());
  112. _prioritizePaths(now);
  113. RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
  114. } else {
  115. if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
  116. RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id,ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
  117. path->sent(now,sendHELLO(tPtr,path->localSocket(),path->address(),now));
  118. }
  119. }
  120. }
  121. path_check_done:
  122. if ((now - _lastAttemptedP2PInit) >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
  123. _lastAttemptedP2PInit = now;
  124. InetAddress addr;
  125. if ((_bootstrap.type() == Endpoint::TYPE_INETADDR_V4)||(_bootstrap.type() == Endpoint::TYPE_INETADDR_V6)) {
  126. RR->t->tryingNewPath(tPtr,0x0a009444,_id,_bootstrap.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
  127. sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);
  128. } if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
  129. if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
  130. RR->t->tryingNewPath(tPtr,0x84a10000,_id,_bootstrap.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
  131. sendHELLO(tPtr,-1,addr,now);
  132. }
  133. }
  134. std::vector<ZT_InterfaceAddress> localInterfaceAddresses(RR->node->localInterfaceAddresses());
  135. std::multimap<unsigned long,InetAddress> detectedAddresses(RR->sa->externalAddresses(now));
  136. std::set<InetAddress> addrs;
  137. for(std::vector<ZT_InterfaceAddress>::const_iterator i(localInterfaceAddresses.begin());i!=localInterfaceAddresses.end();++i)
  138. addrs.insert(asInetAddress(i->address));
  139. for(std::multimap<unsigned long,InetAddress>::const_reverse_iterator i(detectedAddresses.rbegin());i!=detectedAddresses.rend();++i) {
  140. if (i->first <= 1)
  141. break;
  142. if (addrs.count(i->second) == 0) {
  143. addrs.insert(i->second);
  144. break;
  145. }
  146. }
  147. if (!addrs.empty()) {
  148. #if 0
  149. ScopedPtr<Packet> outp(new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS));
  150. outp->addSize(2); // leave room for count
  151. unsigned int count = 0;
  152. for(std::set<InetAddress>::iterator a(addrs.begin());a!=addrs.end();++a) {
  153. uint8_t addressType = 4;
  154. uint8_t addressLength = 6;
  155. unsigned int ipLength = 4;
  156. const void *rawIpData = nullptr;
  157. uint16_t port = 0;
  158. switch(a->ss_family) {
  159. case AF_INET:
  160. rawIpData = &(reinterpret_cast<const sockaddr_in *>(&(*a))->sin_addr.s_addr);
  161. port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in *>(&(*a))->sin_port);
  162. break;
  163. case AF_INET6:
  164. rawIpData = reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_addr.s6_addr;
  165. port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_port);
  166. addressType = 6;
  167. addressLength = 18;
  168. ipLength = 16;
  169. break;
  170. default:
  171. continue;
  172. }
  173. outp->append((uint8_t)0); // no flags
  174. outp->append((uint16_t)0); // no extensions
  175. outp->append(addressType);
  176. outp->append(addressLength);
  177. outp->append(rawIpData,ipLength);
  178. outp->append(port);
  179. ++count;
  180. if (outp->size() >= (ZT_PROTO_MAX_PACKET_LENGTH - 32))
  181. break;
  182. }
  183. if (count > 0) {
  184. outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
  185. outp->compress();
  186. outp->armor(_key,true);
  187. path->send(RR,tPtr,outp->data(),outp->size(),now);
  188. }
  189. #endif
  190. }
  191. }
  192. }
  193. unsigned int Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
  194. {
  195. #if 0
  196. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
  197. outp.append((unsigned char)ZT_PROTO_VERSION);
  198. outp.append((unsigned char)ZEROTIER_VERSION_MAJOR);
  199. outp.append((unsigned char)ZEROTIER_VERSION_MINOR);
  200. outp.append((uint16_t)ZEROTIER_VERSION_REVISION);
  201. outp.append(now);
  202. RR->identity.serialize(outp,false);
  203. atAddress.serialize(outp);
  204. RR->node->expectReplyTo(outp.packetId());
  205. if (atAddress) {
  206. outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
  207. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  208. } else {
  209. RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
  210. }
  211. #endif
  212. }
  213. unsigned int Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
  214. {
  215. Buf outp;
  216. Protocol::Header &ph = outp.as<Protocol::Header>();
  217. ph.packetId = Protocol::getPacketId();
  218. _id.address().copyTo(ph.destination);
  219. RR->identity.address().copyTo(ph.source);
  220. ph.flags = 0;
  221. ph.verb = Protocol::VERB_NOP;
  222. Protocol::armor(outp,sizeof(Protocol::Header),_key,this->cipher());
  223. RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
  224. return sizeof(Protocol::Header);
  225. }
  226. void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
  227. {
  228. RWMutex::RLock l(_lock);
  229. _lastPrioritizedPaths = now;
  230. _prioritizePaths(now);
  231. if (_alivePathCount > 0) {
  232. for (unsigned int i = 0; i < _alivePathCount; ++i) {
  233. _paths[i]->sent(now,sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
  234. if (!pingAllAddressTypes)
  235. return;
  236. }
  237. return;
  238. }
  239. if ((_bootstrap.type() == Endpoint::TYPE_INETADDR_V4)||(_bootstrap.type() == Endpoint::TYPE_INETADDR_V6))
  240. sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);
  241. SharedPtr<Peer> r(RR->topology->root());
  242. if ((r)&&(r.ptr() != this)) {
  243. SharedPtr<Path> rp(r->path(now));
  244. if (rp) {
  245. rp->sent(now,sendHELLO(tPtr,rp->localSocket(),rp->address(),now));
  246. return;
  247. }
  248. }
  249. }
  250. void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
  251. {
  252. RWMutex::RLock l(_lock);
  253. for(unsigned int i=0; i < _alivePathCount; ++i) {
  254. if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope))) {
  255. _paths[i]->sent(now,sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
  256. }
  257. }
  258. }
  259. void Peer::updateLatency(const unsigned int l) noexcept
  260. {
  261. if ((l > 0)&&(l < 0xffff)) {
  262. unsigned int lat = _latency;
  263. if (lat < 0xffff) {
  264. _latency = (l + l + lat) / 3;
  265. } else {
  266. _latency = l;
  267. }
  268. }
  269. }
// Get the current best direct path to this peer, or a NULL SharedPtr if none
// are alive.
//
// Paths are re-sorted and liveness-checked at most once per
// ZT_PEER_PRIORITIZE_PATHS_INTERVAL (under the write lock, since
// _prioritizePaths() mutates _paths); otherwise the cached best path is
// returned under a read lock.
//
// NOTE(review): _lastPrioritizedPaths is read and written here outside any
// lock; this looks like an intentional benign race (worst case an extra or
// slightly delayed re-prioritization) -- confirm it is tolerated by design.
SharedPtr<Path> Peer::path(const int64_t now)
{
    if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
        _lastPrioritizedPaths = now;
        RWMutex::Lock l(_lock); // write lock: _prioritizePaths() mutates _paths
        _prioritizePaths(now);
        if (_alivePathCount == 0)
            return SharedPtr<Path>();
        return _paths[0];
    } else {
        RWMutex::RLock l(_lock);
        if (_alivePathCount == 0)
            return SharedPtr<Path>();
        return _paths[0];
    }
}
  286. bool Peer::direct(const int64_t now)
  287. {
  288. if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
  289. _lastPrioritizedPaths = now;
  290. RWMutex::Lock l(_lock);
  291. _prioritizePaths(now);
  292. return (_alivePathCount > 0);
  293. } else {
  294. RWMutex::RLock l(_lock);
  295. return (_alivePathCount > 0);
  296. }
  297. }
  298. void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
  299. {
  300. RWMutex::RLock l(_lock);
  301. paths.clear();
  302. paths.assign(_paths,_paths + _alivePathCount);
  303. }
// Serialize this peer and hand the record to the node for persistent storage
// (ZT_STATE_OBJECT_PEER). Best effort: does nothing if the temporary buffer
// cannot be allocated or marshaling fails.
void Peer::save(void *tPtr) const
{
    uint8_t *const buf = (uint8_t *)malloc(8 + ZT_PEER_MARSHAL_SIZE_MAX);
    if (!buf) return;

    // Prefix the record with the current wall clock (8 bytes, big-endian)
    // so stale records can be detected on load.
    Utils::storeBigEndian<uint64_t>(buf,(uint64_t)RR->node->now());

    // Hold the read lock only for the marshal itself.
    _lock.rlock();
    const int len = marshal(buf + 8);
    _lock.runlock();

    if (len > 0) {
        // State object key: this peer's address in id[0], id[1] reserved (0).
        uint64_t id[2];
        id[0] = _id.address().toInt();
        id[1] = 0;
        RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
    }

    free(buf);
}
// Attempt to establish direct connectivity to this peer at a given endpoint,
// escalating to more aggressive NAT traversal techniques when warranted.
//
// The actual packets are sent later from alarm(), driven by the queue
// populated here and the peer alarm scheduled at the end.
//
// @param tPtr Thread pointer passed through to callbacks
// @param ep Endpoint to contact (only IP endpoints are currently implemented)
// @param now Current time
// @param bfg1024 If true, permit brute-force scanning of all ports < 1024
void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
{
    static uint8_t junk = 0;

    InetAddress phyAddr(ep.inetAddr());
    if (phyAddr) { // only this endpoint type is currently implemented
        if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,phyAddr))
            return;

        // Sending a packet with a low TTL before the real message assists traversal with some
        // stateful firewalls and is harmless otherwise AFAIK.
        ++junk; // vary the byte so successive firewall-opener packets differ
        RR->node->putPacket(tPtr,-1,phyAddr,&junk,1,2); // trailing 2 is presumably the TTL -- confirm against Node::putPacket

        // In a few hundred milliseconds we'll send the real packet.
        {
            RWMutex::Lock l(_lock);
            // Threshold of ZT_MAX_PEER_NETWORK_PATHS: alarm() only discards
            // queue items once _alivePathCount reaches the threshold, so this
            // item is sent even if some direct paths already exist.
            _contactQueue.push_back(_ContactQueueItem(phyAddr,ZT_MAX_PEER_NETWORK_PATHS));
        }

        // If the peer indicates that they may be behind a symmetric NAT and there are no
        // living direct paths, try a few more aggressive things.
        if ((phyAddr.family() == AF_INET) && (!direct(now))) {
            unsigned int port = phyAddr.port();
            if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
                // If the other side is using a low-numbered port and has elected to
                // have this done, we can try scanning every port below 1024. The search
                // space here is small enough that we have a very good chance of punching.

                // Generate a random order list of all <1024 ports except 0 and the original sending port.
                uint16_t ports[1022];
                uint16_t ctr = 1;
                for (int i=0;i<1022;++i) {
                    if (ctr == port) ++ctr; // skip the port already being tried
                    ports[i] = ctr++;
                }
                // Partial shuffle: 512 random pair swaps (not a full
                // Fisher-Yates, but enough randomization for this purpose).
                for (int i=0;i<512;++i) {
                    uint64_t rn = Utils::random();
                    unsigned int a = ((unsigned int)rn) % 1022;
                    unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
                    if (a != b) {
                        uint16_t tmp = ports[a];
                        ports[a] = ports[b];
                        ports[b] = tmp;
                    }
                }

                // Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
                // abandoning attempts once there is at least one direct path
                // (alivePathThreshold of 1).
                {
                    RWMutex::Lock l(_lock);
                    for (int i=0;i<896;i+=128)
                        _contactQueue.push_back(_ContactQueueItem(phyAddr,ports + i,ports + i + 128,1));
                    _contactQueue.push_back(_ContactQueueItem(phyAddr,ports + 896,ports + 1022,1)); // remaining 126 ports
                }
            } else {
                // Otherwise use the simpler sequential port attempt method in intervals:
                // the next three ports above the one the peer used.
                RWMutex::Lock l(_lock);
                for (int k=0;k<3;++k) {
                    if (++port > 65535) break;
                    InetAddress tryNext(phyAddr);
                    tryNext.setPort(port);
                    _contactQueue.push_back(_ContactQueueItem(tryNext,1));
                }
            }
        }

        // Start alarms going off to actually send these...
        RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
    }
}
// Alarm callback: perform the next queued NAT traversal contact attempt.
//
// Pops one contact queue item (discarding any whose alive-path threshold has
// already been met), sends it as an encrypted probe (protocol >= 11) or a NOP
// (older peers), and re-arms the alarm if the queue is not yet empty.
void Peer::alarm(void *tPtr,const int64_t now)
{
    bool stillHaveContactQueueItems;
    _ContactQueueItem qi;
    {
        RWMutex::Lock l(_lock);

        if (_contactQueue.empty())
            return;
        // Discard already-satisfied items from the front of the queue.
        while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
            _contactQueue.pop_front();
            if (_contactQueue.empty())
                return;
        }

        // Copy the front item out (swapping the port vector to avoid a
        // copy) so the sends below can happen outside the lock.
        _ContactQueueItem &qi2 = _contactQueue.front();
        qi.address = qi2.address;
        qi.ports.swap(qi2.ports);
        qi.alivePathThreshold = qi2.alivePathThreshold;
        _contactQueue.pop_front();

        // Prune the rest of the queue of items that are no longer applicable
        // because the alive path count has exceeded their threshold.
        for(std::list<_ContactQueueItem>::iterator q(_contactQueue.begin());q!=_contactQueue.end();) {
            if (_alivePathCount >= q->alivePathThreshold)
                _contactQueue.erase(q++); // post-increment keeps the iterator valid across list::erase
            else ++q;
        }

        stillHaveContactQueueItems = !_contactQueue.empty();
    }

    if (_vProto >= 11) {
        // Peer speaks the newer protocol: send a small encrypted probe to
        // the item's address (or to each of its listed ports).
        uint64_t outgoingProbe = Protocol::createProbe(RR->identity,_id,_key);
        if (qi.ports.empty()) {
            RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
        } else {
            for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
                qi.address.setPort(*p);
                RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
            }
        }
    } else {
        // Older peer: send header-only NOP packets instead.
        if (qi.ports.empty()) {
            this->sendNOP(tPtr,-1,qi.address,now);
        } else {
            for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
                qi.address.setPort(*p);
                this->sendNOP(tPtr,-1,qi.address,now);
            }
        }
    }

    if (stillHaveContactQueueItems)
        RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
// Serialize this peer into a flat buffer.
//
// @param data Buffer of at least ZT_PEER_MARSHAL_SIZE_MAX bytes
// @return Number of bytes written, or <= 0 on error
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
    data[0] = 0; // serialized peer version

    // For faster unmarshaling on large nodes the long-term secret key is cached. It's
    // encrypted with a symmetric key derived from a hash of the local node's identity
    // secrets, so the local node's address is also included. That way the unmarshal
    // code can check this address and not use this cached key if the local identity has
    // changed. In that case agreement must be executed again.
    RR->identity.address().copyTo(data + 1);
    RR->localCacheSymmetric.encrypt(_key,data + 6);
    RR->localCacheSymmetric.encrypt(_key + 16,data + 22);

    // NOTE(review): _key is read above before the lock is taken; it appears
    // to only be written in init()/unmarshal() -- confirm no concurrent
    // re-initialization can race this.
    RWMutex::RLock l(_lock);

    int s = _id.marshal(data + 38,false);
    if (s <= 0)
        return s;
    int p = s + 38;

    s = _locator.marshal(data + p);
    if (s <= 0)
        return s;
    p += s;

    s = _bootstrap.marshal(data + p);
    if (s <= 0)
        return s;
    p += s;

    // Remote version fields, 16 bits each, big-endian.
    Utils::storeBigEndian(data + p,(uint16_t)_vProto);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
    p += 2;

    // Trailing 16-bit extension-field length, currently zero; unmarshal()
    // skips this many additional bytes.
    data[p++] = 0;
    data[p++] = 0;

    return p;
}
// Deserialize a peer record previously produced by marshal().
//
// @param data Serialized bytes
// @param len Length of data in bytes
// @return Number of bytes read, or a negative value on error
int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
    int p;
    bool mustRecomputeSecret;
    {
        RWMutex::Lock l(_lock);

        // Minimum fixed-header length and serialized object version check.
        if ((len <= 38) || (data[0] != 0))
            return -1;

        if (Address(data + 1) == RR->identity.address()) {
            // Cached secret was encrypted under this same local identity:
            // decrypt and reuse it, skipping key agreement.
            RR->localCacheSymmetric.decrypt(data + 6,_key);
            RR->localCacheSymmetric.decrypt(data + 22,_key + 16);
            mustRecomputeSecret = false;
        } else {
            mustRecomputeSecret = true; // can't use cached key if local identity has changed
        }

        int s = _id.unmarshal(data + 38,len - 38);
        if (s <= 0)
            return s;
        p = s + 38;
        s = _locator.unmarshal(data + p,len - p);
        if (s <= 0)
            return s;
        p += s;
        s = _bootstrap.unmarshal(data + p,len - p);
        if (s <= 0)
            return s;
        p += s;

        // Four 16-bit version fields plus the 2-byte extension length below.
        if ((p + 10) > len)
            return -1;
        _vProto = Utils::loadBigEndian<uint16_t>(data + p);
        p += 2;
        _vMajor = Utils::loadBigEndian<uint16_t>(data + p);
        p += 2;
        _vMinor = Utils::loadBigEndian<uint16_t>(data + p);
        p += 2;
        _vRevision = Utils::loadBigEndian<uint16_t>(data + p);
        p += 2;

        // Skip the length-prefixed extension field written by marshal().
        p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
        if (p > len)
            return -1;
    }

    // If the cached key could not be used, redo key agreement with the
    // (possibly new) local identity.
    if (mustRecomputeSecret) {
        if (!RR->identity.agree(_id,_key))
            return -1;
    }

    // Recompute the probe value we expect from this peer.
    _incomingProbe = Protocol::createProbe(_id,RR->identity,_key);

    return p;
}
  518. void Peer::_prioritizePaths(const int64_t now)
  519. {
  520. // assumes _lock is locked for writing
  521. std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
  522. for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  523. if ((!_paths[i]) || (!_paths[i]->alive(now))) {
  524. _alivePathCount = i;
  525. for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
  526. _paths[i].zero();
  527. return;
  528. }
  529. }
  530. }
  531. } // namespace ZeroTier