Peer.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2019 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#include "../version.h"

#include "Constants.hpp"
#include "Peer.hpp"
#include "Node.hpp"
#include "Switch.hpp"
#include "Network.hpp"
#include "SelfAwareness.hpp"
#include "Packet.hpp"
#include "Trace.hpp"
#include "InetAddress.hpp"
#include "RingBuffer.hpp"
#include "Utils.hpp"

#include "../include/ZeroTierDebug.h"

namespace ZeroTier {

static unsigned char s_freeRandomByteCounter = 0;

Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
	RR(renv),
	_lastReceive(0),
	_lastNontrivialReceive(0),
	_lastTriedMemorizedPath(0),
	_lastDirectPathPushSent(0),
	_lastDirectPathPushReceive(0),
	_lastCredentialRequestSent(0),
	_lastWhoisRequestReceived(0),
	_lastEchoRequestReceived(0),
	_lastCredentialsReceived(0),
	_lastTrustEstablishedPacketReceived(0),
	_lastSentFullHello(0),
	_lastACKWindowReset(0),
	_lastQoSWindowReset(0),
	_lastMultipathCompatibilityCheck(0),
	_freeRandomByte((unsigned char)((uintptr_t)this >> 4) ^ ++s_freeRandomByteCounter),
	_uniqueAlivePathCount(0),
	_localMultipathSupported(false),
	_remoteMultipathSupported(false),
	_canUseMultipath(false),
	_vProto(0),
	_vMajor(0),
	_vMinor(0),
	_vRevision(0),
	_id(peerIdentity),
	_directPathPushCutoffCount(0),
	_credentialsCutoffCount(0),
	_linkIsBalanced(false),
	_linkIsRedundant(false),
	_remotePeerMultipathEnabled(false),
	_lastAggregateStatsReport(0),
	_lastAggregateAllocation(0),
	_virtualPathCount(0),
	_roundRobinPathAssignmentIdx(0)
{
	if (!myIdentity.agree(peerIdentity,_key,ZT_PEER_SECRET_KEY_LENGTH))
		throw ZT_EXCEPTION_INVALID_ARGUMENT;
}

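/*
 * Called when a packet is received from this peer. Updates receive timestamps,
 * records per-path statistics when multipath is in use, learns or replaces a
 * direct path when a zero-hop packet arrives on an unknown path, and
 * periodically pushes our own direct paths to peers we trust.
 */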
void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Packet::Verb verb,
	const uint64_t inRePacketId,
	const Packet::Verb inReVerb,
	const bool trustEstablished,
	const uint64_t networkId)
{
	const int64_t now = RR->node->now();

	_lastReceive = now;
	switch (verb) {
		case Packet::VERB_FRAME:
		case Packet::VERB_EXT_FRAME:
		case Packet::VERB_NETWORK_CONFIG_REQUEST:
		case Packet::VERB_NETWORK_CONFIG:
		case Packet::VERB_MULTICAST_FRAME:
			_lastNontrivialReceive = now;
			break;
		default:
			break;
	}

	if (trustEstablished) {
		_lastTrustEstablishedPacketReceived = now;
		path->trustedPacketReceived(now);
	}

	{
		Mutex::Lock _l(_paths_m);
		recordIncomingPacket(tPtr, path, packetId, payloadLength, verb, now);
		if (_canUseMultipath) {
			if (path->needsToSendQoS(now)) {
				sendQOS_MEASUREMENT(tPtr, path, path->localSocket(), path->address(), now);
			}
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					_paths[i].p->processBackgroundPathMeasurements(now);
				}
			}
		}
	}

	if (hops == 0) {
		// If this is a direct packet (no hops), update existing paths or learn new ones
		bool havePath = false;
		{
			Mutex::Lock _l(_paths_m);
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if (_paths[i].p == path) {
						_paths[i].lr = now;
						havePath = true;
						break;
					}
				} else break;
			}
		}

		bool attemptToContact = false;
		if ((!havePath)&&(RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id.address(),path->localSocket(),path->address()))) {
			Mutex::Lock _l(_paths_m);

			// Paths are redundant if they duplicate an alive path to the same IP or
			// with the same local socket and address family.
			bool redundant = false;
			unsigned int replacePath = ZT_MAX_PEER_NETWORK_PATHS;
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if ( (_paths[i].p->alive(now)) && ( ((_paths[i].p->localSocket() == path->localSocket())&&(_paths[i].p->address().ss_family == path->address().ss_family)) || (_paths[i].p->address().ipsEqual2(path->address())) ) ) {
						redundant = true;
						break;
					}
					// If the path is the same address and port, simply assume this is a replacement
					if ( (_paths[i].p->address().ipsEqual2(path->address()))) {
						replacePath = i;
						break;
					}
				} else break;
			}

			// If the path isn't a duplicate of the same localSocket AND we haven't already determined a replacePath,
			// then find the worst path and replace it.
			if (!redundant && replacePath == ZT_MAX_PEER_NETWORK_PATHS) {
				int replacePathQuality = 0;
				for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
					if (_paths[i].p) {
						const int q = _paths[i].p->quality(now);
						if (q > replacePathQuality) {
							replacePathQuality = q;
							replacePath = i;
						}
					} else {
						replacePath = i;
						break;
					}
				}
			}

			if (replacePath != ZT_MAX_PEER_NETWORK_PATHS) {
				if (verb == Packet::VERB_OK) {
					RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
					_paths[replacePath].lr = now;
					_paths[replacePath].p = path;
					_paths[replacePath].priority = 1;
				} else {
					attemptToContact = true;
				}
				// Every time we learn of a new path, rebuild the set of virtual paths
				constructSetOfVirtualPaths();
			}
		}

		if (attemptToContact) {
			attemptToContactAt(tPtr,path->localSocket(),path->address(),now,true);
			path->sent(now);
			RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
		}
	}

	// If we have a trust relationship periodically push a message enumerating
	// all known external addresses for ourselves. If we already have a path this
	// is done less frequently.
	if (this->trustEstablished(now)) {
		const int64_t sinceLastPush = now - _lastDirectPathPushSent;
		if (sinceLastPush >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)
			|| (_localMultipathSupported && (sinceLastPush >= (ZT_DIRECT_PATH_PUSH_INTERVAL_MULTIPATH)))) {
			_lastDirectPathPushSent = now;
			std::vector<InetAddress> pathsToPush(RR->node->directPaths());
			if (pathsToPush.size() > 0) {
				std::vector<InetAddress>::const_iterator p(pathsToPush.begin());
				while (p != pathsToPush.end()) {
					Packet *const outp = new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
					outp->addSize(2); // leave room for count
					unsigned int count = 0;
					while ((p != pathsToPush.end())&&((outp->size() + 24) < 1200)) {
						uint8_t addressType = 4;
						switch(p->ss_family) {
							case AF_INET:
								break;
							case AF_INET6:
								addressType = 6;
								break;
							default: // we currently only push IP addresses
								++p;
								continue;
						}
						outp->append((uint8_t)0); // no flags
						outp->append((uint16_t)0); // no extensions
						outp->append(addressType);
						outp->append((uint8_t)((addressType == 4) ? 6 : 18));
						outp->append(p->rawIpData(),((addressType == 4) ? 4 : 16));
						outp->append((uint16_t)p->port());
						++count;
						++p;
					}
					if (count) {
						outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
						outp->compress();
						outp->armor(_key,true);
						path->send(RR,tPtr,outp->data(),outp->size(),now);
					}
					delete outp;
				}
			}
		}
	}
}

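/*
 * Builds the set of (remote path, local socket) combinations used by the
 * multipath bonding logic: every alive remote path is paired with every local
 * socket that currently carries an alive path, so traffic can be balanced
 * across all usable interfaces. A no-op unless the remote peer also supports
 * multipath.
 */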
void Peer::constructSetOfVirtualPaths()
{
	if (!_remoteMultipathSupported) {
		return;
	}
	Mutex::Lock _l(_virtual_paths_m);
	int64_t now = RR->node->now();
	_virtualPathCount = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			for(unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
				if (_paths[j].p && _paths[j].p->alive(now)) {
					int64_t localSocket = _paths[j].p->localSocket();
					bool foundVirtualPath = false;
					for (int k=0; k<_virtualPaths.size(); k++) {
						if (_virtualPaths[k]->localSocket == localSocket && _virtualPaths[k]->p == _paths[i].p) {
							foundVirtualPath = true;
						}
					}
					if (!foundVirtualPath) {
						VirtualPath *np = new VirtualPath;
						np->p = _paths[i].p;
						np->localSocket = localSocket;
						_virtualPaths.push_back(np);
					}
				}
			}
		}
	}
}

void Peer::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
	uint16_t payloadLength, const Packet::Verb verb, int64_t now)
{
	_freeRandomByte += (unsigned char)(packetId >> 8); // grab entropy to use in path selection logic for multipath
	if (_canUseMultipath) {
		path->recordOutgoingPacket(now, packetId, payloadLength, verb);
	}
}

void Peer::recordIncomingPacket(void *tPtr, const SharedPtr<Path> &path, const uint64_t packetId,
	uint16_t payloadLength, const Packet::Verb verb, int64_t now)
{
	if (_canUseMultipath) {
		if (path->needsToSendAck(now)) {
			sendACK(tPtr, path, path->localSocket(), path->address(), now);
		}
		path->recordIncomingPacket(now, packetId, payloadLength, verb);
	}
}

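/*
 * Recomputes each path's share of the aggregate link. Path stability,
 * lifetime throughput, ACK age, and IP scope are normalized into a relative
 * quality score, and each path's component allocation (0-255) is then updated
 * according to the active multipath balancing mode.
 */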
void Peer::computeAggregateAllocation(int64_t now)
{
	float maxStability = 0;
	float totalRelativeQuality = 0;
	float maxThroughput = 1;
	float maxScope = 0;
	float relStability[ZT_MAX_PEER_NETWORK_PATHS];
	float relThroughput[ZT_MAX_PEER_NETWORK_PATHS];
	memset(&relStability, 0, sizeof(relStability));
	memset(&relThroughput, 0, sizeof(relThroughput));

	// Survey all paths
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] = _paths[i].p->lastComputedStability();
			relThroughput[i] = (float)_paths[i].p->maxLifetimeThroughput();
			maxStability = relStability[i] > maxStability ? relStability[i] : maxStability;
			maxThroughput = relThroughput[i] > maxThroughput ? relThroughput[i] : maxThroughput;
			maxScope = _paths[i].p->ipScope() > maxScope ? _paths[i].p->ipScope() : maxScope;
		}
	}

	// Convert to relative values
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] /= maxStability ? maxStability : 1;
			relThroughput[i] /= maxThroughput ? maxThroughput : 1;
			float normalized_ma = Utils::normalize((float)_paths[i].p->ackAge(now), 0, ZT_PATH_MAX_AGE, 0, 10);
			float age_contrib = exp((-1)*normalized_ma);
			float relScope = ((float)(_paths[i].p->ipScope()+1) / (maxScope + 1));
			float relQuality =
				(relStability[i] * (float)ZT_PATH_CONTRIB_STABILITY)
				+ (fmaxf(1.0f, relThroughput[i]) * (float)ZT_PATH_CONTRIB_THROUGHPUT)
				+ relScope * (float)ZT_PATH_CONTRIB_SCOPE;
			relQuality *= age_contrib;
			// Clamp values
			relQuality = relQuality > (1.00f / 100.0f) ? relQuality : 0.0f;
			relQuality = relQuality < (99.0f / 100.0f) ? relQuality : 1.0f;
			totalRelativeQuality += relQuality;
			_paths[i].p->updateRelativeQuality(relQuality);
		}
	}

	// Convert set of relative performances into an allocation set
	for(uint16_t i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RANDOM) {
				_paths[i].p->updateComponentAllocationOfAggregateLink(((float)_pathChoiceHist.countValue(i) / (float)_pathChoiceHist.count()) * 255);
			}
			if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_OPAQUE) {
				_paths[i].p->updateComponentAllocationOfAggregateLink((unsigned char)((_paths[i].p->relativeQuality() / totalRelativeQuality) * 255));
			}
		}
	}
}

int Peer::computeAggregateLinkPacketDelayVariance()
{
	float pdv = 0.0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			pdv += _paths[i].p->relativeQuality() * _paths[i].p->packetDelayVariance();
		}
	}
	return (int)pdv;
}

int Peer::computeAggregateLinkMeanLatency()
{
	int ml = 0;
	int pathCount = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			pathCount++;
			ml += (int)(_paths[i].p->relativeQuality() * _paths[i].p->meanLatency());
		}
	}
	return (pathCount > 0) ? (ml / pathCount) : 0; // guard against division by zero when no paths exist
}

int Peer::aggregateLinkPhysicalPathCount()
{
	std::map<std::string, bool> ifnamemap;
	int pathCount = 0;
	int64_t now = RR->node->now();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			if (!ifnamemap[_paths[i].p->getName()]) {
				ifnamemap[_paths[i].p->getName()] = true;
				pathCount++;
			}
		}
	}
	return pathCount;
}

int Peer::aggregateLinkLogicalPathCount()
{
	int pathCount = 0;
	int64_t now = RR->node->now();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			pathCount++;
		}
	}
	return pathCount;
}

std::vector<SharedPtr<Path> > Peer::getAllPaths(int64_t now)
{
	Mutex::Lock _l(_virtual_paths_m); // FIXME: TX can now lock RX
	std::vector<SharedPtr<Path> > paths;
	for (int i=0; i<_virtualPaths.size(); i++) {
		if (_virtualPaths[i]->p) {
			paths.push_back(_virtualPaths[i]->p);
		}
	}
	return paths;
}

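/*
 * Selects the outgoing path for a packet (optionally keyed by flow ID). When
 * multipath is not in use this falls back to the single best-quality path;
 * otherwise the path is chosen according to the configured multipath policy
 * (random, broadcast, active-backup, round-robin, flow hashing, or dynamic
 * allocation).
 */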
SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired, int64_t flowId)
{
	Mutex::Lock _l(_paths_m);
	SharedPtr<Path> selectedPath;
	char curPathStr[128];
	char newPathStr[128];
	unsigned int bestPath = ZT_MAX_PEER_NETWORK_PATHS;

	/**
	 * Send traffic across the highest quality path only. This algorithm will still
	 * use the old path quality metric from protocol version 9.
	 */
	if (!_canUseMultipath) {
		long bestPathQuality = 2147483647;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if ((includeExpired)||((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION)) {
					const long q = _paths[i].p->quality(now) / _paths[i].priority;
					if (q <= bestPathQuality) {
						bestPathQuality = q;
						bestPath = i;
					}
				}
			} else break;
		}
		if (bestPath != ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
		return SharedPtr<Path>();
	}

	// Update path measurements
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			_paths[i].p->processBackgroundPathMeasurements(now);
		}
	}

	// Detect new flows and update existing records
	if (_flows.count(flowId)) {
		_flows[flowId]->lastSend = now;
	} else {
		fprintf(stderr, "new flow %llx detected between this node and %llx (%lu active flow(s))\n",
			flowId, this->_id.address().toInt(), (_flows.size()+1));
		struct Flow *newFlow = new Flow(flowId, now);
		_flows[flowId] = newFlow;
		newFlow->assignedPath = nullptr;
	}

	// Construct set of virtual paths if needed
	if (!_virtualPaths.size()) {
		constructSetOfVirtualPaths();
	}
	if (!_virtualPaths.size()) {
		fprintf(stderr, "no paths to send packet out on\n");
		return SharedPtr<Path>();
	}

	/**
	 * Traffic is randomly distributed among all active paths.
	 */
	int numAlivePaths = 0;
	int numStalePaths = 0;
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RANDOM) {
		int sz = _virtualPaths.size();
		if (sz) {
			int idx = _freeRandomByte % sz;
			_pathChoiceHist.push(idx);
			char pathStr[128];
			_virtualPaths[idx]->p->address().toString(pathStr);
			fprintf(stderr, "sending out: (%llx), idx=%d: path=%s, localSocket=%lld\n",
				this->_id.address().toInt(), idx, pathStr, _virtualPaths[idx]->localSocket);
			return _virtualPaths[idx]->p;
		}
		// This call is algorithmically inert but gives us a value to show in the status output
		computeAggregateAllocation(now);
	}

	/**
	 * All traffic is sent on all paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BROADCAST) {
		// Not handled here. Handled in Switch::_trySend()
	}

	/**
	 * Only one link is active. Fail-over is immediate.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_ACTIVE_BACKUP) {
		bool bFoundHotPath = false;
		if (!_activeBackupPath) {
			/* Select the first path that appears to still be active.
			 * This will eventually be user-configurable */
			for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; i++) {
				if (_paths[i].p) {
					if (_activeBackupPath.ptr() == _paths[i].p.ptr()) {
						continue;
					}
					_activeBackupPath = _paths[i].p;
					if ((now - _paths[i].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
						bFoundHotPath = true;
						_activeBackupPath = _paths[i].p;
						_activeBackupPath->address().toString(curPathStr);
						fprintf(stderr, "selected %s as the primary active-backup path to %llx\n",
							curPathStr, this->_id.address().toInt());
					}
				}
			}
			if (!_activeBackupPath) {
				return SharedPtr<Path>();
			}
			if (!bFoundHotPath) {
				_activeBackupPath->address().toString(curPathStr);
				fprintf(stderr, "no hot paths available to use as active-backup primary to %llx, selected %s anyway\n",
					this->_id.address().toInt(), curPathStr);
			}
		} else {
			if ((now - _activeBackupPath->lastIn()) > ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
				_activeBackupPath->address().toString(curPathStr);
				/* Fail-over to the first path that appears to still be active.
				 * This will eventually be user-configurable */
				for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; i++) {
					if (_paths[i].p) {
						if (_activeBackupPath.ptr() == _paths[i].p.ptr()) {
							continue;
						}
						if ((now - _paths[i].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
							bFoundHotPath = true;
							_activeBackupPath->address().toString(curPathStr); // Record path string for later debug trace
							_activeBackupPath = _paths[i].p;
							_activeBackupPath->address().toString(newPathStr);
						}
					}
				}
				if (bFoundHotPath) {
					fprintf(stderr, "primary active-backup path %s to %llx appears to be dead, switched to path %s\n",
						curPathStr, this->_id.address().toInt(), newPathStr);
				}
			}
		}
		return _activeBackupPath;
	}

	/**
	 * Packets are striped across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RR_OPAQUE) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_RR_OPAQUE\n");
		int16_t previousIdx = _roundRobinPathAssignmentIdx;
		if (_roundRobinPathAssignmentIdx < (_virtualPaths.size()-1)) {
			_roundRobinPathAssignmentIdx++;
		} else {
			_roundRobinPathAssignmentIdx = 0;
		}
		selectedPath = _virtualPaths[previousIdx]->p;
		char pathStr[128];
		selectedPath->address().toString(pathStr);
		fprintf(stderr, "sending packet out on path %s at index %d\n",
			pathStr, previousIdx);
		return selectedPath;
	}

	/**
	 * Flows are striped across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RR_FLOW) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_RR_FLOW\n");
	}

	/**
	 * Flows are hashed across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_XOR_FLOW) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_XOR_FLOW (%llx) \n", flowId);
		struct Flow *currFlow = NULL;
		if (_flows.count(flowId)) {
			currFlow = _flows[flowId];
			if (!currFlow->assignedPath) {
				int idx = abs((int)(currFlow->flowId % (_virtualPaths.size()-1)));
				currFlow->assignedPath = _virtualPaths[idx];
				_virtualPaths[idx]->p->address().toString(curPathStr);
				fprintf(stderr, "assigning flow %llx between this node and peer %llx to path %s at index %d\n",
					currFlow->flowId, this->_id.address().toInt(), curPathStr, idx);
			} else {
				if (!currFlow->assignedPath->p->alive(now)) {
					currFlow->assignedPath->p->address().toString(curPathStr);
					// Re-assign
					int idx = abs((int)(currFlow->flowId % (_virtualPaths.size()-1)));
					currFlow->assignedPath = _virtualPaths[idx];
					_virtualPaths[idx]->p->address().toString(newPathStr);
					fprintf(stderr, "path %s assigned to flow %llx between this node and %llx appears to be dead, reassigning to path %s\n",
						curPathStr, currFlow->flowId, this->_id.address().toInt(), newPathStr);
				}
			}
			return currFlow->assignedPath->p;
		}
	}

	/**
	 * Proportionally allocate traffic according to dynamic path quality measurements.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_OPAQUE) {
		if ((now - _lastAggregateAllocation) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
			_lastAggregateAllocation = now;
			computeAggregateAllocation(now);
		}
		// Randomly choose path according to their allocations
		float rf = _freeRandomByte;
		for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (rf < _paths[i].p->allocation()) {
					bestPath = i;
					_pathChoiceHist.push(bestPath); // Record which path we chose
					break;
				}
				rf -= _paths[i].p->allocation();
			}
		}
		if (bestPath < ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
	}

	/**
	 * Flows are dynamically allocated across paths in proportion to link strength and load.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_FLOW) {
	}

	return SharedPtr<Path>();
}

char *Peer::interfaceListStr()
{
	std::map<std::string, int> ifnamemap;
	char tmp[32];
	const int64_t now = RR->node->now();
	char *ptr = _interfaceListStr;
	bool imbalanced = false;
	memset(_interfaceListStr, 0, sizeof(_interfaceListStr));
	int alivePathCount = aggregateLinkLogicalPathCount();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			int ipv = _paths[i].p->address().isV4();
			// If this is acting as an aggregate link, check allocations
			float targetAllocation = 1.0f / (float)alivePathCount;
			float currentAllocation = 1.0f;
			if (alivePathCount > 1) {
				currentAllocation = (float)_pathChoiceHist.countValue(i) / (float)_pathChoiceHist.count();
				if (fabs(targetAllocation - currentAllocation) > ZT_PATH_IMBALANCE_THRESHOLD) {
					imbalanced = true;
				}
			}
			char *ipvStr = ipv ? (char*)"ipv4" : (char*)"ipv6";
			sprintf(tmp, "(%s, %s, %.3f)", _paths[i].p->getName(), ipvStr, currentAllocation);
			// Prevent duplicates
			if(ifnamemap[_paths[i].p->getName()] != ipv) {
				memcpy(ptr, tmp, strlen(tmp));
				ptr += strlen(tmp);
				*ptr = ' ';
				ptr++;
				ifnamemap[_paths[i].p->getName()] = ipv;
			}
		}
	}
	ptr--; // Overwrite trailing space
	if (imbalanced) {
		sprintf(tmp, ", is asymmetrical");
		memcpy(ptr, tmp, sizeof(tmp));
	} else {
		*ptr = '\0';
	}
	return _interfaceListStr;
}

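/*
 * Sends a VERB_RENDEZVOUS packet to this peer and to 'other', each containing
 * the best-scoring physical address of the opposite side, so the two peers
 * can attempt to open a direct path to each other (NAT hole punching). Best
 * addresses are chosen per IP scope, preferring IPv6 when both sides have a
 * usable IPv6 path at the same scope.
 */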
void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &other) const
{
	unsigned int myBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int myBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long myBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long myBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int theirBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int theirBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long theirBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long theirBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	for(int i=0;i<=ZT_INETADDRESS_MAX_SCOPE;++i) {
		myBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		myBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		myBestV4QualityByScope[i] = 2147483647;
		myBestV6QualityByScope[i] = 2147483647;
		theirBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		theirBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		theirBestV4QualityByScope[i] = 2147483647;
		theirBestV6QualityByScope[i] = 2147483647;
	}

	Mutex::Lock _l1(_paths_m);
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			const long q = _paths[i].p->quality(now) / _paths[i].priority;
			const unsigned int s = (unsigned int)_paths[i].p->ipScope();
			switch(_paths[i].p->address().ss_family) {
				case AF_INET:
					if (q <= myBestV4QualityByScope[s]) {
						myBestV4QualityByScope[s] = q;
						myBestV4ByScope[s] = i;
					}
					break;
				case AF_INET6:
					if (q <= myBestV6QualityByScope[s]) {
						myBestV6QualityByScope[s] = q;
						myBestV6ByScope[s] = i;
					}
					break;
			}
		} else break;
	}

	Mutex::Lock _l2(other->_paths_m);
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (other->_paths[i].p) {
			const long q = other->_paths[i].p->quality(now) / other->_paths[i].priority;
			const unsigned int s = (unsigned int)other->_paths[i].p->ipScope();
			switch(other->_paths[i].p->address().ss_family) {
				case AF_INET:
					if (q <= theirBestV4QualityByScope[s]) {
						theirBestV4QualityByScope[s] = q;
						theirBestV4ByScope[s] = i;
					}
					break;
				case AF_INET6:
					if (q <= theirBestV6QualityByScope[s]) {
						theirBestV6QualityByScope[s] = q;
						theirBestV6ByScope[s] = i;
					}
					break;
			}
		} else break;
	}

	unsigned int mine = ZT_MAX_PEER_NETWORK_PATHS;
	unsigned int theirs = ZT_MAX_PEER_NETWORK_PATHS;

	for(int s=ZT_INETADDRESS_MAX_SCOPE;s>=0;--s) {
		if ((myBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
			mine = myBestV6ByScope[s];
			theirs = theirBestV6ByScope[s];
			break;
		}
		if ((myBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
			mine = myBestV4ByScope[s];
			theirs = theirBestV4ByScope[s];
			break;
		}
	}

	if (mine != ZT_MAX_PEER_NETWORK_PATHS) {
		unsigned int alt = (unsigned int)RR->node->prng() & 1; // randomize which hint we send first for black magickal NAT-t reasons
		const unsigned int completed = alt + 2;
		while (alt != completed) {
			if ((alt & 1) == 0) {
				Packet outp(_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
				outp.append((uint8_t)0);
				other->_id.address().appendTo(outp);
				outp.append((uint16_t)other->_paths[theirs].p->address().port());
				if (other->_paths[theirs].p->address().ss_family == AF_INET6) {
					outp.append((uint8_t)16);
					outp.append(other->_paths[theirs].p->address().rawIpData(),16);
				} else {
					outp.append((uint8_t)4);
					outp.append(other->_paths[theirs].p->address().rawIpData(),4);
				}
				outp.armor(_key,true);
				_paths[mine].p->send(RR,tPtr,outp.data(),outp.size(),now);
			} else {
				Packet outp(other->_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
				outp.append((uint8_t)0);
				_id.address().appendTo(outp);
				outp.append((uint16_t)_paths[mine].p->address().port());
				if (_paths[mine].p->address().ss_family == AF_INET6) {
					outp.append((uint8_t)16);
					outp.append(_paths[mine].p->address().rawIpData(),16);
				} else {
					outp.append((uint8_t)4);
					outp.append(_paths[mine].p->address().rawIpData(),4);
				}
				outp.armor(other->_key,true);
				other->_paths[theirs].p->send(RR,tPtr,outp.data(),outp.size(),now);
			}
			++alt;
		}
	}
}

inline void Peer::processBackgroundPeerTasks(const int64_t now)
{
	// Determine current multipath compatibility with other peer
	if ((now - _lastMultipathCompatibilityCheck) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
		//
		// Cache number of available paths so that we can short-circuit multipath logic elsewhere
		//
		// We also take notice of duplicate paths (same IP only) because we may have
		// recently received a direct path push from a peer and our list might contain
		// a dead path which hasn't been fully recognized as such. In this case we
		// don't want the duplicate to trigger execution of multipath code prematurely.
		//
		// This is done to support the behavior of auto multipath enable/disable
		// without user intervention.
		//
		int currAlivePathCount = 0;
		int duplicatePathsFound = 0;
		for (unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				currAlivePathCount++;
				for (unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
					if (_paths[i].p && _paths[j].p && _paths[i].p->address().ipsEqual2(_paths[j].p->address()) && i != j) {
						duplicatePathsFound+=1;
						break;
					}
				}
			}
		}
		_uniqueAlivePathCount = (currAlivePathCount - (duplicatePathsFound / 2));
		_lastMultipathCompatibilityCheck = now;
		_localMultipathSupported = ((RR->node->getMultipathMode() != ZT_MULTIPATH_NONE) && (ZT_PROTO_VERSION > 9));
		_remoteMultipathSupported = _vProto > 9;
		// If both peers support multipath and more than one path exists, we can use multipath logic
		_canUseMultipath = _localMultipathSupported && _remoteMultipathSupported && (_uniqueAlivePathCount > 1);
	}

	// Remove old flows
	std::map<int64_t, struct Flow *>::iterator it = _flows.begin();
	while (it != _flows.end()) {
		if ((now - it->second->lastSend) > ZT_MULTIPATH_FLOW_EXPIRATION) {
			fprintf(stderr, "forgetting flow %llx between this node and %llx (%lu active flow(s))\n",
				it->first, this->_id.address().toInt(), _flows.size());
			it = _flows.erase(it);
		} else {
			it++;
		}
	}
}

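/*
 * ACK and QOS_MEASUREMENT packets carry the receive-side statistics that the
 * remote end uses to judge path quality when multipath is enabled. If an
 * explicit address is given the packet is sent directly over that socket,
 * otherwise it is handed to the switch for normal routing.
 */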
void Peer::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ACK);
	uint32_t bytesToAck = path->bytesToAck();
	outp.append<uint32_t>(bytesToAck);
	if (atAddress) {
		outp.armor(_key,false);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false);
	}
	path->sentAck(now);
}

void Peer::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	const int64_t _now = RR->node->now();
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
	char qosData[ZT_PATH_MAX_QOS_PACKET_SZ];
	int16_t len = path->generateQoSPacket(_now,qosData);
	outp.append(qosData,len);
	if (atAddress) {
		outp.armor(_key,false);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false);
	}
	path->sentQoS(now);
}

void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);

	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(now);
	RR->identity.serialize(outp,false);
	atAddress.serialize(outp);

	outp.append((uint64_t)RR->topology->planetWorldId());
	outp.append((uint64_t)RR->topology->planetWorldTimestamp());

	const unsigned int startCryptedPortionAt = outp.size();

	std::vector<World> moons(RR->topology->moons());
	std::vector<uint64_t> moonsWanted(RR->topology->moonsWanted());
	outp.append((uint16_t)(moons.size() + moonsWanted.size()));
	for(std::vector<World>::const_iterator m(moons.begin());m!=moons.end();++m) {
		outp.append((uint8_t)m->type());
		outp.append((uint64_t)m->id());
		outp.append((uint64_t)m->timestamp());
	}
	for(std::vector<uint64_t>::const_iterator m(moonsWanted.begin());m!=moonsWanted.end();++m) {
		outp.append((uint8_t)World::TYPE_MOON);
		outp.append(*m);
		outp.append((uint64_t)0);
	}

	outp.cryptField(_key,startCryptedPortionAt,outp.size() - startCryptedPortionAt);

	RR->node->expectReplyTo(outp.packetId());

	if (atAddress) {
		outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
	}
}

void Peer::attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello)
{
	if ( (!sendFullHello) && (_vProto >= 5) && (!((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0))) ) {
		Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
		RR->node->expectReplyTo(outp.packetId());
		outp.armor(_key,true);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		sendHELLO(tPtr,localSocket,atAddress,now);
	}
}

void Peer::tryMemorizedPath(void *tPtr,int64_t now)
{
	if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
		_lastTriedMemorizedPath = now;
		InetAddress mp;
		if (RR->node->externalPathLookup(tPtr,_id.address(),-1,mp))
			attemptToContactAt(tPtr,-1,mp,now,true);
	}
}

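/*
 * Periodic per-peer maintenance: refreshes multipath state, emits aggregate
 * link traces, prunes expired or lower-priority paths, and sends either a
 * full HELLO or a lightweight ECHO keepalive on each path that needs one.
 * Returns a bit mask of address families pinged (0x1 for IPv4, 0x2 for IPv6).
 */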
unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
{
	unsigned int sent = 0;

	Mutex::Lock _l(_paths_m);

	processBackgroundPeerTasks(now);

	// Emit traces regarding aggregate link status
	if (_canUseMultipath) {
		int alivePathCount = aggregateLinkPhysicalPathCount();
		if ((now - _lastAggregateStatsReport) > ZT_PATH_AGGREGATE_STATS_REPORT_INTERVAL) {
			_lastAggregateStatsReport = now;
			if (alivePathCount) {
				RR->t->peerLinkAggregateStatistics(NULL,*this);
			}
		}
		if (alivePathCount < 2 && _linkIsRedundant) {
			_linkIsRedundant = !_linkIsRedundant;
			RR->t->peerLinkNoLongerAggregate(NULL,*this);
		}
		if (alivePathCount > 1 && !_linkIsRedundant) {
			_linkIsRedundant = !_linkIsRedundant;
			RR->t->peerLinkNoLongerAggregate(NULL,*this);
		}
	}

	// Right now we only keep pinging links that have the maximum priority. The
	// priority is used to track cluster redirections, meaning that when a cluster
	// redirects us its redirect target links override all other links and we
	// let those old links expire.
	long maxPriority = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p)
			maxPriority = std::max(_paths[i].priority,maxPriority);
		else break;
	}

	const bool sendFullHello = ((now - _lastSentFullHello) >= ZT_PEER_PING_PERIOD);
	_lastSentFullHello = now;

	unsigned int j = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			// Clean expired and reduced priority paths
			if ( ((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION) && (_paths[i].priority == maxPriority) ) {
				if ((sendFullHello)||(_paths[i].p->needsHeartbeat(now))) {
					attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,sendFullHello);
					_paths[i].p->sent(now);
					sent |= (_paths[i].p->address().ss_family == AF_INET) ? 0x1 : 0x2;
				}
				if (i != j)
					_paths[j] = _paths[i];
				++j;
			}
		} else break;
	}
	if (canUseMultipath()) {
		while(j < ZT_MAX_PEER_NETWORK_PATHS) {
			_paths[j].lr = 0;
			_paths[j].p.zero();
			_paths[j].priority = 1;
			++j;
		}
	}
	return sent;
}

void Peer::clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now)
{
	SharedPtr<Path> np(RR->topology->getPath(originatingPath->localSocket(),remoteAddress));
	RR->t->peerRedirected(tPtr,0,*this,np);

	attemptToContactAt(tPtr,originatingPath->localSocket(),remoteAddress,now,true);

	{
		Mutex::Lock _l(_paths_m);

		// New priority is higher than the priority of the originating path (if known)
		long newPriority = 1;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (_paths[i].p == originatingPath) {
					newPriority = _paths[i].priority;
					break;
				}
			} else break;
		}
		newPriority += 2;

		// Erase any paths with lower priority than this one or that are duplicate
		// IPs and add this path.
		unsigned int j = 0;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if ((_paths[i].priority >= newPriority)&&(!_paths[i].p->address().ipsEqual2(remoteAddress))) {
					if (i != j)
						_paths[j] = _paths[i];
					++j;
				}
			}
		}
		if (j < ZT_MAX_PEER_NETWORK_PATHS) {
			_paths[j].lr = now;
			_paths[j].p = np;
			_paths[j].priority = newPriority;
			++j;
			while (j < ZT_MAX_PEER_NETWORK_PATHS) {
				_paths[j].lr = 0;
				_paths[j].p.zero();
				_paths[j].priority = 1;
				++j;
			}
		}
	}
}

void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
	Mutex::Lock _l(_paths_m);
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			if ((_paths[i].p->address().ss_family == inetAddressFamily)&&(_paths[i].p->ipScope() == scope)) {
				attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,false);
				_paths[i].p->sent(now);
				_paths[i].lr = 0; // path will not be used unless it speaks again
			}
		} else break;
	}
}

} // namespace ZeroTier