Peer.cpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Constants.hpp"
#include "Peer.hpp"
#include "Node.hpp"
#include "Switch.hpp"
#include "Network.hpp"
#include "SelfAwareness.hpp"
#include "Packet.hpp"
#include "Trace.hpp"
#include "InetAddress.hpp"

#include <set>
#include <algorithm> // std::sort (used by _prioritizePaths())

namespace ZeroTier {
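
// Sort predicate used by _prioritizePaths(): paths that have received traffic
// (lastIn() > 0) sort ahead of dead or empty path slots.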
struct _PathPriorityComparisonOperator
{
    ZT_ALWAYS_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const
    {
        return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
    }
};

Peer::Peer(const RuntimeEnvironment *renv) :
    RR(renv),
    _lastReceive(0),
    _lastWhoisRequestReceived(0),
    _lastEchoRequestReceived(0),
    _lastPushDirectPathsReceived(0),
    _lastAttemptedP2PInit(0),
    _lastTriedStaticPath(0),
    _lastPrioritizedPaths(0),
    _latency(0xffff),
    _alivePathCount(0)
{
}
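
// Bind this peer object to a local/remote identity pair and derive the shared
// key via identity agreement. Returns true on success; if the peer identity is
// unchanged this is a no-op that succeeds.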
bool Peer::init(const Identity &myIdentity,const Identity &peerIdentity)
{
    if (_id == peerIdentity)
        return true;
    _id = peerIdentity;
    _vProto = 0;
    _vMajor = 0;
    _vMinor = 0;
    _vRevision = 0;
    return myIdentity.agree(peerIdentity,_key);
}
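
// Called for every valid packet received from this peer. For direct (zero hop)
// packets this learns or confirms the inbound path: an OK reply arriving on an
// unknown path may claim a path slot (replacing the matching or least recently
// used entry), while other verbs cause a HELLO to be sent to confirm the path
// first. Independently, it periodically re-attempts P2P setup by sending HELLOs
// to the bootstrap / externally looked up addresses and pushing our own
// reachable addresses to the peer via VERB_PUSH_DIRECT_PATHS.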
void Peer::received(
    void *tPtr,
    const SharedPtr<Path> &path,
    const unsigned int hops,
    const uint64_t packetId,
    const unsigned int payloadLength,
    const Packet::Verb verb,
    const uint64_t inRePacketId,
    const Packet::Verb inReVerb,
    const uint64_t networkId)
{
    const int64_t now = RR->node->now();

    _lastReceive = now;

    if (hops == 0) {
        _lock.rlock();
        for(int i=0;i<(int)_alivePathCount;++i) {
            if (_paths[i] == path) {
                _lock.runlock();
                goto path_check_done;
            }
        }
        _lock.runlock();

        if (verb == Packet::VERB_OK) {
            RWMutex::Lock l(_lock);

            int64_t lastReceiveTimeMax = 0;
            int lastReceiveTimeMaxAt = 0;
            for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
                if ((_paths[i]) && // empty slots are handled in the else branch below
                    (_paths[i]->address().ss_family == path->address().ss_family) &&
                    (_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
                    (_paths[i]->address().ipsEqual2(path->address()))) {
                    // Replace older path if everything is the same except the port number.
                    _paths[i] = path;
                    goto path_check_done;
                } else {
                    if (_paths[i]) {
                        if (_paths[i]->lastIn() > lastReceiveTimeMax) {
                            lastReceiveTimeMax = _paths[i]->lastIn();
                            lastReceiveTimeMaxAt = i;
                        }
                    } else {
                        lastReceiveTimeMax = 0x7fffffffffffffffLL;
                        lastReceiveTimeMaxAt = i;
                    }
                }
            }

            _lastPrioritizedPaths = now;
            _paths[lastReceiveTimeMaxAt] = path;
            _bootstrap = path->address();
            _prioritizePaths(now);
            RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
        } else {
            if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
                sendHELLO(tPtr,path->localSocket(),path->address(),now);
                path->sent(now);
                RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
            }
        }
    }

path_check_done:
    const int64_t sinceLastP2PInit = now - _lastAttemptedP2PInit;
    if (sinceLastP2PInit >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
        _lastAttemptedP2PInit = now;

        InetAddress addr;
        if ((_bootstrap.type() == Endpoint::INETADDR_V4)||(_bootstrap.type() == Endpoint::INETADDR_V6))
            sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);
        if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
            if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr))
                sendHELLO(tPtr,-1,addr,now);
        }

        std::vector<ZT_InterfaceAddress> localInterfaceAddresses(RR->node->localInterfaceAddresses());
        std::multimap<unsigned long,InetAddress> detectedAddresses(RR->sa->externalAddresses(now));
        std::set<InetAddress> addrs;
        for(std::vector<ZT_InterfaceAddress>::const_iterator i(localInterfaceAddresses.begin());i!=localInterfaceAddresses.end();++i)
            addrs.insert(asInetAddress(i->address));
        for(std::multimap<unsigned long,InetAddress>::const_reverse_iterator i(detectedAddresses.rbegin());i!=detectedAddresses.rend();++i) {
            if (i->first <= 1)
                break;
            if (addrs.count(i->second) == 0) {
                addrs.insert(i->second);
                break;
            }
        }

        if (!addrs.empty()) {
            ScopedPtr<Packet> outp(new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS));
            outp->addSize(2); // leave room for count
            unsigned int count = 0;
            for(std::set<InetAddress>::iterator a(addrs.begin());a!=addrs.end();++a) {
                uint8_t addressType = 4;
                uint8_t addressLength = 6;
                unsigned int ipLength = 4;
                const void *rawIpData = (const void *)0;
                uint16_t port = 0;
                switch(a->ss_family) {
                    case AF_INET:
                        rawIpData = &(reinterpret_cast<const sockaddr_in *>(&(*a))->sin_addr.s_addr);
                        port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in *>(&(*a))->sin_port);
                        break;
                    case AF_INET6:
                        rawIpData = reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_addr.s6_addr;
                        port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_port);
                        addressType = 6;
                        addressLength = 18;
                        ipLength = 16;
                        break;
                    default:
                        continue;
                }

                outp->append((uint8_t)0); // no flags
                outp->append((uint16_t)0); // no extensions
                outp->append(addressType);
                outp->append(addressLength);
                outp->append(rawIpData,ipLength);
                outp->append(port);

                ++count;
                if (outp->size() >= (ZT_PROTO_MAX_PACKET_LENGTH - 32))
                    break;
            }

            if (count > 0) {
                outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
                outp->compress();
                outp->armor(_key,true);
                path->send(RR,tPtr,outp->data(),outp->size(),now);
            }
        }
    }
}
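
// Returns true if a suggested new path looks worth trying: we do not already
// have a live path to that address, its IP scope is wider than any path we
// currently have, and local policy (shouldUsePathForZeroTierTraffic) allows it.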
bool Peer::shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const
{
    int maxHaveScope = -1;
    {
        RWMutex::RLock l(_lock);
        for (unsigned int i = 0; i < _alivePathCount; ++i) {
            if (_paths[i]) {
                if (_paths[i]->address().ipsEqual2(addr))
                    return false;

                int s = (int)_paths[i]->address().ipScope();
                if (s > maxHaveScope)
                    maxHaveScope = s;
            }
        }
    }
    return ( ((int)addr.ipScope() > maxHaveScope) && RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr) );
}
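
// Send a HELLO to this peer. If atAddress is non-nil the packet goes straight
// out the given local socket; otherwise it is handed to the switch for normal
// (possibly relayed) delivery. HELLOs are authenticated (MAC) but their payload
// is not encrypted, per the armor(...,false) calls below.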
void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
    Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);

    outp.append((unsigned char)ZT_PROTO_VERSION);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
    outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
    outp.append(now);
    RR->identity.serialize(outp,false);
    atAddress.serialize(outp);

    RR->node->expectReplyTo(outp.packetId());

    if (atAddress) {
        outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
        RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
    } else {
        RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
    }
}
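
// Periodic keepalive: re-prioritize paths and send a HELLO on the best path
// (or on every live path if pingAllAddressTypes is set). With no live paths,
// fall back to the bootstrap address and finally to a HELLO sent toward a
// root's physical address.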
void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
{
    RWMutex::Lock l(_lock); // write lock: _prioritizePaths() modifies _paths[] and _alivePathCount

    _lastPrioritizedPaths = now;
    _prioritizePaths(now);

    if (_alivePathCount > 0) {
        for (unsigned int i = 0; i < _alivePathCount; ++i) {
            sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
            _paths[i]->sent(now);
            if (!pingAllAddressTypes)
                return;
        }
        return;
    }

    if ((_bootstrap.type() == Endpoint::INETADDR_V4)||(_bootstrap.type() == Endpoint::INETADDR_V6))
        sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);

    SharedPtr<Peer> r(RR->topology->root());
    if ((r)&&(r.ptr() != this)) {
        SharedPtr<Path> rp(r->path(now));
        if (rp) {
            sendHELLO(tPtr,rp->localSocket(),rp->address(),now);
            rp->sent(now);
            return;
        }
    }
}
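
// Send a HELLO on every live path matching the given address family and IP
// scope, forcing those paths to be re-confirmed.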
void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
    RWMutex::RLock l(_lock);
    for(unsigned int i=0; i < _alivePathCount; ++i) {
        if ((_paths[i])&&((_paths[i]->address().ss_family == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope))) {
            sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
            _paths[i]->sent(now);
        }
    }
}
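
// Blend a new latency sample into the running estimate:
//   latency = (2 * sample + previous) / 3
// A stored value of 0xffff means "unknown", so the first valid sample is taken
// as-is and samples of 0 or >= 0xffff are ignored.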
void Peer::updateLatency(const unsigned int l)
{
    if ((l > 0)&&(l < 0xffff)) {
        unsigned int lat = _latency;
        if (lat < 0xffff) {
            _latency = (l + l + lat) / 3;
        } else {
            _latency = l;
        }
    }
}
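
// Send a packet directly to this peer over its current best path, refreshing
// the path ranking if it is stale. Returns false if no live direct path exists.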
bool Peer::sendDirect(void *tPtr,const void *data,const unsigned int len,const int64_t now)
{
    if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
        _lastPrioritizedPaths = now;
        _lock.lock();
        _prioritizePaths(now);
        if (_alivePathCount == 0) {
            _lock.unlock();
            return false;
        }
        const bool r = _paths[0]->send(RR,tPtr,data,len,now);
        _lock.unlock();
        return r;
    } else {
        _lock.rlock();
        if (_alivePathCount == 0) {
            _lock.runlock();
            return false;
        }
        const bool r = _paths[0]->send(RR,tPtr,data,len,now);
        _lock.runlock();
        return r;
    }
}
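
// Return the current best direct path, or a nil SharedPtr if there is none.
// The cached ranking is refreshed if older than ZT_PEER_PRIORITIZE_PATHS_INTERVAL.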
SharedPtr<Path> Peer::path(const int64_t now)
{
    if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
        _lastPrioritizedPaths = now;
        RWMutex::Lock l(_lock);
        _prioritizePaths(now);
        if (_alivePathCount == 0)
            return SharedPtr<Path>();
        return _paths[0];
    } else {
        RWMutex::RLock l(_lock);
        if (_alivePathCount == 0)
            return SharedPtr<Path>();
        return _paths[0];
    }
}

void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
{
    RWMutex::RLock l(_lock);
    paths.clear();
    paths.assign(_paths,_paths + _alivePathCount);
}
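
// Marshal this peer and store it via the node's state object store under
// ZT_STATE_OBJECT_PEER, keyed by this peer's ZeroTier address.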
void Peer::save(void *tPtr) const
{
    uint8_t *const buf = (uint8_t *)malloc(ZT_PEER_MARSHAL_SIZE_MAX);
    if (!buf) return;

    // marshal() takes its own read lock on _lock, so no additional locking is needed here.
    const int len = marshal(buf);

    if (len > 0) {
        uint64_t id[2];
        id[0] = _id.address().toInt();
        id[1] = 0;
        RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len);
    }

    free(buf);
}
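
// Serialize persistent peer state. Layout as written below: one version byte
// (currently 0), the identity (public part only), the locator, the bootstrap
// endpoint, four big-endian 16-bit version fields (protocol, major, minor,
// revision), and a big-endian 16-bit length of additional field data
// (currently always zero). Returns bytes written or a value <= 0 on error.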
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
{
    RWMutex::RLock l(_lock);

    data[0] = 0; // serialized peer version

    int s = _id.marshal(data + 1,false);
    if (s <= 0)
        return s;
    int p = 1 + s;

    s = _locator.marshal(data + p);
    if (s <= 0)
        return s;
    p += s;

    s = _bootstrap.marshal(data + p);
    if (s <= 0)
        return s;
    p += s;

    Utils::storeBigEndian(data + p,(uint16_t)_vProto);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
    p += 2;
    Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
    p += 2;

    data[p++] = 0;
    data[p++] = 0;

    return p;
}
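
// Deserialize peer state previously written by marshal(). Returns the number
// of bytes consumed, or a value <= 0 if the data is truncated or has an
// unrecognized serialization version.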
int Peer::unmarshal(const uint8_t *restrict data,const int len)
{
    RWMutex::Lock l(_lock);

    if ((len <= 1)||(data[0] != 0))
        return -1;

    int s = _id.unmarshal(data + 1,len - 1);
    if (s <= 0)
        return s;
    int p = 1 + s;

    s = _locator.unmarshal(data + p,len - p);
    if (s <= 0)
        return s;
    p += s;

    s = _bootstrap.unmarshal(data + p,len - p);
    if (s <= 0)
        return s;
    p += s;

    if ((p + 10) > len)
        return -1;
    _vProto = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    _vMajor = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    _vMinor = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    _vRevision = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);

    if (p > len)
        return -1;
    return p;
}
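
// Re-sort _paths[] with _PathPriorityComparisonOperator so live paths come
// first, then recount the live paths and clear the remaining slots. The caller
// must hold _lock for writing.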
void Peer::_prioritizePaths(const int64_t now)
{
    // assumes _lock is locked for writing
    std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());

    for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if ((!_paths[i]) || (!_paths[i]->alive(now))) {
            _alivePathCount = i;

            for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
                _paths[i].zero();

            return;
        }
    }

    // every path slot is occupied by a live path
    _alivePathCount = ZT_MAX_PEER_NETWORK_PATHS;
}

} // namespace ZeroTier