// Topology.cpp
  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include "Topology.hpp"
  14. namespace ZeroTier {
// Process-wide secure-random salt, initialized once at static-init time.
// Presumably mixed into path hashing to make hash keys unpredictable -- see Topology.hpp.
const uint64_t Topology::s_pathHashSalt = Utils::getSecureRandomU64();
  16. // Sorts roots so as to put the lowest latency alive root first.
  17. struct _RootSortComparisonOperator
  18. {
  19. ZT_INLINE _RootSortComparisonOperator(const int64_t now) : _now(now) {}
  20. ZT_INLINE bool operator()(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b)
  21. {
  22. const int64_t now = _now;
  23. if (a->active(now)) {
  24. if (b->active(now))
  25. return (a->latency() < b->latency());
  26. return true;
  27. }
  28. return a->lastReceive() < b->lastReceive();
  29. }
  30. const int64_t _now;
  31. };
  32. Topology::Topology(const RuntimeEnvironment *renv,void *tPtr) :
  33. RR(renv),
  34. _numConfiguredPhysicalPaths(0)
  35. {
  36. uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
  37. std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp));
  38. if (!data.empty()) {
  39. uint8_t *dptr = data.data();
  40. int drem = (int)data.size();
  41. while (drem > 0) {
  42. Identity id;
  43. int l = id.unmarshal(dptr,drem);
  44. if (l > 0) {
  45. _roots.insert(id);
  46. dptr += l;
  47. drem -= l;
  48. }
  49. }
  50. }
  51. for(std::set<Identity>::const_iterator r(_roots.begin());r!=_roots.end();++r) {
  52. SharedPtr<Peer> p;
  53. _loadCached(tPtr,r->address(),p);
  54. if ((!p)||(p->identity() != *r)) {
  55. p.set(new Peer(RR));
  56. p->init(*r);
  57. }
  58. _rootPeers.push_back(p);
  59. _peers[p->address()] = p;
  60. _peersByIncomingProbe[p->incomingProbe()] = p;
  61. _peersByIdentityHash[p->identity().fingerprint()] = p;
  62. }
  63. }
Topology::~Topology()
{
	// Nothing to do explicitly: the peer/path maps hold SharedPtr values that
	// release themselves when the containers are destroyed.
}
  67. SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
  68. {
  69. RWMutex::Lock _l(_peers_l);
  70. SharedPtr<Peer> &hp = _peers[peer->address()];
  71. if (hp)
  72. return hp;
  73. _loadCached(tPtr,peer->address(),hp);
  74. if (hp) {
  75. _peersByIncomingProbe[peer->incomingProbe()] = hp;
  76. _peersByIdentityHash[peer->identity().fingerprint()] = hp;
  77. return hp;
  78. }
  79. hp = peer;
  80. _peersByIncomingProbe[peer->incomingProbe()] = peer;
  81. _peersByIdentityHash[peer->identity().fingerprint()] = peer;
  82. return peer;
  83. }
  84. void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
  85. {
  86. RWMutex::RLock l(_peers_l);
  87. allPeers.clear();
  88. allPeers.reserve(_peers.size());
  89. for(Map< Address,SharedPtr<Peer> >::const_iterator i(_peers.begin());i!=_peers.end();++i)
  90. allPeers.push_back(i->second);
  91. }
// Set, replace, or remove the configuration for a physical path.
//
// pathNetwork: network/netmask selecting the physical path, or NULL to clear
//              ALL configured paths
// pathConfig:  configuration to apply for pathNetwork, or NULL to remove any
//              existing configuration for that network
//
// NOTE(review): the fixed array is rebuilt without holding a lock here;
// presumably callers serialize this against concurrent readers -- confirm.
void Topology::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig)
{
	if (!pathNetwork) {
		// NULL network: forget all configured paths.
		_numConfiguredPhysicalPaths = 0;
	} else {
		// Rebuild via a sorted temporary map so entries stay ordered by network
		// and an update for an existing network replaces rather than duplicates.
		std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
		for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
			cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;
		if (pathConfig) {
			ZT_PhysicalPathConfiguration pc(*pathConfig);
			// Clamp MTU to [ZT_MIN_UDP_MTU, ZT_MAX_UDP_MTU]; <= 0 means "default."
			if (pc.mtu <= 0)
				pc.mtu = ZT_DEFAULT_UDP_MTU;
			else if (pc.mtu < ZT_MIN_UDP_MTU)
				pc.mtu = ZT_MIN_UDP_MTU;
			else if (pc.mtu > ZT_MAX_UDP_MTU)
				pc.mtu = ZT_MAX_UDP_MTU;
			cpaths[*(reinterpret_cast<const InetAddress *>(pathNetwork))] = pc;
		} else {
			// NULL config: remove the entry for this network, if any.
			cpaths.erase(*(reinterpret_cast<const InetAddress *>(pathNetwork)));
		}
		// Copy back at most ZT_MAX_CONFIGURABLE_PATHS entries; extras are dropped.
		unsigned int cnt = 0;
		for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
			_physicalPathConfig[cnt].first = i->first;
			_physicalPathConfig[cnt].second = i->second;
			++cnt;
		}
		_numConfiguredPhysicalPaths = cnt;
	}
}
  121. void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstrap)
  122. {
  123. if (id == RR->identity) return; // sanity check
  124. RWMutex::Lock l1(_peers_l);
  125. std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
  126. if (ir.second) {
  127. SharedPtr<Peer> &p = _peers[id.address()];
  128. if (!p) {
  129. p.set(new Peer(RR));
  130. p->init(id);
  131. if (bootstrap)
  132. p->setBootstrap(Endpoint(bootstrap));
  133. _peersByIncomingProbe[p->incomingProbe()] = p;
  134. _peersByIdentityHash[p->identity().fingerprint()] = p;
  135. }
  136. _rootPeers.push_back(p);
  137. uint8_t *const roots = (uint8_t *)malloc(ZT_IDENTITY_MARSHAL_SIZE_MAX * _roots.size());
  138. if (roots) {
  139. int p = 0;
  140. for(std::set<Identity>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
  141. int pp = i->marshal(roots + p,false);
  142. if (pp > 0)
  143. p += pp;
  144. }
  145. uint64_t id[2];
  146. id[0] = 0;
  147. id[1] = 0;
  148. RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_ROOTS,id,roots,(unsigned int)p);
  149. free(roots);
  150. }
  151. }
  152. }
  153. bool Topology::removeRoot(const Identity &id)
  154. {
  155. RWMutex::Lock l1(_peers_l);
  156. std::set<Identity>::iterator r(_roots.find(id));
  157. if (r != _roots.end()) {
  158. for(std::vector< SharedPtr<Peer> >::iterator p(_rootPeers.begin());p!=_rootPeers.end();++p) {
  159. if ((*p)->identity() == id) {
  160. _rootPeers.erase(p);
  161. break;
  162. }
  163. }
  164. _roots.erase(r);
  165. return true;
  166. }
  167. return false;
  168. }
// Re-sort the root peer list so the best (alive, lowest latency) root is first.
// Takes the write lock since the shared _rootPeers vector is reordered in place.
void Topology::rankRoots(const int64_t now)
{
	RWMutex::Lock l1(_peers_l);
	std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
}
// Periodic housekeeping: expire dead non-root peers (saving their state first)
// and drop paths that are both unreferenced and no longer alive.
void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
{
	{
		RWMutex::Lock l1(_peers_l);
		for(Map< Address,SharedPtr<Peer> >::iterator i(_peers.begin());i!=_peers.end();) {
			// Roots are never expired here, even when not currently alive.
			if ( (!i->second->alive(now)) && (_roots.count(i->second->identity()) == 0) ) {
				i->second->save(tPtr); // persist before forgetting
				_peersByIncomingProbe.erase(i->second->incomingProbe());
				_peersByIdentityHash.erase(i->second->identity().fingerprint());
				// Post-increment before erase keeps the iterator valid.
				_peers.erase(i++);
			} else ++i;
		}
	}
	{
		RWMutex::Lock l1(_paths_l);
		for(Map< uint64_t,SharedPtr<Path> >::iterator i(_paths.begin());i!=_paths.end();) {
			// references() <= 1 means this map holds the only reference.
			if ((i->second.references() <= 1)&&(!i->second->alive(now)))
				_paths.erase(i++);
			else ++i;
		}
	}
}
  196. void Topology::saveAll(void *tPtr)
  197. {
  198. RWMutex::RLock l(_peers_l);
  199. for(Map< Address,SharedPtr<Peer> >::iterator i(_peers.begin());i!=_peers.end();++i)
  200. i->second->save(tPtr);
  201. }
  202. void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
  203. {
  204. try {
  205. uint64_t id[2];
  206. id[0] = zta.toInt();
  207. id[1] = 0;
  208. std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
  209. if (data.size() > 8) {
  210. const uint8_t *d = data.data();
  211. int dl = (int)data.size();
  212. const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
  213. Peer *const p = new Peer(RR);
  214. int n = p->unmarshal(d + 8,dl - 8);
  215. if (n < 0) {
  216. delete p;
  217. return;
  218. }
  219. if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
  220. // TODO: handle many peers, same address (?)
  221. peer.set(p);
  222. return;
  223. }
  224. }
  225. } catch ( ... ) {
  226. peer.zero();
  227. }
  228. }
  229. } // namespace ZeroTier