Topology.cpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Topology.hpp"

namespace ZeroTier {

const uint64_t Topology::s_pathHashSalt = Utils::getSecureRandomU64();
// Sorts roots so as to put the lowest latency alive root first.
struct _RootSortComparisonOperator
{
	ZT_ALWAYS_INLINE _RootSortComparisonOperator(const int64_t now) : _now(now) {}
	ZT_ALWAYS_INLINE bool operator()(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b)
	{
		const int64_t now = _now;
		if (a->active(now)) {
			if (b->active(now))
				return (a->latency() < b->latency()); // both active: lowest latency first
			return true; // only a is active: a sorts first
		}
		if (b->active(now))
			return false; // only b is active: b sorts first (keeps the ordering strict-weak for std::sort)
		return a->lastReceive() < b->lastReceive(); // neither is active: fall back to last receive time
	}
	const int64_t _now;
};
Topology::Topology(const RuntimeEnvironment *renv,const Identity &myId,void *tPtr) :
	RR(renv),
	_myIdentity(myId),
	_numConfiguredPhysicalPaths(0),
	_peers(128),
	_paths(256)
{
	// Load the persisted root set (a concatenation of marshaled identities) from the state store.
	uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
	std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp));
	if (!data.empty()) {
		uint8_t *dptr = data.data();
		int drem = (int)data.size();
		while (drem > 0) {
			Identity id;
			const int l = id.unmarshal(dptr,drem);
			if (l > 0) {
				_roots.insert(id);
				dptr += l;
				drem -= l;
			} else {
				break; // stop on a truncated or corrupt identity rather than looping forever
			}
		}
	}

	// Create or load a Peer object for each root.
	for(std::set<Identity>::const_iterator r(_roots.begin());r!=_roots.end();++r) {
		SharedPtr<Peer> p;
		_loadCached(tPtr,r->address(),p);
		if ((!p)||(p->identity() != *r)) {
			p.set(new Peer(RR));
			p->init(*r);
		}
		_rootPeers.push_back(p);
	}
}
Topology::~Topology()
{
}
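// Returns the peer already known for this address if there is one; otherwise adds the supplied
// peer, preferring a cached copy from the state store over the supplied object when one exists.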
SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
{
	RWMutex::Lock _l(_peers_l);
	SharedPtr<Peer> &hp = _peers[peer->address()];
	if (hp)
		return hp;
	_loadCached(tPtr,peer->address(),hp);
	if (hp) {
		_peersByIncomingProbe[peer->incomingProbe()] = hp;
		return hp;
	}
	hp = peer;
	_peersByIncomingProbe[peer->incomingProbe()] = peer;
	return peer;
}
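// Copies all currently known peers into the supplied vector.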
void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
{
	RWMutex::RLock l(_peers_l);
	allPeers.clear();
	allPeers.reserve(_peers.size());
	Hashtable< Address,SharedPtr<Peer> >::Iterator i(*(const_cast<Hashtable< Address,SharedPtr<Peer> > *>(&_peers)));
	Address *a = nullptr;
	SharedPtr<Peer> *p = nullptr;
	while (i.next(a,p))
		allPeers.push_back(*p);
}
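// Adds, replaces, or (when pathConfig is NULL) removes the configuration entry for a physical
// path network; passing a NULL pathNetwork clears all entries. Non-positive MTUs become
// ZT_DEFAULT_PHYSMTU and other values are clamped to [ZT_MIN_PHYSMTU, ZT_MAX_PHYSMTU].
//
// A minimal usage sketch (hypothetical caller and variable names, not from this file):
//   ZT_PhysicalPathConfiguration pc; memset(&pc,0,sizeof(pc)); pc.mtu = 1400;
//   topology.setPhysicalPathConfiguration(&someSockaddrStorage,&pc);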
void Topology::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig)
{
	if (!pathNetwork) {
		_numConfiguredPhysicalPaths = 0;
	} else {
		std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
		for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
			cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;

		if (pathConfig) {
			ZT_PhysicalPathConfiguration pc(*pathConfig);
			if (pc.mtu <= 0)
				pc.mtu = ZT_DEFAULT_PHYSMTU;
			else if (pc.mtu < ZT_MIN_PHYSMTU)
				pc.mtu = ZT_MIN_PHYSMTU;
			else if (pc.mtu > ZT_MAX_PHYSMTU)
				pc.mtu = ZT_MAX_PHYSMTU;
			cpaths[*(reinterpret_cast<const InetAddress *>(pathNetwork))] = pc;
		} else {
			cpaths.erase(*(reinterpret_cast<const InetAddress *>(pathNetwork)));
		}

		unsigned int cnt = 0;
		for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
			_physicalPathConfig[cnt].first = i->first;
			_physicalPathConfig[cnt].second = i->second;
			++cnt;
		}
		_numConfiguredPhysicalPaths = cnt;
	}
}
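// Adds a root identity (if not already present), creates or reuses its Peer object, and persists
// the updated root set to the state store. An optional bootstrap address seeds the peer's endpoint.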
void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstrap)
{
	if (id == _myIdentity) return; // sanity check
	RWMutex::Lock l1(_peers_l);
	std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
	if (ir.second) {
		SharedPtr<Peer> &p = _peers[id.address()];
		if (!p) {
			p.set(new Peer(RR));
			p->init(id);
			if (bootstrap)
				p->setBootstrap(Endpoint(bootstrap));
		}
		_rootPeers.push_back(p);

		// Persist the updated root set as a concatenation of marshaled identities.
		uint8_t *const roots = (uint8_t *)malloc(ZT_IDENTITY_MARSHAL_SIZE_MAX * _roots.size());
		if (roots) {
			int rootsLen = 0;
			for(std::set<Identity>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
				const int il = i->marshal(roots + rootsLen,false);
				if (il > 0)
					rootsLen += il;
			}
			uint64_t idtmp[2];
			idtmp[0] = 0;
			idtmp[1] = 0;
			RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp,roots,(unsigned int)rootsLen);
			free(roots);
		}
	}
}
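// Removes a root identity and its entry in the ranked root peer list; returns true if the
// identity was actually a root.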
bool Topology::removeRoot(const Identity &id)
{
	RWMutex::Lock l1(_peers_l);
	std::set<Identity>::iterator r(_roots.find(id));
	if (r != _roots.end()) {
		for(std::vector< SharedPtr<Peer> >::iterator p(_rootPeers.begin());p!=_rootPeers.end();++p) {
			if ((*p)->identity() == id) {
				_rootPeers.erase(p);
				break;
			}
		}
		_roots.erase(r);
		return true;
	}
	return false;
}
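// Re-sorts the root peer list so the best (active, lowest latency) root comes first. A hypothetical
// caller would invoke this periodically, e.g. topology.rankRoots(now) from a node's task loop.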
void Topology::rankRoots(const int64_t now)
{
	RWMutex::Lock l1(_peers_l);
	std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
}
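// Periodic housekeeping for the peer and path tables.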
void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
{
	// Drop peers that are no longer alive, saving them to cached state first. Roots are never dropped here.
	{
		RWMutex::Lock l1(_peers_l);
		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
		Address *a = nullptr;
		SharedPtr<Peer> *p = nullptr;
		while (i.next(a,p)) {
			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
				(*p)->save(tPtr);
				_peersByIncomingProbe.erase((*p)->incomingProbe());
				_peers.erase(*a);
			}
		}
	}

	// Drop paths that nothing else references (a count of one means only this table still holds them).
	{
		RWMutex::Lock l1(_paths_l);
		Hashtable< uint64_t,SharedPtr<Path> >::Iterator i(_paths);
		uint64_t *k = nullptr;
		SharedPtr<Path> *p = nullptr;
		while (i.next(k,p)) {
			if (p->references() <= 1)
				_paths.erase(*k);
		}
	}
}
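// Saves all currently known peers to the cached-peer state store.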
void Topology::saveAll(void *tPtr)
{
	RWMutex::RLock l(_peers_l);
	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
	Address *a = nullptr;
	SharedPtr<Peer> *p = nullptr;
	while (i.next(a,p))
		(*p)->save(tPtr);
}
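// Attempts to load a cached Peer for the given address from the state store. On success 'peer'
// is set; otherwise it is left unchanged (or zeroed if an exception occurs).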
void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
{
	try {
		uint64_t id[2];
		id[0] = zta.toInt();
		id[1] = 0;
		std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
		if (!data.empty()) {
			const uint8_t *d = data.data();
			int dl = (int)data.size();
			for (;;) {
				Peer *const p = new Peer(RR);
				const int n = p->unmarshal(d,dl);
				if (n > 0) {
					// TODO: will eventually handle multiple peers
					peer.set(p);
					return;
				} else {
					delete p;
					break; // stop on an invalid or truncated cached record instead of looping forever
				}
			}
		}
	} catch ( ... ) {
		peer.zero();
	}
}

} // namespace ZeroTier