// Topology.cpp
  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include "Topology.hpp"
  14. namespace ZeroTier {
// Per-process random salt, initialized once at startup from a secure RNG.
// NOTE(review): presumably mixed into path hash-table keys so their layout is
// unpredictable across runs -- the use site is not visible in this file; confirm
// in Topology.hpp.
const uint64_t Topology::s_pathHashSalt = Utils::getSecureRandomU64();
  16. // Sorts roots so as to put the lowest latency alive root first.
  17. struct _RootSortComparisonOperator
  18. {
  19. ZT_ALWAYS_INLINE _RootSortComparisonOperator(const int64_t now) : _now(now) {}
  20. ZT_ALWAYS_INLINE bool operator()(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b)
  21. {
  22. const int64_t now = _now;
  23. if (a->active(now)) {
  24. if (b->active(now))
  25. return (a->latency() < b->latency());
  26. return true;
  27. }
  28. return a->lastReceive() < b->lastReceive();
  29. }
  30. const int64_t _now;
  31. };
  32. Topology::Topology(const RuntimeEnvironment *renv,const Identity &myId,void *tPtr) :
  33. RR(renv),
  34. _myIdentity(myId),
  35. _numConfiguredPhysicalPaths(0),
  36. _peers(256),
  37. _peersByIncomingProbe(256),
  38. _peersByIdentityHash(256),
  39. _paths(1024)
  40. {
  41. uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
  42. std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp));
  43. if (!data.empty()) {
  44. uint8_t *dptr = data.data();
  45. int drem = (int)data.size();
  46. while (drem > 0) {
  47. Identity id;
  48. int l = id.unmarshal(dptr,drem);
  49. if (l > 0) {
  50. _roots.insert(id);
  51. dptr += l;
  52. drem -= l;
  53. }
  54. }
  55. }
  56. for(std::set<Identity>::const_iterator r(_roots.begin());r!=_roots.end();++r) {
  57. SharedPtr<Peer> p;
  58. _loadCached(tPtr,r->address(),p);
  59. if ((!p)||(p->identity() != *r)) {
  60. p.set(new Peer(RR));
  61. p->init(*r);
  62. }
  63. _rootPeers.push_back(p);
  64. _peers[p->address()] = p;
  65. _peersByIncomingProbe[p->incomingProbe()] = p;
  66. _peersByIdentityHash[p->identity().hash()] = p;
  67. }
  68. }
// All members (SharedPtr references, containers) clean up via their own
// destructors; nothing to release explicitly.
Topology::~Topology()
{
}
// Adds a peer if no peer with this address exists yet. Returns the peer that
// ends up in the table: the pre-existing one, a cached one loaded from the
// state store, or (failing both) the supplied one.
SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
{
	RWMutex::Lock _l(_peers_l);

	// hp is a reference to the slot in _peers for this address (created empty
	// by operator[] if absent); assigning to hp fills that slot in place.
	SharedPtr<Peer> &hp = _peers[peer->address()];
	if (hp)
		return hp; // a peer with this address already exists; keep it

	// Prefer a cached copy from the state store over the supplied object.
	_loadCached(tPtr,peer->address(),hp);
	if (hp) {
		// NOTE(review): secondary indexes are keyed by the *supplied* peer's
		// probe/identity hash but map to the cached object -- assumes the two
		// agree for the same address; confirm against _loadCached's contract.
		_peersByIncomingProbe[peer->incomingProbe()] = hp;
		_peersByIdentityHash[peer->identity().hash()] = hp;
		return hp;
	}

	// No existing or cached peer: install the supplied one in all indexes.
	hp = peer;
	_peersByIncomingProbe[peer->incomingProbe()] = peer;
	_peersByIdentityHash[peer->identity().hash()] = peer;
	return peer;
}
  89. void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
  90. {
  91. RWMutex::RLock l(_peers_l);
  92. allPeers.clear();
  93. allPeers.reserve(_peers.size());
  94. Hashtable< Address,SharedPtr<Peer> >::Iterator i(*(const_cast<Hashtable< Address,SharedPtr<Peer> > *>(&_peers)));
  95. Address *a = nullptr;
  96. SharedPtr<Peer> *p = nullptr;
  97. while (i.next(a,p))
  98. allPeers.push_back(*p);
  99. }
  100. void Topology::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig)
  101. {
  102. if (!pathNetwork) {
  103. _numConfiguredPhysicalPaths = 0;
  104. } else {
  105. std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
  106. for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
  107. cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;
  108. if (pathConfig) {
  109. ZT_PhysicalPathConfiguration pc(*pathConfig);
  110. if (pc.mtu <= 0)
  111. pc.mtu = ZT_DEFAULT_UDP_MTU;
  112. else if (pc.mtu < ZT_MIN_UDP_MTU)
  113. pc.mtu = ZT_MIN_UDP_MTU;
  114. else if (pc.mtu > ZT_MAX_UDP_MTU)
  115. pc.mtu = ZT_MAX_UDP_MTU;
  116. cpaths[*(reinterpret_cast<const InetAddress *>(pathNetwork))] = pc;
  117. } else {
  118. cpaths.erase(*(reinterpret_cast<const InetAddress *>(pathNetwork)));
  119. }
  120. unsigned int cnt = 0;
  121. for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
  122. _physicalPathConfig[cnt].first = i->first;
  123. _physicalPathConfig[cnt].second = i->second;
  124. ++cnt;
  125. }
  126. _numConfiguredPhysicalPaths = cnt;
  127. }
  128. }
  129. void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstrap)
  130. {
  131. if (id == _myIdentity) return; // sanity check
  132. RWMutex::Lock l1(_peers_l);
  133. std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
  134. if (ir.second) {
  135. SharedPtr<Peer> &p = _peers[id.address()];
  136. if (!p) {
  137. p.set(new Peer(RR));
  138. p->init(id);
  139. if (bootstrap)
  140. p->setBootstrap(Endpoint(bootstrap));
  141. _peersByIncomingProbe[p->incomingProbe()] = p;
  142. _peersByIdentityHash[p->identity().hash()] = p;
  143. }
  144. _rootPeers.push_back(p);
  145. uint8_t *const roots = (uint8_t *)malloc(ZT_IDENTITY_MARSHAL_SIZE_MAX * _roots.size());
  146. if (roots) {
  147. int p = 0;
  148. for(std::set<Identity>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
  149. int pp = i->marshal(roots + p,false);
  150. if (pp > 0)
  151. p += pp;
  152. }
  153. uint64_t id[2];
  154. id[0] = 0;
  155. id[1] = 0;
  156. RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_ROOTS,id,roots,(unsigned int)p);
  157. free(roots);
  158. }
  159. }
  160. }
  161. bool Topology::removeRoot(const Identity &id)
  162. {
  163. RWMutex::Lock l1(_peers_l);
  164. std::set<Identity>::iterator r(_roots.find(id));
  165. if (r != _roots.end()) {
  166. for(std::vector< SharedPtr<Peer> >::iterator p(_rootPeers.begin());p!=_rootPeers.end();++p) {
  167. if ((*p)->identity() == id) {
  168. _rootPeers.erase(p);
  169. break;
  170. }
  171. }
  172. _roots.erase(r);
  173. return true;
  174. }
  175. return false;
  176. }
// Re-sorts the root peer list so the best root (active, lowest latency per
// _RootSortComparisonOperator) comes first.
void Topology::rankRoots(const int64_t now)
{
	RWMutex::Lock l1(_peers_l);
	std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
}
// Periodic housekeeping: evict peers that are no longer alive (persisting
// their state first) and prune dead, otherwise-unreferenced paths.
void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
{
	{
		RWMutex::Lock l1(_peers_l);
		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
		Address *a = nullptr;
		SharedPtr<Peer> *p = nullptr;
		while (i.next(a,p)) {
			// Roots are never evicted, even when not currently alive.
			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
				(*p)->save(tPtr); // persist before dropping the last reference
				_peersByIncomingProbe.erase((*p)->incomingProbe());
				_peersByIdentityHash.erase((*p)->identity().hash());
				// NOTE(review): erases the current entry mid-iteration --
				// assumes Hashtable::Iterator tolerates this; confirm in
				// Hashtable.hpp.
				_peers.erase(*a);
			}
		}
	}
	{
		RWMutex::Lock l1(_paths_l);
		Hashtable< uint64_t,SharedPtr<Path> >::Iterator i(_paths);
		uint64_t *k = nullptr;
		SharedPtr<Path> *p = nullptr;
		while (i.next(k,p)) {
			// references() <= 1 means this table holds the only reference,
			// so a dead path can be dropped without anyone else losing it.
			if ((p->references() <= 1)&&(!(*p)->alive(now)))
				_paths.erase(*k);
		}
	}
}
  209. void Topology::saveAll(void *tPtr)
  210. {
  211. RWMutex::RLock l(_peers_l);
  212. Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
  213. Address *a = nullptr;
  214. SharedPtr<Peer> *p = nullptr;
  215. while (i.next(a,p))
  216. (*p)->save(tPtr);
  217. }
  218. void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
  219. {
  220. try {
  221. uint64_t id[2];
  222. id[0] = zta.toInt();
  223. id[1] = 0;
  224. std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
  225. if (data.size() > 8) {
  226. const uint8_t *d = data.data();
  227. int dl = (int)data.size();
  228. const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
  229. Peer *const p = new Peer(RR);
  230. int n = p->unmarshal(d + 8,dl - 8);
  231. if (n < 0) {
  232. delete p;
  233. return;
  234. }
  235. if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
  236. // TODO: handle many peers, same address (?)
  237. peer.set(p);
  238. return;
  239. }
  240. }
  241. } catch ( ... ) {
  242. peer.zero();
  243. }
  244. }
  245. } // namespace ZeroTier