// Topology.cpp
/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
  13. #include "Topology.hpp"
  14. namespace ZeroTier {
  15. Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
  16. RR(renv)
  17. {
  18. uint64_t idtmp[2];
  19. idtmp[0] = 0;
  20. idtmp[1] = 0;
  21. Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_ROOTS, idtmp));
  22. if (!data.empty()) {
  23. uint8_t *dptr = data.data();
  24. int drem = (int)data.size();
  25. for (;;) {
  26. Identity id;
  27. int l = id.unmarshal(dptr, drem);
  28. if ((l > 0) && (id)) {
  29. ZT_SPEW("restored root %s", id.address().toString().c_str());
  30. if ((drem -= l) <= 0)
  31. break;
  32. } else break;
  33. }
  34. }
  35. m_updateRootPeers(tPtr);
  36. }
  37. SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
  38. {
  39. RWMutex::Lock _l(m_peers_l);
  40. SharedPtr< Peer > &hp = m_peers[peer->address()];
  41. if (hp)
  42. return hp;
  43. m_loadCached(tPtr, peer->address(), hp);
  44. if (hp)
  45. return hp;
  46. hp = peer;
  47. return peer;
  48. }
  49. struct p_RootSortComparisonOperator
  50. {
  51. ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
  52. {
  53. // Sort in inverse order of latency with lowest latency first (and -1 last).
  54. const int bb = b->latency();
  55. if (bb < 0)
  56. return true;
  57. return bb < a->latency();
  58. }
  59. };
  60. SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
  61. {
  62. if ((id != RR->identity) && id.locallyValidate()) {
  63. RWMutex::Lock l1(m_peers_l);
  64. m_roots.insert(id);
  65. m_updateRootPeers(tPtr);
  66. m_writeRootList(tPtr);
  67. for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
  68. if ((*p)->identity() == id)
  69. return *p;
  70. }
  71. }
  72. return SharedPtr< Peer >();
  73. }
  74. bool Topology::removeRoot(void *const tPtr, Address address)
  75. {
  76. RWMutex::Lock l1(m_peers_l);
  77. for (Vector< SharedPtr< Peer > >::const_iterator r(m_rootPeers.begin()); r != m_rootPeers.end(); ++r) {
  78. if ((*r)->address() == address) {
  79. Set< Identity >::iterator rr(m_roots.find((*r)->identity()));
  80. if (rr != m_roots.end()) {
  81. m_roots.erase(rr);
  82. m_updateRootPeers(tPtr);
  83. m_writeRootList(tPtr);
  84. return true;
  85. }
  86. }
  87. }
  88. return false;
  89. }
  90. void Topology::rankRoots()
  91. {
  92. RWMutex::Lock l1(m_peers_l);
  93. std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootSortComparisonOperator());
  94. }
  95. void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
  96. {
  97. // Delete peers that haven't said anything in ZT_PEER_ALIVE_TIMEOUT.
  98. {
  99. RWMutex::Lock l1(m_peers_l);
  100. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();) {
  101. // TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
  102. // a network frame or non-trivial control packet.
  103. if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.count(i->second->identity()) == 0)) {
  104. i->second->save(tPtr);
  105. m_peers.erase(i++);
  106. } else ++i;
  107. }
  108. }
  109. // Delete paths that are no longer held by anyone else ("weak reference" type behavior).
  110. {
  111. RWMutex::Lock l1(m_paths_l);
  112. for (Map< uint64_t, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();) {
  113. if (i->second.weakGC())
  114. m_paths.erase(i++);
  115. else ++i;
  116. }
  117. }
  118. }
  119. void Topology::saveAll(void *tPtr)
  120. {
  121. RWMutex::RLock l(m_peers_l);
  122. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
  123. i->second->save(tPtr);
  124. }
  125. void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer)
  126. {
  127. try {
  128. uint64_t id[2];
  129. id[0] = zta.toInt();
  130. id[1] = 0;
  131. Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
  132. if (data.size() > 8) {
  133. const uint8_t *d = data.data();
  134. int dl = (int)data.size();
  135. const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
  136. Peer *const p = new Peer(RR);
  137. int n = p->unmarshal(d + 8, dl - 8);
  138. if (n < 0) {
  139. delete p;
  140. return;
  141. }
  142. if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
  143. // TODO: handle many peers, same address (?)
  144. peer.set(p);
  145. return;
  146. }
  147. }
  148. } catch (...) {
  149. peer.zero();
  150. }
  151. }
  152. void Topology::m_writeRootList(void *tPtr)
  153. {
  154. // assumes m_peers_l is locked for read or write
  155. uint8_t *const roots = (uint8_t *)malloc((ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 2) * m_roots.size());
  156. if (roots) { // sanity check
  157. int p = 0;
  158. for (Set< Identity >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
  159. const int pp = r->marshal(roots + p, false);
  160. if (pp > 0)
  161. p += pp;
  162. }
  163. uint64_t id[2];
  164. id[0] = 0;
  165. id[1] = 0;
  166. RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_ROOTS, id, roots, (unsigned int)p);
  167. free(roots);
  168. }
  169. }
  170. void Topology::m_updateRootPeers(void *tPtr)
  171. {
  172. // assumes m_peers_l is locked for write
  173. Vector< SharedPtr< Peer > > rp;
  174. for (Map< Identity, Set< SubscriptionKeyHash > >::iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
  175. Map< Address, SharedPtr< Peer > >::iterator pp(m_peers.find(r->first.address()));
  176. SharedPtr< Peer > p;
  177. if (pp != m_peers.end())
  178. p = pp->second;
  179. if (!p)
  180. m_loadCached(tPtr, r->first.address(), p);
  181. if ((!p) || (p->identity() != r->first)) {
  182. p.set(new Peer(RR));
  183. p->init(r->first);
  184. m_peers[r->first.address()] = p;
  185. }
  186. rp.push_back(p);
  187. }
  188. std::sort(rp.begin(), rp.end(), p_RootSortComparisonOperator());
  189. m_rootPeers.swap(rp);
  190. }
  191. } // namespace ZeroTier