Topology.cpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Topology.hpp"

#include <algorithm> // std::sort
#include <cstdlib>   // malloc, free

namespace ZeroTier {
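
// Restore the persisted root list (ZT_STATE_OBJECT_ROOTS) and build the
// initial set of root peers from it.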
Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
    RR(renv)
{
    uint64_t idtmp[2];
    idtmp[0] = 0;
    idtmp[1] = 0;
    Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_ROOTS, idtmp));
    if (!data.empty()) {
        uint8_t *dptr = data.data();
        int drem = (int)data.size();
        for (;;) {
            Identity id;
            int l = id.unmarshal(dptr, drem);
            if ((l > 0) && (id)) {
                m_roots.insert(id);
                ZT_SPEW("restored root %s", id.address().toString().c_str());
                dptr += l; // advance past the identity just parsed
                if ((drem -= l) <= 0)
                    break;
            } else break;
        }
    }

    m_updateRootPeers(tPtr);
}
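
// Add a peer, returning the already known or cached peer for this address
// if one exists instead of the newly supplied object.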
SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
{
    RWMutex::Lock _l(m_peers_l);
    SharedPtr< Peer > &hp = m_peers[peer->address()];
    if (hp)
        return hp;
    m_loadCached(tPtr, peer->address(), hp);
    if (hp)
        return hp;
    hp = peer;
    return peer;
}

struct p_RootSortComparisonOperator
{
    ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
    {
        // Sort roots with the lowest latency first and unknown latency (-1) last.
        const int aa = a->latency();
        const int bb = b->latency();
        if (aa < 0)
            return false;
        if (bb < 0)
            return true;
        return aa < bb;
    }
};
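
// Add a root identity (if valid and not our own), persist the updated root
// list, and return the corresponding peer object.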
SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
{
    if ((id != RR->identity) && id.locallyValidate()) {
        RWMutex::Lock l1(m_peers_l);
        m_roots.insert(id);
        m_updateRootPeers(tPtr);
        m_writeRootList(tPtr);
        for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
            if ((*p)->identity() == id)
                return *p;
        }
    }
    return SharedPtr< Peer >();
}
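
// Remove the root with the given address, returning true if one was removed.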
bool Topology::removeRoot(void *const tPtr, Address address)
{
    RWMutex::Lock l1(m_peers_l);
    for (Vector< SharedPtr< Peer > >::const_iterator r(m_rootPeers.begin()); r != m_rootPeers.end(); ++r) {
        if ((*r)->address() == address) {
            Set< Identity >::iterator rr(m_roots.find((*r)->identity()));
            if (rr != m_roots.end()) {
                m_roots.erase(rr);
                m_updateRootPeers(tPtr);
                m_writeRootList(tPtr);
                return true;
            }
        }
    }
    return false;
}
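
// Re-sort the root peer list by measured latency.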
void Topology::rankRoots()
{
    RWMutex::Lock l1(m_peers_l);
    std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootSortComparisonOperator());
}
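
// Periodic housekeeping: expire idle non-root peers and garbage collect
// paths that are no longer referenced elsewhere.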
void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
{
    // Delete peers that haven't said anything in ZT_PEER_ALIVE_TIMEOUT.
    {
        RWMutex::Lock l1(m_peers_l);
        for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();) {
            // TODO: also delete if the peer has not exchanged meaningful communication in a while,
            // such as a network frame or non-trivial control packet.
            if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.count(i->second->identity()) == 0)) {
                i->second->save(tPtr);
                m_peers.erase(i++);
            } else ++i;
        }
    }

    // Delete paths that are no longer held by anyone else ("weak reference" type behavior).
    {
        RWMutex::Lock l1(m_paths_l);
        for (Map< uint64_t, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();) {
            if (i->second.weakGC())
                m_paths.erase(i++);
            else ++i;
        }
    }
}
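
// Persist all currently known peers to the state store.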
void Topology::saveAll(void *tPtr)
{
    RWMutex::RLock l(m_peers_l);
    for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
        i->second->save(tPtr);
}
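
// Attempt to load a cached peer record (ZT_STATE_OBJECT_PEER) for the given
// address; "peer" is left unset if no recent, parseable record exists.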
void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer)
{
    try {
        uint64_t id[2];
        id[0] = zta.toInt();
        id[1] = 0;
        Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
        if (data.size() > 8) {
            const uint8_t *d = data.data();
            int dl = (int)data.size();
            const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
            Peer *const p = new Peer(RR);
            int n = p->unmarshal(d + 8, dl - 8);
            if (n < 0) {
                delete p;
                return;
            }
            if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
                // TODO: handle many peers, same address (?)
                peer.set(p);
                return;
            }
            delete p; // cached record is too old; discard it rather than leak it
        }
    } catch (...) {
        peer.zero();
    }
}
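
// Serialize the current root identity set and persist it as ZT_STATE_OBJECT_ROOTS.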
void Topology::m_writeRootList(void *tPtr)
{
    // assumes m_peers_l is locked for read or write
    uint8_t *const roots = (uint8_t *)malloc((ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 2) * m_roots.size());
    if (roots) { // sanity check
        int p = 0;
        for (Set< Identity >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
            const int pp = r->marshal(roots + p, false);
            if (pp > 0)
                p += pp;
        }
        uint64_t id[2];
        id[0] = 0;
        id[1] = 0;
        RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_ROOTS, id, roots, (unsigned int)p);
        free(roots);
    }
}
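
// Rebuild m_rootPeers from m_roots, creating or loading peer objects as
// needed, then sort the result by latency.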
void Topology::m_updateRootPeers(void *tPtr)
{
    // assumes m_peers_l is locked for write
    Vector< SharedPtr< Peer > > rp;
    for (Set< Identity >::iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
        Map< Address, SharedPtr< Peer > >::iterator pp(m_peers.find(r->address()));
        SharedPtr< Peer > p;
        if (pp != m_peers.end())
            p = pp->second;
        if (!p)
            m_loadCached(tPtr, r->address(), p);
        if ((!p) || (p->identity() != *r)) {
            p.set(new Peer(RR));
            p->init(*r);
            m_peers[r->address()] = p;
        }
        rp.push_back(p);
    }
    m_rootPeers.swap(rp);
    std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootSortComparisonOperator());
}

} // namespace ZeroTier