Topology.hpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_TOPOLOGY_HPP
#define ZT_TOPOLOGY_HPP

#include "Constants.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "Peer.hpp"
#include "Path.hpp"
#include "Mutex.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "ScopedPtr.hpp"
#include "Fingerprint.hpp"
#include "Containers.hpp"

namespace ZeroTier {

class RuntimeEnvironment;

/**
 * Database of network topology
 */
class Topology
{
public:
	Topology(const RuntimeEnvironment *renv, void *tPtr);

	/**
	 * Add peer to database
	 *
	 * This will not replace existing peers. In that case the existing peer
	 * record is returned.
	 *
	 * @param peer Peer to add
	 * @return New or existing peer (should replace 'peer')
	 */
	SharedPtr< Peer > add(void *tPtr, const SharedPtr< Peer > &peer);
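
	// Illustrative usage sketch (hypothetical names, not part of this header):
	// 'topo' is this node's Topology instance and 'p' is a newly constructed
	// SharedPtr< Peer >.
	//
	//   p = topo.add(tPtr, p);
	//   // 'p' now refers to the canonical record, which may be a previously
	//   // known peer rather than the object that was passed in.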

	/**
	 * Get a peer from its address
	 *
	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
	 * @param zta ZeroTier address of peer
	 * @param loadFromCached If false do not load from cache if not in memory (default: true)
	 * @return Peer or NULL if not found
	 */
	ZT_INLINE SharedPtr< Peer > peer(void *tPtr, const Address &zta, const bool loadFromCached = true)
	{
		{
			// Fast path: look up the peer in memory under a read lock.
			RWMutex::RLock l(m_peers_l);
			const SharedPtr< Peer > *const ap = m_peers.get(zta);
			if (likely(ap != nullptr))
				return *ap;
		}
		{
			// Slow path: optionally load from the cache, then insert under a
			// write lock. If another thread inserted this peer in the meantime,
			// the already-present record wins.
			SharedPtr< Peer > p;
			if (loadFromCached) {
				m_loadCached(tPtr, zta, p);
				if (p) {
					RWMutex::Lock l(m_peers_l);
					SharedPtr< Peer > &hp = m_peers[zta];
					if (hp)
						return hp;
					hp = p;
				}
			}
			return p;
		}
	}
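
	// Illustrative usage sketch (hypothetical names: 'topo' is this node's
	// Topology instance, 'source' is a packet's source Address):
	//
	//   SharedPtr< Peer > p(topo.peer(tPtr, source));
	//   if (!p) {
	//     // Unknown peer: the caller would typically fall back to an identity
	//     // lookup (e.g. WHOIS) before processing further.
	//   }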

	/**
	 * Get a Path object for a given local and remote physical address, creating if needed
	 *
	 * @param l Local socket
	 * @param r Remote address
	 * @return Pointer to canonicalized Path object or NULL on error
	 */
	ZT_INLINE SharedPtr< Path > path(const int64_t l, const InetAddress &r)
	{
		const uint64_t k = s_getPathKey(l, r);
		{
			RWMutex::RLock lck(m_paths_l);
			SharedPtr< Path > *const p = m_paths.get(k);
			if (likely(p != nullptr))
				return *p;
		}
		{
			SharedPtr< Path > p(new Path(l, r));
			RWMutex::Lock lck(m_paths_l);
			SharedPtr< Path > &p2 = m_paths[k];
			if (p2)
				return p2;
			p2 = p;
			return p;
		}
	}
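
	// Illustrative usage sketch (hypothetical names: 'topo', 'localSocket', and
	// 'fromAddr' stand for this node's Topology, the receiving socket, and the
	// remote InetAddress of an inbound datagram):
	//
	//   SharedPtr< Path > viaPath(topo.path(localSocket, fromAddr));
	//   // 'viaPath' is the canonical Path for this (socket, address) pair and
	//   // is shared with every other caller that looks up the same pair.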

	/**
	 * @return Current best root server
	 */
	ZT_INLINE SharedPtr< Peer > root() const
	{
		RWMutex::RLock l(m_peers_l);
		if (unlikely(m_rootPeers.empty()))
			return SharedPtr< Peer >();
		return m_rootPeers.front();
	}

	/**
	 * @param id Identity to check
	 * @return True if this identity corresponds to a root
	 */
	ZT_INLINE bool isRoot(const Identity &id) const
	{
		RWMutex::RLock l(m_peers_l);
		return (m_roots.find(id) != m_roots.end());
	}

	/**
	 * Apply a function or function object to all peers
	 *
	 * This locks the peer map during execution, so calling back into methods
	 * that take the peer lock (such as peer()) from within eachPeer() will
	 * deadlock.
	 *
	 * @param f Function to apply
	 * @tparam F Function or function object type
	 */
	template< typename F >
	ZT_INLINE void eachPeer(F f) const
	{
		RWMutex::RLock l(m_peers_l);
		for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
			f(i->second);
	}
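
	// Illustrative usage sketch (assuming 'topo' is this node's Topology and a
	// lambda is used as the function object):
	//
	//   topo.eachPeer([&](const SharedPtr< Peer > &p) {
	//     // Inspect 'p' here, but do not call back into topo.peer() etc.:
	//     // the peer lock is held for the duration of the iteration.
	//   });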

	/**
	 * @param allPeers vector to fill with all current peers
	 */
	ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers) const
	{
		allPeers.clear();
		RWMutex::RLock l(m_peers_l);
		allPeers.reserve(m_peers.size());
		for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
			allPeers.push_back(i->second);
	}

	/**
	 * @param allPeers vector to fill with all current peers
	 * @param rootPeers vector to fill with the current root peers
	 */
	ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
	{
		allPeers.clear();
		RWMutex::RLock l(m_peers_l);
		allPeers.reserve(m_peers.size());
		for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
			allPeers.push_back(i->second);
		rootPeers = m_rootPeers;
	}

	/**
	 * Flag a peer as a root, adding the peer if it is not known
	 *
	 * @param tPtr Thread pointer
	 * @param id Root identity (will be locally validated)
	 * @return Root peer or NULL if some problem occurred
	 */
	SharedPtr< Peer > addRoot(void *tPtr, const Identity &id);

	/**
	 * Remove a root server's identity from the root server set
	 *
	 * @param tPtr Thread pointer
	 * @param address Root address
	 * @return True if root found and removed, false if not found
	 */
	bool removeRoot(void *tPtr, Address address);

	/**
	 * Sort roots in ascending order of apparent latency
	 */
	void rankRoots();

	/**
	 * Do periodic tasks such as database cleanup
	 */
	void doPeriodicTasks(void *tPtr, int64_t now);

	/**
	 * Save all currently known peers to data store
	 */
	void saveAll(void *tPtr);

private:
	void m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer);
	void m_writeRootList(void *tPtr);
	void m_updateRootPeers(void *tPtr);

	// This gets an integer key from an InetAddress for looking up paths.
	static ZT_INLINE uint64_t s_getPathKey(const int64_t l, const InetAddress &r) noexcept
	{
		// SECURITY: these will be used as keys in a Map<> which uses its own hasher that
		// mixes in a per-invocation secret to work against hash collision attacks. See the
		// map hasher in Containers.hpp. Otherwise the point here is really, really fast
		// path lookup by address. The number of paths is never likely to be high enough
		// for a collision to be something we worry about. That would require a minimum of
		// millions and millions of paths on a single node.
		if (r.family() == AF_INET) {
			return ((uint64_t)(r.as.sa_in.sin_addr.s_addr) << 32U) ^ ((uint64_t)r.as.sa_in.sin_port << 16U) ^ (uint64_t)l;
		} else if (r.family() == AF_INET6) {
			return Utils::loadAsIsEndian< uint64_t >(r.as.sa_in6.sin6_addr.s6_addr) + Utils::loadAsIsEndian< uint64_t >(r.as.sa_in6.sin6_addr.s6_addr + 8) + (uint64_t)r.as.sa_in6.sin6_port + (uint64_t)l;
		} else {
			// This should never really be used but it's here just in case.
			return (uint64_t)Utils::fnv1a32(reinterpret_cast<const void *>(&r), sizeof(InetAddress)) + (uint64_t)l;
		}
	}

	const RuntimeEnvironment *const RR;

	RWMutex m_paths_l; // locks m_paths
	RWMutex m_peers_l; // locks m_peers, m_roots, and m_rootPeers

	Map< uint64_t, SharedPtr< Path > > m_paths;
	Map< Address, SharedPtr< Peer > > m_peers;
	Set< Identity > m_roots;
	Vector< SharedPtr< Peer > > m_rootPeers;
};

} // namespace ZeroTier

#endif