/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014 ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#ifndef ZT_TOPOLOGY_HPP
#define ZT_TOPOLOGY_HPP

#include <stdio.h>
#include <string.h>

#include <map>
#include <vector>
#include <stdexcept>
#include <algorithm>

#include "Constants.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "Peer.hpp"
#include "Mutex.hpp"
#include "InetAddress.hpp"
#include "Utils.hpp"
#include "Packet.hpp"
#include "Logger.hpp"
#include "Dictionary.hpp"

namespace ZeroTier {

class RuntimeEnvironment;

/**
 * Database of network topology
 */
class Topology
{
public:
    Topology(const RuntimeEnvironment *renv);
    ~Topology();

    /**
     * Set up supernodes for this network
     *
     * @param sn Supernodes for this network
     */
    void setSupernodes(const std::map< Identity,std::vector< std::pair<InetAddress,bool> > > &sn);

    /**
     * Set up supernodes for this network
     *
     * This performs no signature verification of any kind. The caller must
     * check the signature of the root topology dictionary first.
     *
     * @param sn Supernodes dictionary from root-topology
     */
    void setSupernodes(const Dictionary &sn);

    /**
     * Add a peer to the database
     *
     * This will not replace an existing peer: if a peer with this address is
     * already known, the existing record is returned instead.
     *
     * @param peer Peer to add
     * @return New or existing peer (should replace 'peer')
     */
    SharedPtr<Peer> addPeer(const SharedPtr<Peer> &peer);
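
    /* Usage sketch (illustrative, not part of the original header): always
     * adopt the returned pointer, since an existing record takes precedence
     * over a newly constructed one.
     *
     *   SharedPtr<Peer> p = ...; // newly learned peer
     *   p = topology.addPeer(p); // 'p' now refers to the canonical record
     */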

    /**
     * Get a peer from its address
     *
     * @param zta ZeroTier address of peer
     * @return Peer or NULL if not found
     */
    SharedPtr<Peer> getPeer(const Address &zta);

    /**
     * @return Vector of peers that are supernodes
     */
    inline std::vector< SharedPtr<Peer> > supernodePeers() const
    {
        Mutex::Lock _l(_lock);
        return _supernodePeers;
    }

    /**
     * @return Number of supernodes
     */
    inline unsigned int numSupernodes() const
    {
        Mutex::Lock _l(_lock);
        return (unsigned int)_supernodePeers.size();
    }

    /**
     * Get the current favorite supernode
     *
     * @return Supernode with lowest latency or NULL if none
     */
    inline SharedPtr<Peer> getBestSupernode()
    {
        return getBestSupernode((const Address *)0,0,false);
    }

    /**
     * Get the best supernode, avoiding supernodes listed in an array
     *
     * This will get the best supernode (lowest latency, etc.) but will
     * try to avoid the listed supernodes, only using them if no others
     * are available.
     *
     * @param avoid Nodes to avoid
     * @param avoidCount Number of nodes to avoid
     * @param strictAvoid If false, fall back to avoided supernodes when no non-avoided supernodes are available
     * @return Supernode or NULL if none
     */
    SharedPtr<Peer> getBestSupernode(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
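
    /* Usage sketch (illustrative; 'lastRelay' is a hypothetical local holding
     * the address of a supernode we would rather not use again):
     *
     *   Address avoid[1];
     *   avoid[0] = lastRelay;
     *   SharedPtr<Peer> sn(topology.getBestSupernode(avoid,1,false));
     *   if (sn) {
     *     // use 'sn' as the relay / introduction point
     *   }
     */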

    /**
     * @param zta ZeroTier address
     * @return True if this is a designated supernode
     */
    inline bool isSupernode(const Address &zta) const
        throw()
    {
        Mutex::Lock _l(_lock);
        return (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),zta) != _supernodeAddresses.end());
    }

    /**
     * @return Vector of supernode addresses
     */
    inline std::vector<Address> supernodeAddresses() const
    {
        Mutex::Lock _l(_lock);
        return _supernodeAddresses;
    }

    /**
     * @return True if this node's identity is in the supernode set
     */
    inline bool amSupernode() const { return _amSupernode; }

    /**
     * Clean and flush database
     */
    void clean(uint64_t now);

    /**
     * Apply a function or function object to all peers
     *
     * Note: explicitly template this by reference if you want the object
     * passed by reference instead of copied.
     *
     * Warning: the supplied function must not call other Topology methods
     * that acquire _lock, otherwise a recursive lock and deadlock or lock
     * corruption may occur.
     *
     * @param f Function to apply
     * @tparam F Function or function object type
     */
    template<typename F>
    inline void eachPeer(F f)
    {
        Mutex::Lock _l(_lock);
        for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
            f(*this,p->second);
    }
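
    /* Usage sketch (illustrative, not part of the original header): any callable
     * taking (Topology &,const SharedPtr<Peer> &) works; the function objects
     * defined below are the real examples. A minimal hypothetical one:
     *
     *   struct CountPeers
     *   {
     *       CountPeers() : count(0) {}
     *       inline void operator()(Topology &t,const SharedPtr<Peer> &p) { ++count; }
     *       unsigned long count;
     *   };
     *
     *   CountPeers cp;
     *   topology.eachPeer<CountPeers &>(cp); // templated by reference so cp.count is updated
     */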

    /**
     * Apply a function or function object to all supernode peers
     *
     * Note: explicitly template this by reference if you want the object
     * passed by reference instead of copied.
     *
     * Warning: the supplied function must not call other Topology methods
     * that acquire _lock, otherwise a recursive lock and deadlock or lock
     * corruption may occur.
     *
     * @param f Function to apply
     * @tparam F Function or function object type
     */
    template<typename F>
    inline void eachSupernodePeer(F f)
    {
        Mutex::Lock _l(_lock);
        for(std::vector< SharedPtr<Peer> >::const_iterator p(_supernodePeers.begin());p!=_supernodePeers.end();++p)
            f(*this,*p);
    }

    /**
     * Pings all peers that need a ping sent, excluding supernodes
     *
     * Ordinary peers are pinged if we haven't heard from them recently. We
     * measure from receive time rather than send time, since only a reply
     * counts as "heard from" and we want to keep trying if a packet is lost.
     * Ordinary peers are also subject to a frame inactivity timeout: if we
     * haven't actually transferred any data with them recently we give up,
     * and eventually Topology purges them from memory.
     */
    class PingPeersThatNeedPing
    {
    public:
        PingPeersThatNeedPing(const RuntimeEnvironment *renv,uint64_t now) throw() :
            _now(now),
            _supernodeAddresses(renv->topology->supernodeAddresses()),
            RR(renv) {}

        inline void operator()(Topology &t,const SharedPtr<Peer> &p)
        {
            /* For ordinary nodes we ping if they've sent us a frame recently,
             * otherwise they are stale and we let the link die.
             *
             * Note that we measure ping time from time of last receive rather
             * than time of last send in order to only count full round trips. */
            if ( (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) == _supernodeAddresses.end()) &&
                 ((_now - p->lastFrame()) < ZT_PEER_PATH_ACTIVITY_TIMEOUT) &&
                 ((_now - p->lastDirectReceive()) >= ZT_PEER_DIRECT_PING_DELAY) ) {
                p->sendPing(RR,_now);
            }
        }

    private:
        uint64_t _now;
        std::vector<Address> _supernodeAddresses;
        const RuntimeEnvironment *RR;
    };

    /**
     * Ping peers that need ping according to supernode rules
     *
     * Supernodes ping aggressively if a ping is unanswered and they are not
     * subject to the activity timeout. In other words: we assume they are
     * always there and always try to reach them.
     *
     * The ultimate rate limit for this is controlled up in the Node main loop.
     */
    class PingSupernodesThatNeedPing
    {
    public:
        PingSupernodesThatNeedPing(const RuntimeEnvironment *renv,uint64_t now) throw() :
            _now(now),
            RR(renv) {}

        inline void operator()(Topology &t,const SharedPtr<Peer> &p)
        {
            /* For supernodes we always ping even if no frames have been seen, and
             * we ping aggressively if pings are unanswered. The limit to this
             * frequency is set in the main loop to no more than ZT_STARTUP_AGGRO. */
            uint64_t lp = 0;
            uint64_t lr = 0;
            p->lastPingAndDirectReceive(lp,lr);
            if ( (lr < RR->timeOfLastResynchronize) || ((lr < lp)&&((lp - lr) >= ZT_PING_UNANSWERED_AFTER)) || ((_now - lr) >= ZT_PEER_DIRECT_PING_DELAY) )
                p->sendPing(RR,_now);
        }

    private:
        uint64_t _now;
        const RuntimeEnvironment *RR;
    };
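
    /* Usage sketch (illustrative of how a periodic housekeeping pass might
     * drive the two ping function objects above; the loop and its rate
     * limiting live elsewhere, and 'RR' is assumed to be the usual
     * RuntimeEnvironment pointer available there):
     *
     *   uint64_t now = Utils::now();
     *   RR->topology->eachPeer(Topology::PingPeersThatNeedPing(RR,now));
     *   RR->topology->eachSupernodePeer(Topology::PingSupernodesThatNeedPing(RR,now));
     */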

    /**
     * Computes the most recent direct packet receive timestamp across the peers it visits
     */
    class FindMostRecentDirectReceiveTimestamp
    {
    public:
        FindMostRecentDirectReceiveTimestamp(uint64_t &ts) throw() : _ts(ts) {}

        inline void operator()(Topology &t,const SharedPtr<Peer> &p) throw() { _ts = std::max(p->lastDirectReceive(),_ts); }

    private:
        uint64_t &_ts;
    };
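
    /* Usage sketch (illustrative): because _ts is held by reference, even a
     * copied instance of the function object updates the caller's variable.
     *
     *   uint64_t lastDirectReceive = 0;
     *   topology.eachPeer(Topology::FindMostRecentDirectReceiveTimestamp(lastDirectReceive));
     */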

    /**
     * Function object to forget direct links to active peers and then ping them indirectly
     */
    class ResetActivePeers
    {
    public:
        ResetActivePeers(const RuntimeEnvironment *renv,uint64_t now) throw() :
            _now(now),
            _supernode(renv->topology->getBestSupernode()),
            _supernodeAddresses(renv->topology->supernodeAddresses()),
            RR(renv) {}

        inline void operator()(Topology &t,const SharedPtr<Peer> &p)
        {
            p->clearPaths(false); // false means don't forget 'fixed' paths e.g. supernodes

            Packet outp(p->address(),RR->identity.address(),Packet::VERB_NOP);
            outp.armor(p->key(),false); // no need to encrypt a NOP

            if (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) != _supernodeAddresses.end()) {
                // Send NOP directly to supernodes
                p->send(RR,outp.data(),outp.size(),_now);
            } else {
                // Send NOP indirectly to regular peers if still active, triggering a new RENDEZVOUS
                if (((_now - p->lastFrame()) < ZT_PEER_PATH_ACTIVITY_TIMEOUT)&&(_supernode)) {
                    TRACE("sending reset NOP to %s",p->address().toString().c_str());
                    _supernode->send(RR,outp.data(),outp.size(),_now);
                }
            }
        }

    private:
        uint64_t _now;
        SharedPtr<Peer> _supernode;
        std::vector<Address> _supernodeAddresses;
        const RuntimeEnvironment *RR;
    };

    /**
     * Function object to collect peers with at least one currently active direct path
     */
    class CollectPeersWithActiveDirectPath
    {
    public:
        CollectPeersWithActiveDirectPath(std::vector< SharedPtr<Peer> > &v,uint64_t now) throw() :
            _now(now),
            _v(v) {}

        inline void operator()(Topology &t,const SharedPtr<Peer> &p)
        {
            if (p->hasActiveDirectPath(_now))
                _v.push_back(p);
        }

    private:
        uint64_t _now;
        std::vector< SharedPtr<Peer> > &_v;
    };
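
    /* Usage sketch (illustrative): gather all peers that currently have at
     * least one active direct path (Utils::now() stands for the current time
     * in milliseconds).
     *
     *   std::vector< SharedPtr<Peer> > directPeers;
     *   topology.eachPeer(Topology::CollectPeersWithActiveDirectPath(directPeers,Utils::now()));
     */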

    /**
     * Validate a root topology dictionary against the identities specified in Defaults
     *
     * @param rt Root topology dictionary
     * @return True if dictionary signature is valid
     */
    static bool authenticateRootTopology(const Dictionary &rt);
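
    /* Usage sketch (illustrative): verify before applying, as required by
     * setSupernodes(const Dictionary &). The nested "supernodes" key and the
     * Dictionary accessors used here are assumptions about the root topology
     * layout, not guarantees made by this header.
     *
     *   if (Topology::authenticateRootTopology(rootTopology))
     *       topology.setSupernodes(Dictionary(rootTopology.get("supernodes","")));
     */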

private:
    Identity _getIdentity(const Address &zta);
    void _saveIdentity(const Identity &id);

    const RuntimeEnvironment *RR;

    std::string _idCacheBase;

    std::map< Address,SharedPtr<Peer> > _activePeers;
    std::map< Identity,std::vector< std::pair<InetAddress,bool> > > _supernodes;
    std::vector< Address > _supernodeAddresses;
    std::vector< SharedPtr<Peer> > _supernodePeers;

    Mutex _lock;

    // Set to true if my identity is in _supernodes
    volatile bool _amSupernode;
};

} // namespace ZeroTier

#endif