// Topology.cpp (ZeroTier One) -- approx. 11 KB
/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014 ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
  27. #include <algorithm>
  28. #include "Constants.hpp"
  29. #include "Defaults.hpp"
  30. #include "Topology.hpp"
  31. #include "NodeConfig.hpp"
  32. #include "CMWC4096.hpp"
  33. #include "Dictionary.hpp"
  34. #define ZT_PEER_WRITE_BUF_SIZE 131072
  35. namespace ZeroTier {
  36. Topology::Topology(const RuntimeEnvironment *renv,bool enablePermanentIdCaching) :
  37. RR(renv),
  38. _amSupernode(false)
  39. {
  40. if (enablePermanentIdCaching)
  41. _idCacheBase = (RR->homePath + ZT_PATH_SEPARATOR_S + "iddb.d");
  42. _loadPeers();
  43. }
  44. Topology::~Topology()
  45. {
  46. clean(Utils::now());
  47. _dumpPeers();
  48. }
  49. void Topology::setSupernodes(const std::map< Identity,std::vector< std::pair<InetAddress,bool> > > &sn)
  50. {
  51. Mutex::Lock _l(_supernodes_m);
  52. if (_supernodes == sn)
  53. return; // no change
  54. _supernodes = sn;
  55. _supernodeAddresses.clear();
  56. _supernodePeers.clear();
  57. uint64_t now = Utils::now();
  58. for(std::map< Identity,std::vector< std::pair<InetAddress,bool> > >::const_iterator i(sn.begin());i!=sn.end();++i) {
  59. if (i->first != RR->identity) {
  60. SharedPtr<Peer> p(getPeer(i->first.address()));
  61. if (!p)
  62. p = addPeer(SharedPtr<Peer>(new Peer(RR->identity,i->first)));
  63. for(std::vector< std::pair<InetAddress,bool> >::const_iterator j(i->second.begin());j!=i->second.end();++j)
  64. p->addPath(Path(j->first,(j->second) ? Path::PATH_TYPE_TCP_OUT : Path::PATH_TYPE_UDP,true));
  65. p->use(now);
  66. _supernodePeers.push_back(p);
  67. }
  68. _supernodeAddresses.insert(i->first.address());
  69. }
  70. _amSupernode = (_supernodes.find(RR->identity) != _supernodes.end());
  71. }
  72. void Topology::setSupernodes(const Dictionary &sn)
  73. {
  74. std::map< Identity,std::vector< std::pair<InetAddress,bool> > > m;
  75. for(Dictionary::const_iterator d(sn.begin());d!=sn.end();++d) {
  76. if ((d->first.length() == ZT_ADDRESS_LENGTH_HEX)&&(d->second.length() > 0)) {
  77. try {
  78. Dictionary snspec(d->second);
  79. std::vector< std::pair<InetAddress,bool> > &a = m[Identity(snspec.get("id"))];
  80. std::string udp(snspec.get("udp",std::string()));
  81. if (udp.length() > 0)
  82. a.push_back(std::pair<InetAddress,bool>(InetAddress(udp),false));
  83. std::string tcp(snspec.get("tcp",std::string()));
  84. a.push_back(std::pair<InetAddress,bool>(InetAddress(tcp),true));
  85. } catch ( ... ) {
  86. LOG("supernode list contained invalid entry for: %s",d->first.c_str());
  87. }
  88. }
  89. }
  90. this->setSupernodes(m);
  91. }
  92. SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
  93. {
  94. if (peer->address() == RR->identity.address()) {
  95. TRACE("BUG: addNewPeer() caught and ignored attempt to add peer for self");
  96. throw std::logic_error("cannot add peer for self");
  97. }
  98. uint64_t now = Utils::now();
  99. Mutex::Lock _l(_activePeers_m);
  100. SharedPtr<Peer> p(_activePeers.insert(std::pair< Address,SharedPtr<Peer> >(peer->address(),peer)).first->second);
  101. p->use(now);
  102. saveIdentity(p->identity());
  103. return p;
  104. }
  105. SharedPtr<Peer> Topology::getPeer(const Address &zta) const
  106. {
  107. if (zta == RR->identity.address()) {
  108. TRACE("BUG: ignored attempt to getPeer() for self, returned NULL");
  109. return SharedPtr<Peer>();
  110. }
  111. uint64_t now = Utils::now();
  112. Mutex::Lock _l(_activePeers_m);
  113. std::map< Address,SharedPtr<Peer> >::const_iterator ap(_activePeers.find(zta));
  114. if ((ap != _activePeers.end())&&(ap->second)) {
  115. ap->second->use(now);
  116. return ap->second;
  117. }
  118. return SharedPtr<Peer>();
  119. }
  120. Identity Topology::getIdentity(const Address &zta)
  121. {
  122. SharedPtr<Peer> p(getPeer(zta));
  123. if (p)
  124. return p->identity();
  125. if (_idCacheBase.length()) {
  126. std::string idcPath(_idCacheBase + ZT_PATH_SEPARATOR_S + zta.toString());
  127. std::string ids;
  128. if (Utils::readFile(idcPath.c_str(),ids)) {
  129. try {
  130. return Identity(ids);
  131. } catch ( ... ) {} // ignore invalid IDs
  132. }
  133. }
  134. return Identity();
  135. }
  136. void Topology::saveIdentity(const Identity &id)
  137. {
  138. if ((id)&&(_idCacheBase.length())) {
  139. std::string idcPath(_idCacheBase + ZT_PATH_SEPARATOR_S + id.address().toString());
  140. if (!Utils::fileExists(idcPath.c_str()))
  141. Utils::writeFile(idcPath.c_str(),id.toString(false));
  142. }
  143. }
/**
 * Pick the best supernode to relay through, optionally avoiding some.
 *
 * @param avoid Array of supernode addresses to skip (may be null if avoidCount is 0)
 * @param avoidCount Number of entries in avoid[]
 * @param strictAvoid If true, return null rather than falling back to an avoided supernode
 * @return Best supernode, or null SharedPtr if none is suitable
 */
SharedPtr<Peer> Topology::getBestSupernode(const Address *avoid,unsigned int avoidCount,bool strictAvoid) const
{
	SharedPtr<Peer> bestSupernode;
	uint64_t now = Utils::now();
	Mutex::Lock _l(_supernodes_m);
	if (_amSupernode) {
		/* If I am a supernode, the "best" supernode is the one whose address
		 * is numerically greater than mine (with wrap at top of list). This
		 * causes packets searching for a route to pretty much literally
		 * circumnavigate the globe rather than bouncing between just two. */
		if (_supernodeAddresses.size() > 1) { // gotta be one other than me for this to work
			std::set<Address>::const_iterator sna(_supernodeAddresses.find(RR->identity.address()));
			if (sna != _supernodeAddresses.end()) { // sanity check -- _amSupernode should've been false in this case
				// Walk the sorted address set starting just after our own
				// address, wrapping at the end, until we find another
				// supernode with an active direct path.
				// NOTE(review): if no other supernode currently has an active
				// direct path this loop never terminates -- confirm callers
				// guarantee at least one reachable supernode in this mode.
				for(;;) {
					if (++sna == _supernodeAddresses.end())
						sna = _supernodeAddresses.begin(); // wrap around at end
					if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
						SharedPtr<Peer> p(getPeer(*sna));
						if ((p)&&(p->hasActiveDirectPath(now))) {
							bestSupernode = p;
							break;
						}
					}
				}
			}
		}
	} else {
		/* If I am not a supernode, the best supernode is the active one with
		 * the lowest latency. */
		unsigned int l,bestSupernodeLatency = 65536;
		uint64_t lds,ldr;
		// First look for a best supernode by comparing latencies, but exclude
		// supernodes that have not responded to direct messages in order to
		// try to exclude any that are dead or unreachable.
		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_supernodePeers.begin());sn!=_supernodePeers.end();) {
			// Skip explicitly avoided relays
			for(unsigned int i=0;i<avoidCount;++i) {
				if (avoid[i] == (*sn)->address())
					goto keep_searching_for_supernodes;
			}
			// Skip possibly comatose or unreachable relays: a send much more
			// recent than the last receive suggests the relay isn't answering.
			lds = (*sn)->lastDirectSend();
			ldr = (*sn)->lastDirectReceive();
			if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
				goto keep_searching_for_supernodes;
			if ((*sn)->hasActiveDirectPath(now)) {
				l = (*sn)->latency();
				if (bestSupernode) {
					// Only replace the current best on a strictly lower
					// (and known, i.e. nonzero) latency.
					if ((l)&&(l < bestSupernodeLatency)) {
						bestSupernodeLatency = l;
						bestSupernode = *sn;
					}
				} else {
					// First candidate: adopt it; record latency only if known.
					if (l)
						bestSupernodeLatency = l;
					bestSupernode = *sn;
				}
			}
			// The goto label stands in for 'continue' so the skip checks
			// above can bail out of the inner avoid[] loop directly.
			keep_searching_for_supernodes:
			++sn;
		}
		if (bestSupernode) {
			bestSupernode->use(now);
			return bestSupernode;
		} else if (strictAvoid)
			return SharedPtr<Peer>(); // caller insists on the avoid list
		// If we have nothing from above, just pick one without avoidance criteria.
		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_supernodePeers.begin();sn!=_supernodePeers.end();++sn) {
			if ((*sn)->hasActiveDirectPath(now)) {
				unsigned int l = (*sn)->latency();
				if (bestSupernode) {
					if ((l)&&(l < bestSupernodeLatency)) {
						bestSupernodeLatency = l;
						bestSupernode = *sn;
					}
				} else {
					if (l)
						bestSupernodeLatency = l;
					bestSupernode = *sn;
				}
			}
		}
	}
	if (bestSupernode)
		bestSupernode->use(now);
	return bestSupernode;
}
  231. void Topology::clean(uint64_t now)
  232. {
  233. Mutex::Lock _l(_activePeers_m);
  234. Mutex::Lock _l2(_supernodes_m);
  235. for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
  236. if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(!_supernodeAddresses.count(p->second->address())))
  237. _activePeers.erase(p++);
  238. else {
  239. p->second->clean(now);
  240. ++p;
  241. }
  242. }
  243. }
  244. bool Topology::authenticateRootTopology(const Dictionary &rt)
  245. {
  246. try {
  247. std::string signer(rt.signingIdentity());
  248. if (!signer.length())
  249. return false;
  250. Identity signerId(signer);
  251. std::map< Address,Identity >::const_iterator authority(ZT_DEFAULTS.rootTopologyAuthorities.find(signerId.address()));
  252. if (authority == ZT_DEFAULTS.rootTopologyAuthorities.end())
  253. return false;
  254. if (signerId != authority->second)
  255. return false;
  256. return rt.verify(authority->second);
  257. } catch ( ... ) {
  258. return false;
  259. }
  260. }
/**
 * Serialize all active peers to <home>/peers.persist.
 *
 * Writes a "ZTPD0" magic header followed by serialized peer records,
 * flushing the scratch buffer whenever it passes half capacity. On any
 * write or serialization error the partial file is removed so a corrupt
 * file is never left behind. buf.burn() is called on every exit path to
 * scrub buffered peer data (which may include key material) from memory.
 */
void Topology::_dumpPeers()
{
	Buffer<ZT_PEER_WRITE_BUF_SIZE> buf;
	std::string pdpath(RR->homePath + ZT_PATH_SEPARATOR_S + "peers.persist");
	Mutex::Lock _l(_activePeers_m);
	FILE *pd = fopen(pdpath.c_str(),"wb");
	if (!pd)
		return;
	// Magic/version header; if even this fails, give up and remove the file.
	if (fwrite("ZTPD0",5,1,pd) != 1) {
		fclose(pd);
		Utils::rm(pdpath);
		return;
	}
	for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();++p) {
		try {
			p->second->serialize(buf);
			// Flush once the buffer is at least half full, leaving headroom
			// for the next record.
			if (buf.size() >= (ZT_PEER_WRITE_BUF_SIZE / 2)) {
				if (fwrite(buf.data(),buf.size(),1,pd) != 1) {
					fclose(pd);
					Utils::rm(pdpath);
					buf.burn();
					return;
				}
				buf.clear();
				buf.burn();
			}
		} catch ( ... ) {
			// Serialization failure: discard the (now inconsistent) file.
			fclose(pd);
			Utils::rm(pdpath);
			buf.burn();
			return;
		}
	}
	// Flush any remaining partial buffer.
	if (buf.size()) {
		if (fwrite(buf.data(),buf.size(),1,pd) != 1) {
			fclose(pd);
			Utils::rm(pdpath);
			buf.burn();
			return;
		}
		buf.burn();
	}
	fclose(pd);
	// Restrict file permissions; it may contain sensitive peer state.
	Utils::lockDownFile(pdpath.c_str(),false);
	buf.burn();
}
  307. void Topology::_loadPeers()
  308. {
  309. Buffer<ZT_PEER_WRITE_BUF_SIZE> buf;
  310. std::string pdpath(RR->homePath + ZT_PATH_SEPARATOR_S + "peers.persist");
  311. Mutex::Lock _l(_activePeers_m);
  312. _activePeers.clear();
  313. FILE *pd = fopen(pdpath.c_str(),"rb");
  314. if (!pd)
  315. return;
  316. try {
  317. char magic[5];
  318. if ((fread(magic,5,1,pd) == 1)&&(!memcmp("ZTPD0",magic,5))) {
  319. long rlen = 0;
  320. do {
  321. long rlen = (long)fread(const_cast<char *>(static_cast<const char *>(buf.data())) + buf.size(),1,ZT_PEER_WRITE_BUF_SIZE - buf.size(),pd);
  322. if (rlen < 0) rlen = 0;
  323. buf.setSize(buf.size() + (unsigned int)rlen);
  324. unsigned int ptr = 0;
  325. while ((ptr < (ZT_PEER_WRITE_BUF_SIZE / 2))&&(ptr < buf.size())) {
  326. SharedPtr<Peer> p(new Peer());
  327. ptr += p->deserialize(buf,ptr);
  328. _activePeers[p->address()] = p;
  329. saveIdentity(p->identity());
  330. }
  331. buf.behead(ptr);
  332. } while (rlen > 0);
  333. }
  334. } catch ( ... ) {
  335. _activePeers.clear();
  336. }
  337. fclose(pd);
  338. Utils::rm(pdpath);
  339. buf.burn();
  340. }
  341. } // namespace ZeroTier