Topology.cpp 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380
  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include "Topology.hpp"
  14. namespace ZeroTier {
// Construct the topology database.
//
// @param renv Runtime environment (node, identity, state store access)
// @param tPtr Thread pointer passed through to state object callbacks
//
// Loads the persisted root list blob (ZT_STATE_OBJECT_ROOTS) and then
// builds the in-memory root peer list. No locks are taken here: the
// object is still under construction and not yet visible to other threads.
Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
	RR(renv)
{
	uint64_t idtmp[2];
	idtmp[0] = 0;
	idtmp[1] = 0;
	// Fetch the persisted roots blob. Parsing it is not implemented yet
	// (see TODO below), so 'data' is currently retrieved but unused.
	Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_ROOTS, idtmp));
	// TODO
	m_updateRootPeers_l_roots_certs(tPtr);
}
  25. SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
  26. {
  27. RWMutex::Lock _l(m_peers_l);
  28. SharedPtr< Peer > &hp = m_peers[peer->address()];
  29. if (hp)
  30. return hp;
  31. m_loadCached(tPtr, peer->address(), hp);
  32. if (hp)
  33. return hp;
  34. hp = peer;
  35. return peer;
  36. }
  37. SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
  38. {
  39. if ((id != RR->identity) && id.locallyValidate()) {
  40. RWMutex::Lock l1(m_roots_l);
  41. // A null pointer in the set of certificates specifying a root indicates that
  42. // the root has been directly added.
  43. m_roots[id.fingerprint()].insert(SharedPtr< const Certificate >());
  44. {
  45. Mutex::Lock certsLock(m_certs_l);
  46. m_updateRootPeers_l_roots_certs(tPtr);
  47. }
  48. m_writeRootList_l_roots(tPtr);
  49. for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
  50. if ((*p)->identity() == id)
  51. return *p;
  52. }
  53. }
  54. return SharedPtr< Peer >();
  55. }
// Remove a directly added root by its address.
//
// NOTE(review): this is an unimplemented stub — it takes the write lock,
// does nothing with 'address', and unconditionally reports success.
// Callers currently cannot distinguish "removed" from "not implemented".
bool Topology::removeRoot(void *const tPtr, Address address)
{
	RWMutex::Lock l1(m_roots_l);
	// TODO
	return true;
}
  62. struct p_RootRankingComparisonOperator
  63. {
  64. ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
  65. {
  66. // Sort roots first in order of which root has spoken most recently, but
  67. // only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
  68. // means that living roots that seem responsive are ranked the same. Then
  69. // they're sorted in descending order of latency so that the apparently
  70. // fastest root is ranked first.
  71. const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
  72. const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
  73. if (alr < blr) {
  74. return true;
  75. } else if (blr == alr) {
  76. const int bb = b->latency();
  77. if (bb < 0)
  78. return true;
  79. return bb < a->latency();
  80. }
  81. }
  82. };
  83. void Topology::rankRoots()
  84. {
  85. RWMutex::Lock l1(m_roots_l);
  86. std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootRankingComparisonOperator());
  87. }
  88. void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
  89. {
  90. // Peer and path delete operations are batched to avoid holding write locks on
  91. // these structures for any length of time. A list is compiled in read mode,
  92. // then the write lock is acquired for each delete. This adds overhead if there
  93. // are a lot of deletions, but that's not common.
  94. // Delete peers that are stale or offline.
  95. {
  96. Vector< Address > toDelete;
  97. {
  98. RWMutex::RLock l1(m_peers_l);
  99. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
  100. // TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
  101. // a network frame or non-trivial control packet.
  102. if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.find(i->second->identity().fingerprint()) == m_roots.end()))
  103. toDelete.push_back(i->first);
  104. }
  105. }
  106. for (Vector< Address >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
  107. RWMutex::Lock l1(m_peers_l);
  108. const Map< Address, SharedPtr< Peer > >::iterator p(m_peers.find(*i));
  109. if (likely(p != m_peers.end())) {
  110. p->second->save(tPtr);
  111. m_peers.erase(p);
  112. }
  113. }
  114. }
  115. // Delete paths that are no longer held by anyone else ("weak reference" type behavior).
  116. {
  117. Vector< uint64_t > toDelete;
  118. {
  119. RWMutex::RLock l1(m_paths_l);
  120. for (Map< uint64_t, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end(); ++i) {
  121. if (i->second.weakGC())
  122. toDelete.push_back(i->first);
  123. }
  124. }
  125. for (Vector< uint64_t >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
  126. RWMutex::Lock l1(m_paths_l);
  127. const Map< uint64_t, SharedPtr< Path > >::iterator p(m_paths.find(*i));
  128. if (likely(p != m_paths.end()))
  129. m_paths.erase(p);
  130. }
  131. }
  132. // Clean any expired certificates
  133. {
  134. Mutex::Lock l1(m_certs_l);
  135. m_cleanCertificates_l_certs(now);
  136. }
  137. }
  138. void Topology::saveAll(void *tPtr)
  139. {
  140. RWMutex::RLock l(m_peers_l);
  141. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
  142. i->second->save(tPtr);
  143. }
// Add a certificate to the local certificate store.
//
// Deduplicates by serial number, verifies the certificate up to a trusted
// root, enforces newest-wins for certificates sharing a subject unique ID,
// indexes the certificate by serial and by each subject identity, re-checks
// existing certificate chains, and finally refreshes the root peer list
// (certificates may enumerate roots).
//
// @param tPtr       Thread pointer passed through to state object callbacks
// @param cert       Certificate to add (copied into the store)
// @param now        Current time in milliseconds
// @param localTrust Local trust flags to associate with this certificate
// @return ZT_CERTIFICATE_ERROR_NONE on success (including "already have this
//         exact certificate"), or the verification/conflict error code.
ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert, const int64_t now, const unsigned int localTrust)
{
	Mutex::Lock certsLock(m_certs_l);
	// Check to see if we already have this specific certificate.
	const SHA384Hash serial(cert.serialNo);
	if (m_certs.find(serial) != m_certs.end())
		return ZT_CERTIFICATE_ERROR_NONE;
	// Verify certificate all the way to a trusted root.
	const ZT_CertificateError err = m_verifyCertificate_l_certs(cert, now, localTrust, false);
	if (err != ZT_CERTIFICATE_ERROR_NONE)
		return err;
	// Create entry containing copy of certificate and trust flags.
	const std::pair< SharedPtr< const Certificate >, unsigned int > certEntry(SharedPtr< const Certificate >(new Certificate(cert)), localTrust);
	// If the subject contains a unique ID, check if we already have a cert for the
	// same uniquely identified subject. If so, check its subject timestamp and keep
	// the one we have if newer. Otherwise replace it. Note that the verification
	// function will have checked the unique ID proof signature already if a unique
	// ID was present.
	FCV< uint8_t, ZT_CERTIFICATE_MAX_UNIQUE_ID_SIZE > uniqueId(cert.subject.uniqueId, cert.subject.uniqueIdSize);
	if (!uniqueId.empty()) {
		std::pair< SharedPtr< const Certificate >, unsigned int > &bySubjectUniqueId = m_certsBySubjectUniqueId[uniqueId];
		if (bySubjectUniqueId.first) {
			if (bySubjectUniqueId.first->subject.timestamp >= cert.subject.timestamp)
				return ZT_CERTIFICATE_ERROR_HAVE_NEWER_CERT;
			// Existing cert is older: purge it from all indexes. This also
			// erases the unique ID map entry (invalidating the reference
			// above), so the new entry is re-inserted by key, not via the
			// now-dangling reference.
			m_eraseCertificate_l_certs(bySubjectUniqueId.first);
			m_certsBySubjectUniqueId[uniqueId] = certEntry;
		} else {
			bySubjectUniqueId = certEntry;
		}
	}
	// Save certificate by serial number.
	m_certs[serial] = certEntry;
	// Add certificate to sets of certificates whose subject references a given identity.
	for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
		const Identity *const ii = reinterpret_cast<const Identity *>(cert.subject.identities[i].identity);
		m_certsBySubjectIdentity[ii->fingerprint()].insert(certEntry);
	}
	// Clean any certificates whose chains are now broken, which can happen if there was
	// an update that replaced an old cert with a given unique ID. Otherwise this generally
	// does nothing here.
	m_cleanCertificates_l_certs(now);
	// Refresh the root peers lists, since certs may enumerate roots.
	{
		RWMutex::Lock rootsLock(m_roots_l);
		m_updateRootPeers_l_roots_certs(tPtr);
	}
	return ZT_CERTIFICATE_ERROR_NONE;
}
  192. void Topology::m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &cert)
  193. {
  194. // assumes m_certs is locked for writing
  195. m_certsBySubjectUniqueId.erase(FCV< uint8_t, ZT_CERTIFICATE_MAX_UNIQUE_ID_SIZE >(cert->subject.uniqueId, cert->subject.uniqueIdSize));
  196. m_certs.erase(SHA384Hash(cert->serialNo));
  197. for (unsigned int i = 0; i < cert->subject.identityCount; ++i) {
  198. const Identity *const ii = reinterpret_cast<const Identity *>(cert->subject.identities[i].identity);
  199. Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::iterator bySubjectIdentity(m_certsBySubjectIdentity.find(ii->fingerprint()));
  200. if (bySubjectIdentity != m_certsBySubjectIdentity.end()) {
  201. bySubjectIdentity->second.erase(cert);
  202. if (bySubjectIdentity->second.empty())
  203. m_certsBySubjectIdentity.erase(bySubjectIdentity);
  204. }
  205. }
  206. }
  207. void Topology::m_cleanCertificates_l_certs(int64_t now)
  208. {
  209. // assumes m_certs is locked for writing
  210. Vector< SharedPtr< const Certificate > > toDelete;
  211. for (;;) {
  212. for (Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > >::iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
  213. const ZT_CertificateError err = m_verifyCertificate_l_certs(*(c->second.first), now, c->second.second, true);
  214. if (err != ZT_CERTIFICATE_ERROR_NONE)
  215. toDelete.push_back(c->second.first);
  216. }
  217. if (toDelete.empty())
  218. break;
  219. for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c)
  220. m_eraseCertificate_l_certs(*c);
  221. toDelete.clear();
  222. }
  223. }
  224. bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, const int64_t now) const
  225. {
  226. // assumes m_certs is at least locked for reading
  227. Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::const_iterator c = m_certsBySubjectIdentity.find(reinterpret_cast<const Identity *>(current->issuer)->fingerprint());
  228. if (c != m_certsBySubjectIdentity.end()) {
  229. for (Map< SharedPtr< const Certificate >, unsigned int >::const_iterator cc(c->second.begin()); cc != c->second.end(); ++cc) {
  230. if (
  231. (cc->first->maxPathLength > current->maxPathLength) &&
  232. (cc->first->validity[0] <= now) && // not before now
  233. (cc->first->validity[1] >= now) && // not after now
  234. (cc->first->validity[0] <= current->timestamp) && // not before child cert's timestamp
  235. (cc->first->validity[1] >= current->timestamp) // not after child cert's timestamp
  236. ) {
  237. if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) != 0)
  238. return true;
  239. if (m_verifyCertificateChain_l_certs(cc->first.ptr(), now))
  240. return true;
  241. }
  242. }
  243. }
  244. return false;
  245. }
  246. ZT_CertificateError Topology::m_verifyCertificate_l_certs(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
  247. {
  248. // assumes m_certs is at least locked for reading
  249. if ((cert.validity[0] > now) || (cert.validity[1] < now))
  250. return ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
  251. if (!skipSignatureCheck) {
  252. const ZT_CertificateError ce = cert.verify();
  253. if (ce != ZT_CERTIFICATE_ERROR_NONE)
  254. return ce;
  255. }
  256. if ((localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) {
  257. if (!m_verifyCertificateChain_l_certs(&cert, now))
  258. return ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
  259. }
  260. }
  261. void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer)
  262. {
  263. try {
  264. uint64_t id[2];
  265. id[0] = zta.toInt();
  266. id[1] = 0;
  267. Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
  268. if (data.size() > 8) {
  269. const uint8_t *d = data.data();
  270. int dl = (int)data.size();
  271. const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
  272. Peer *const p = new Peer(RR);
  273. int n = p->unmarshal(d + 8, dl - 8);
  274. if (n < 0) {
  275. delete p;
  276. return;
  277. }
  278. if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
  279. // TODO: handle many peers, same address (?)
  280. peer.set(p);
  281. return;
  282. }
  283. }
  284. } catch (...) {
  285. peer.zero();
  286. }
  287. }
// Persist the current root list as ZT_STATE_OBJECT_ROOTS.
// Currently a no-op: the implementation below is disabled (#if 0) pending
// rework against the new m_roots representation (see TODO).
//
// NOTE(review): the lock comment below says m_peers_l, but the _l_roots
// suffix and the caller (addRoot, which holds m_roots_l) suggest m_roots_l
// is the lock actually intended here — confirm before re-enabling.
void Topology::m_writeRootList_l_roots(void *tPtr)
{
	// assumes m_peers_l is locked for read or write
	// TODO
#if 0
	uint8_t *const roots = (uint8_t *)malloc((ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 2) * m_roots.size());
	if (roots) { // sanity check
		int p = 0;
		for (Set< Identity >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
			const int pp = r->marshal(roots + p, false);
			if (pp > 0)
				p += pp;
		}
		uint64_t id[2];
		id[0] = 0;
		id[1] = 0;
		RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_ROOTS, id, roots, (unsigned int)p);
		free(roots);
	}
#endif
}
// Rebuild m_rootPeers (the ranked list of peer objects acting as roots)
// from m_roots. Currently a no-op: the implementation below is disabled
// (#if 0) pending rework against the new m_roots representation (see TODO),
// so as written this leaves m_rootPeers unchanged.
//
// NOTE(review): the lock comment below mentions m_peers_l, while the
// _l_roots_certs suffix and visible callers (addRoot, addCertificate) hold
// m_roots_l and m_certs_l — confirm the intended locks before re-enabling.
void Topology::m_updateRootPeers_l_roots_certs(void *tPtr)
{
	// assumes m_peers_l and m_certs_l are locked for write
	// TODO
#if 0
	Vector< SharedPtr< Peer > > rp;
	for (Map< Identity, Set< SubscriptionKeyHash > >::iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
		Map< Address, SharedPtr< Peer > >::iterator pp(m_peers.find(r->first.address()));
		SharedPtr< Peer > p;
		if (pp != m_peers.end())
			p = pp->second;
		if (!p)
			m_loadCached(tPtr, r->first.address(), p);
		if ((!p) || (p->identity() != r->first)) {
			p.set(new Peer(RR));
			p->init(r->first);
			m_peers[r->first.address()] = p;
		}
		rp.push_back(p);
	}
	std::sort(rp.begin(), rp.end(), p_RootSortComparisonOperator());
	m_rootPeers.swap(rp);
#endif
}
  333. } // namespace ZeroTier