@@ -12,21 +12,24 @@

 /****/

 #include "Topology.hpp"
+#include "Defaults.hpp"

 namespace ZeroTier {

-static const SharedPtr< const Certificate > s_nullCert;
-
 Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now) :
-	RR(renv)
+	RR(renv),
+	m_lastRankedRoots(0)
 {
 	char tmp[32];
-	Vector< uint8_t > trustData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256));
-
 	Dictionary d;
+
+	Vector< uint8_t > trustData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256));
 	if (trustData.empty() || (!d.decode(trustData.data(), (unsigned int)trustData.size()))) {
-		// TODO: import default certificates including default root set
-	} else {
+		if (!d.decode(Defaults::CERTIFICATES, Defaults::CERTIFICATES_BYTES))
+			d.clear();
+	}
+
+	if (!d.empty()) {
 		const unsigned long certCount = (unsigned long)d.getUI("c$");
 		for (unsigned long idx = 0; idx < certCount; ++idx) {
 			uint64_t id[6];
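The constructor above now tries the persisted ZT_STATE_OBJECT_TRUST_STORE blob first and falls back to the compiled-in Defaults::CERTIFICATES when that blob is missing or fails to decode, clearing the dictionary rather than keeping a half-decoded one. A minimal standalone sketch of that load-then-fallback shape, using a hypothetical decode helper rather than ZeroTier's Dictionary API:

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for Dictionary::decode(); returns false on bad input.
    static bool decodeDict(const uint8_t *data, size_t len, std::map<std::string, std::string> &out)
    {
        if ((!data) || (len == 0))
            return false;
        out["entries"] = std::to_string(len); // placeholder for real parsing
        return true;
    }

    static const uint8_t DEFAULT_CERTIFICATES[] = {0x01, 0x02, 0x03}; // stand-in for Defaults::CERTIFICATES

    int main()
    {
        std::vector<uint8_t> persisted; // imagine stateObjectGet() returned this (empty here)
        std::map<std::string, std::string> d;

        // Prefer the persisted store; fall back to baked-in defaults; never
        // keep a partially decoded dictionary around.
        if (persisted.empty() || (!decodeDict(persisted.data(), persisted.size(), d))) {
            if (!decodeDict(DEFAULT_CERTIFICATES, sizeof(DEFAULT_CERTIFICATES), d))
                d.clear();
        }
        return d.empty() ? 1 : 0;
    }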
@@ -39,19 +42,9 @@ Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now
 					addCertificate(tPtr, cert, now, (unsigned int)d.getUI(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx)), false, false, false);
 			}
 		}
-
-		const unsigned long localRootCount = (unsigned long)d.getUI("lr$");
-		for (unsigned long idx = 0; idx < localRootCount; ++idx) {
-			Identity lr;
-			if (d.getO(Dictionary::arraySubscript(tmp, sizeof(tmp), "lr$.i", idx), lr)) {
-				if (lr)
-					m_roots[lr].insert(s_nullCert);
-			}
-		}
+		m_cleanCertificates(tPtr, now);
+		m_updateRootPeers(tPtr, now);
 	}
-
-	m_cleanCertificates_l_certs(now);
-	m_updateRootPeers_l_roots_certs(tPtr);
 }

 SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
@@ -67,79 +60,19 @@ SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
 	return peer;
 }

-SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
+void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
 {
-	if ((id != RR->identity) && id.locallyValidate()) {
-		RWMutex::Lock l1(m_roots_l);
-
-		// A null pointer in the set of certificates specifying a root indicates that
-		// the root has been directly added.
-		m_roots[id].insert(s_nullCert);
-
-		{
-			Mutex::Lock certsLock(m_certs_l);
-			m_updateRootPeers_l_roots_certs(tPtr);
-			m_writeTrustStore_l_roots_certs(tPtr);
-		}
-
-		for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
-			if ((*p)->identity() == id)
-				return *p;
-		}
-	}
-	return SharedPtr< Peer >();
-}
-
-bool Topology::removeRoot(void *const tPtr, Address address)
-{
-	RWMutex::Lock l1(m_roots_l);
-	bool removed = false;
-	for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
-		if (r->first.address() == address) {
-			r->second.erase(s_nullCert);
-			if (r->second.empty()) {
-				m_roots.erase(r++);
-				{
-					Mutex::Lock certsLock(m_certs_l);
-					m_updateRootPeers_l_roots_certs(tPtr);
-					m_writeTrustStore_l_roots_certs(tPtr);
-				}
-				removed = true;
-			} else {
-				++r;
-			}
-		} else ++r;
+	allPeers.clear();
+	{
+		RWMutex::RLock l(m_peers_l);
+		allPeers.reserve(m_peers.size());
+		for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
+			allPeers.push_back(i->second);
 	}
-	return removed;
-}
-
-struct p_RootRankingComparisonOperator
-{
-	ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
 	{
-		// Sort roots first in order of which root has spoken most recently, but
-		// only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
-		// means that living roots that seem responsive are ranked the same. Then
-		// they're sorted in descending order of latency so that the apparently
-		// fastest root is ranked first.
-		const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
-		const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
-		if (alr < blr) {
-			return true;
-		} else if (blr == alr) {
-			const int bb = b->latency();
-			if (bb < 0)
-				return true;
-			return bb < a->latency();
-		}
-		return false;
+		RWMutex::RLock l(m_roots_l);
+		rootPeers = m_roots;
 	}
-};
-
-void Topology::rankRoots()
-{
-	RWMutex::Lock l1(m_roots_l);
-	std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootRankingComparisonOperator());
 }

 void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
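addRoot() and removeRoot() are gone; allPeers() above copies each container out under its own read lock, so no lock is held while the caller works on the snapshots. The same shape with standard-library primitives (std::shared_mutex standing in for ZeroTier's RWMutex, which is an assumption about its semantics, not its API):

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <shared_mutex>
    #include <vector>

    struct Peer { int addr; };

    static std::map<int, std::shared_ptr<Peer>> g_peers;
    static std::shared_mutex g_peersLock;
    static std::vector<std::shared_ptr<Peer>> g_roots;
    static std::shared_mutex g_rootsLock;

    static void allPeers(std::vector<std::shared_ptr<Peer>> &peersOut,
                         std::vector<std::shared_ptr<Peer>> &rootsOut)
    {
        peersOut.clear();
        {
            std::shared_lock<std::shared_mutex> l(g_peersLock); // read lock only
            peersOut.reserve(g_peers.size());
            for (const auto &kv : g_peers)
                peersOut.push_back(kv.second);
        } // lock released before the caller touches the snapshot
        {
            std::shared_lock<std::shared_mutex> l(g_rootsLock);
            rootsOut = g_roots; // whole-vector copy is the snapshot
        }
    }

    int main()
    {
        g_peers[1] = std::make_shared<Peer>(Peer{1});
        g_roots.push_back(g_peers[1]);
        std::vector<std::shared_ptr<Peer>> p, r;
        allPeers(p, r);
        std::printf("%zu peers, %zu roots\n", p.size(), r.size());
        return 0;
    }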
@@ -152,23 +85,31 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
 	// Clean any expired certificates
 	{
 		Mutex::Lock l1(m_certs_l);
-		if (m_cleanCertificates_l_certs(now)) {
+		if (m_cleanCertificates(tPtr, now)) {
+			RWMutex::Lock l3(m_peers_l);
 			RWMutex::Lock l2(m_roots_l);
-			m_updateRootPeers_l_roots_certs(tPtr);
+			m_updateRootPeers(tPtr, now);
 		}
 	}

-	// Delete peers that are stale or offline.
+	// Delete peers that are stale or offline and are not roots.
 	{
+		Vector< uintptr_t > rootLookup;
+		{
+			RWMutex::RLock l2(m_roots_l);
+			rootLookup.reserve(m_roots.size());
+			for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
+				rootLookup.push_back((uintptr_t)r->ptr());
+		}
+		std::sort(rootLookup.begin(), rootLookup.end());
+
 		Vector< Address > toDelete;
 		{
 			RWMutex::RLock l1(m_peers_l);
-			RWMutex::RLock l2(m_roots_l);
-			for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();
-				++i) {
+			for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
 				// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
 				// a network frame or non-trivial control packet.
-				if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.find(i->second->identity()) == m_roots.end()))
+				if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (!std::binary_search(rootLookup.begin(), rootLookup.end(), (uintptr_t)i->second.ptr())))
 					toDelete.push_back(i->first);
 			}
 		}
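The stale-peer sweep above no longer holds m_roots_l inside the peer loop: it snapshots the root peers once as a sorted vector of raw pointer values, then answers "is this peer a root?" with std::binary_search. A compilable sketch of that snapshot-and-search pattern using plain std:: containers:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Peer { int id; };

    int main()
    {
        std::vector<std::shared_ptr<Peer>> peers;
        for (int i = 0; i < 5; ++i)
            peers.push_back(std::make_shared<Peer>(Peer{i}));

        // Snapshot the root subset once, as raw pointer values, and sort it...
        std::vector<uintptr_t> rootLookup;
        rootLookup.push_back((uintptr_t)peers[1].get());
        rootLookup.push_back((uintptr_t)peers[3].get());
        std::sort(rootLookup.begin(), rootLookup.end());

        // ...then each membership test in the sweep is O(log n) with no lock held.
        for (const auto &p : peers) {
            const bool isRoot = std::binary_search(rootLookup.begin(), rootLookup.end(), (uintptr_t)p.get());
            std::printf("peer %d %s\n", p->id, isRoot ? "is a root" : "is ordinary");
        }
        return 0;
    }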
@@ -187,8 +128,7 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
 		Vector< UniqueID > toDelete;
 		{
 			RWMutex::RLock l1(m_paths_l);
-			for (Map< UniqueID, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();
-				++i) {
+			for (Map< UniqueID, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end(); ++i) {
 				if (i->second.weakGC())
 					toDelete.push_back(i->first);
 			}
@@ -204,16 +144,35 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)

 void Topology::saveAll(void *tPtr)
 {
-	RWMutex::RLock l(m_peers_l);
-	for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();
-		++i)
-		i->second->save(tPtr);
+	{
+		RWMutex::RLock l(m_peers_l);
+		for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
+			i->second->save(tPtr);
+		}
+	}
+	{
+		char tmp[32];
+		Dictionary d;
+		{
+			Mutex::Lock l(m_certs_l);
+			unsigned long idx = 0;
+			d.add("c$", (uint64_t)m_certs.size());
+			for (Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
+				d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)].assign(c->first.data, c->first.data + ZT_SHA384_DIGEST_SIZE);
+				d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx), (uint64_t)c->second.second);
+				++idx;
+			}
+		}
+		Vector< uint8_t > trustStore;
+		d.encode(trustStore);
+		RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256, trustStore.data(), (unsigned int)trustStore.size());
+	}
 }

 ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert, const int64_t now, const unsigned int localTrust, const bool writeToLocalStore, const bool refreshRootSets, const bool verify)
 {
 	{
-		Mutex::Lock certsLock(m_certs_l);
+		Mutex::Lock l1(m_certs_l);

 		// Check to see if we already have this specific certificate.
 		const SHA384Hash serial(cert.serialNo);
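saveAll() now owns trust-store serialization (the removed m_writeTrustStore_l_roots_certs did this before): a count under "c$" plus one subscripted key per field per certificate. The exact key text comes from Dictionary::arraySubscript(), so the "c$.s#0"-style keys below are an assumed illustration of the convention, not its verified output:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        // (serial, localTrust) pairs standing in for the m_certs map entries.
        const std::vector<std::pair<std::string, unsigned int>> certs = {
            {"serialA", 1},
            {"serialB", 3},
        };

        std::map<std::string, std::string> d;
        d["c$"] = std::to_string(certs.size()); // element count first
        unsigned long idx = 0;
        for (const auto &c : certs) {
            // "#<index>" is an assumed rendering of arraySubscript()'s output.
            d["c$.s#" + std::to_string(idx)] = c.first;
            d["c$.lt#" + std::to_string(idx)] = std::to_string(c.second);
            ++idx;
        }

        for (const auto &kv : d)
            std::printf("%s=%s\n", kv.first.c_str(), kv.second.c_str());
        return 0;
    }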
@@ -223,7 +182,7 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
 		// Verify certificate all the way to a trusted root. This also verifies inner
 		// signatures such as those of locators or the subject unique ID.
 		if (verify) {
-			const ZT_CertificateError err = m_verifyCertificate_l_certs(cert, now, localTrust, false);
+			const ZT_CertificateError err = m_verifyCertificate(cert, now, localTrust, false);
 			if (err != ZT_CERTIFICATE_ERROR_NONE)
 				return err;
 		}
@@ -237,13 +196,14 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
 		// function will have checked the unique ID proof signature already if a unique
 		// ID was present.
 		if ((cert.subject.uniqueId) && (cert.subject.uniqueIdSize > 0)) {
-			const Vector< uint8_t > uniqueId(cert.subject.uniqueId, cert.subject.uniqueId + cert.subject.uniqueIdSize);
-			std::pair< SharedPtr< const Certificate >, unsigned int > &bySubjectUniqueId = m_certsBySubjectUniqueId[uniqueId];
+			SHA384Hash uniqueIdHash;
+			SHA384(uniqueIdHash.data, cert.subject.uniqueId, cert.subject.uniqueIdSize);
+			std::pair< SharedPtr< const Certificate >, unsigned int > &bySubjectUniqueId = m_certsBySubjectUniqueId[uniqueIdHash];
 			if (bySubjectUniqueId.first) {
 				if (bySubjectUniqueId.first->subject.timestamp >= cert.subject.timestamp)
 					return ZT_CERTIFICATE_ERROR_HAVE_NEWER_CERT;
-				m_eraseCertificate_l_certs(bySubjectUniqueId.first);
-				m_certsBySubjectUniqueId[uniqueId] = certEntry; // reference bySubjectUniqueId no longer valid
+				m_eraseCertificate(tPtr, bySubjectUniqueId.first, &uniqueIdHash);
+				m_certsBySubjectUniqueId[uniqueIdHash] = certEntry;
 			} else {
 				bySubjectUniqueId = certEntry;
 			}
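The unique-ID index above switches its key from a heap-copied Vector< uint8_t > to a SHA384 of the ID, so map keys are fixed-size and cheap to compare. The idea in miniature, with a toy 64-bit FNV-1a standing in for SHA384 (unlike SHA384, FNV can realistically collide, so this is illustration only):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Toy stand-in for SHA384: fold a variable-length blob into a fixed-width key.
    static uint64_t fnv1a(const std::vector<uint8_t> &data)
    {
        uint64_t h = 0xcbf29ce484222325ULL;
        for (uint8_t b : data) {
            h ^= b;
            h *= 0x100000001b3ULL;
        }
        return h;
    }

    int main()
    {
        std::map<uint64_t, std::string> bySubjectUniqueId; // fixed-size keys
        const std::vector<uint8_t> uniqueId = {0xde, 0xad, 0xbe, 0xef};
        bySubjectUniqueId[fnv1a(uniqueId)] = "certA";
        // A lookup re-derives the key; no variable-length buffer lives in the map.
        std::printf("%s\n", bySubjectUniqueId[fnv1a(uniqueId)].c_str());
        return 0;
    }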
@@ -255,7 +215,8 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
 		// Add certificate to sets of certificates whose subject references a given identity.
 		for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
 			const Identity *const ii = reinterpret_cast<const Identity *>(cert.subject.identities[i].identity);
-			m_certsBySubjectIdentity[ii->fingerprint()].insert(certEntry);
+			if (ii)
+				m_certsBySubjectIdentity[ii->fingerprint()].insert(certEntry);
 		}

 		// Clean any certificates whose chains are now broken, which can happen if there was
@@ -264,12 +225,13 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
 		// certificates, which right now only happens on startup when they're loaded from the
 		// local certificate cache.
 		if (verify)
-			m_cleanCertificates_l_certs(now);
+			m_cleanCertificates(tPtr, now);

 		// Refresh the root peers lists, since certs may enumerate roots.
 		if (refreshRootSets) {
-			RWMutex::Lock rootsLock(m_roots_l);
-			m_updateRootPeers_l_roots_certs(tPtr);
+			RWMutex::Lock l3(m_peers_l);
+			RWMutex::Lock l2(m_roots_l);
+			m_updateRootPeers(tPtr, now);
 		}
 	}
@@ -284,14 +246,47 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
 	return ZT_CERTIFICATE_ERROR_NONE;
 }

-void Topology::m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &cert)
+struct p_RootRankingComparisonOperator
+{
+	ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
+	{
+		// Sort roots first in order of which root has spoken most recently, but
+		// only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
+		// means that living roots that seem responsive are ranked the same. Then
+		// they're sorted in descending order of latency so that the apparently
+		// fastest root is ranked first.
+		const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
+		const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
+		if (alr < blr) {
+			return true;
+		} else if (blr == alr) {
+			const int bb = b->latency();
+			if (bb < 0)
+				return true;
+			return bb < a->latency();
+		}
+		return false;
+	}
+};
+
+void Topology::m_rankRoots(const int64_t now)
+{
+	// assumes m_roots is locked
+	m_lastRankedRoots = now;
+	std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
+}
+
+void Topology::m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash)
 {
 	// assumes m_certs is locked for writing

-	m_certs.erase(SHA384Hash(cert->serialNo));
+	const SHA384Hash serialNo(cert->serialNo);
+	m_certs.erase(serialNo);
+
+	RR->node->stateObjectDelete(tPtr, ZT_STATE_OBJECT_CERT, serialNo.data);

-	if (cert->subject.uniqueIdSize > 0)
-		m_certsBySubjectUniqueId.erase(Vector< uint8_t >(cert->subject.uniqueId, cert->subject.uniqueId + cert->subject.uniqueIdSize));
+	if (uniqueIdHash)
+		m_certsBySubjectUniqueId.erase(*uniqueIdHash);

 	for (unsigned int i = 0; i < cert->subject.identityCount; ++i) {
 		const Identity *const ii = reinterpret_cast<const Identity *>(cert->subject.identities[i].identity);
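The comparator moved here (now driven by m_rankRoots() and the new m_lastRankedRoots timestamp) ranks roots by last-receive time quantized to half of ZT_PATH_KEEPALIVE_PERIOD, so every recently alive root ties on recency and only then does latency break the tie. The bucketing trick in isolation, with a stand-in period constant and the "freshest bucket first, lowest latency first" ordering the surrounding comment intends:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const int64_t KEEPALIVE_PERIOD = 20000; // stand-in for ZT_PATH_KEEPALIVE_PERIOD

    struct Root { int64_t lastReceive; int latency; };

    int main()
    {
        std::vector<Root> roots = {{100000, 40}, {100900, 15}, {50000, 5}};

        std::sort(roots.begin(), roots.end(), [](const Root &a, const Root &b) {
            // Integer division collapses timestamps into half-keepalive buckets,
            // so 100000 and 100900 above land in the same bucket and tie on recency.
            const int64_t ab = a.lastReceive / (KEEPALIVE_PERIOD / 2);
            const int64_t bb = b.lastReceive / (KEEPALIVE_PERIOD / 2);
            if (ab != bb)
                return ab > bb;           // fresher bucket first
            return a.latency < b.latency; // within a bucket, lower latency first
        });

        for (const Root &r : roots)
            std::printf("lastReceive=%lld latency=%d\n", (long long)r.lastReceive, r.latency);
        return 0;
    }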
@@ -305,7 +300,7 @@ void Topology::m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &
 	}
 }

-bool Topology::m_cleanCertificates_l_certs(int64_t now)
+bool Topology::m_cleanCertificates(void *tPtr, int64_t now)
 {
 	// assumes m_certs is locked for writing
@@ -316,24 +311,31 @@ bool Topology::m_cleanCertificates_l_certs(int64_t now)
 			// Verify, but the last boolean option tells it to skip signature checks as this would
 			// already have been done. This will therefore just check the path and validity times
 			// of the certificate.
-			const ZT_CertificateError err = m_verifyCertificate_l_certs(*(c->second.first), now, c->second.second, true);
+			const ZT_CertificateError err = m_verifyCertificate(*(c->second.first), now, c->second.second, true);
 			if (err != ZT_CERTIFICATE_ERROR_NONE)
 				toDelete.push_back(c->second.first);
 		}

 		if (toDelete.empty())
 			break;
-
 		deleted = true;
-		for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c)
-			m_eraseCertificate_l_certs(*c);
+
+		SHA384Hash uniqueIdHash;
+		for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c) {
+			if ((*c)->subject.uniqueId) {
+				SHA384(uniqueIdHash.data, (*c)->subject.uniqueId, (*c)->subject.uniqueIdSize);
+				m_eraseCertificate(tPtr, *c, &uniqueIdHash);
+			} else {
+				m_eraseCertificate(tPtr, *c, nullptr);
+			}
+		}
 		toDelete.clear();
 	}

 	return deleted;
 }

-bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, const int64_t now) const
+bool Topology::m_verifyCertificateChain(const Certificate *current, const int64_t now) const
 {
 	// assumes m_certs is at least locked for reading
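m_cleanCertificates() loops until a full pass deletes nothing, because removing one certificate can orphan another that still verified earlier in the same pass. The fixed-point shape in miniature, with a toy depends-on-predecessor rule standing in for real chain verification:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // Each entry "depends on" the value one less than itself; 0 is self-valid.
        std::vector<int> certs = {0, 1, 5, 6};

        bool deletedAny = false;
        for (;;) {
            std::vector<int> toDelete;
            for (int c : certs) {
                // "Verify": valid if self-valid or its dependency is still present.
                const bool ok = (c == 0) || (std::find(certs.begin(), certs.end(), c - 1) != certs.end());
                if (!ok)
                    toDelete.push_back(c);
            }
            if (toDelete.empty())
                break; // fixed point reached: nothing newly invalid
            deletedAny = true;
            for (int c : toDelete)
                certs.erase(std::find(certs.begin(), certs.end(), c));
        }

        // Deleting 5 orphans 6 on the next pass, so {0, 1} remain.
        std::printf("%zu left, deletedAny=%d\n", certs.size(), (int)deletedAny);
        return 0;
    }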
@@ -350,7 +352,7 @@ bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, cons
 			) {
 				if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) != 0)
 					return true;
-				if (m_verifyCertificateChain_l_certs(cc->first.ptr(), now))
+				if (m_verifyCertificateChain(cc->first.ptr(), now))
 					return true;
 			}
 		}
@@ -359,7 +361,7 @@ bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, cons
 	return false;
 }

-ZT_CertificateError Topology::m_verifyCertificate_l_certs(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
+ZT_CertificateError Topology::m_verifyCertificate(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
 {
 	// assumes m_certs is at least locked for reading
@@ -378,7 +380,7 @@ ZT_CertificateError Topology::m_verifyCertificate_l_certs(const Certificate &cer
 	// If this is a root CA, we can skip this as we're already there. Otherwise we
 	// recurse up the tree until we hit a root CA.
 	if ((localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) {
-		if (!m_verifyCertificateChain_l_certs(&cert, now))
+		if (!m_verifyCertificateChain(&cert, now))
 			return ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
 	}
@@ -430,69 +432,44 @@ SharedPtr< Peer > Topology::m_peerFromCached(void *tPtr, const Address &zta)
 	return p;
 }

-void Topology::m_updateRootPeers_l_roots_certs(void *tPtr)
+SharedPtr< Path > Topology::m_newPath(const int64_t l, const InetAddress &r, const UniqueID &k)
 {
-	// assumes m_roots_l and m_certs_l are locked for write
-
-	// Clear m_roots but preserve locally added roots (indicated by a null cert ptr entry).
-	for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
-		if (r->second.find(s_nullCert) == r->second.end()) {
-			m_roots.erase(r++);
-		} else {
-			r->second.clear();
-			r->second.insert(s_nullCert);
-			++r;
-		}
-	}
-
-	// Populate m_roots from certificate subject identities from certificates flagged
-	// as local root set certificates.
-	for (SortedMap< Vector< uint8_t >, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectUniqueId.begin()); c != m_certsBySubjectUniqueId.end(); ++c) {
-		if ((c->second.second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET) != 0) {
-			for (unsigned int i = 0; i < c->second.first->subject.identityCount; ++i)
-				m_roots[*reinterpret_cast<const Identity *>(c->second.first->subject.identities[i].identity)].insert(c->second.first);
-		}
-	}
-
-	// Create a new rootPeers vector and swap.
-	Vector< SharedPtr< Peer >> newRootPeers;
-	newRootPeers.reserve(m_roots.size());
-	for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
-		const SharedPtr< Peer > p(this->peer(tPtr, r->first.address(), true));
-		if ((p) && (p->identity() == r->first))
-			newRootPeers.push_back(p);
-	}
-	std::sort(newRootPeers.begin(), newRootPeers.end(), p_RootRankingComparisonOperator());
-	m_rootPeers.swap(newRootPeers);
+	SharedPtr< Path > p(new Path(l, r));
+	RWMutex::Lock lck(m_paths_l);
+	SharedPtr< Path > &p2 = m_paths[k];
+	if (p2)
+		return p2;
+	p2 = p;
+	return p;
 }

-void Topology::m_writeTrustStore_l_roots_certs(void *tPtr) const
+void Topology::m_updateRootPeers(void *tPtr, const int64_t now)
 {
-	// assumes m_roots_l and m_certs_l are locked for write
-
-	char tmp[32];
-	Dictionary d;
-
-	d.add("v", (uint64_t)0); // version
+	// assumes m_certs_l, m_peers_l, and m_roots_l are locked for write

-	unsigned long idx = 0;
-	d.add("c$", (uint64_t)m_certs.size());
-	for (Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
-		d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)].assign(c->first.data, c->first.data + ZT_SHA384_DIGEST_SIZE);
-		d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx), (uint64_t)c->second.second);
-		++idx;
+	Set< Identity > rootIdentities;
+	for (Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectIdentity.begin()); c != m_certsBySubjectIdentity.end(); ++c) {
+		for (Map< SharedPtr< const Certificate >, unsigned int >::const_iterator cc(c->second.begin()); cc != c->second.end(); ++cc) {
+			if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET) != 0) {
+				for (unsigned int i = 0; i < cc->first->subject.identityCount; ++i) {
+					if (cc->first->subject.identities[i].identity)
+						rootIdentities.insert(*reinterpret_cast<const Identity *>(cc->first->subject.identities[i].identity));
+				}
+			}
+		}
 	}

-	unsigned long localRootCount = 0;
-	for (Map< Identity, Set< SharedPtr< const Certificate > > >::const_iterator r(m_roots.begin()); r != m_roots.end();) {
-		if (r->second.find(s_nullCert) != r->second.end())
-			d.addO(Dictionary::arraySubscript(tmp, sizeof(tmp), "lr$.i", localRootCount++), r->first);
+	m_roots.clear();
+	for (Set< Identity >::const_iterator i(rootIdentities.begin()); i != rootIdentities.end(); ++i) {
+		SharedPtr< Peer > &p = m_peers[i->address()];
+		if ((!p) || (p->identity() != *i)) {
+			p.set(new Peer(RR));
+			p->init(*i);
+		}
+		m_roots.push_back(p);
 	}
-	d.add("lr$", (uint64_t)localRootCount);

-	Vector< uint8_t > trustStore;
-	d.encode(trustStore);
-	RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256, trustStore.data(), (unsigned int)trustStore.size());
+	m_rankRoots(now);
 }

 } // namespace ZeroTier
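For reference, m_newPath() above is a race-tolerant get-or-insert: the Path is constructed before the write lock is taken, and if another thread has already populated m_paths[k] that winner is returned and the fresh object is dropped. The same pattern with std::unordered_map and std::shared_ptr (names here are illustrative, not ZeroTier's):

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    struct Path { std::string remote; explicit Path(std::string r) : remote(std::move(r)) {} };

    static std::unordered_map<uint64_t, std::shared_ptr<Path>> g_paths;
    static std::mutex g_pathsLock;

    static std::shared_ptr<Path> newPath(uint64_t key, const std::string &remote)
    {
        // Construct outside the lock to keep the critical section short.
        std::shared_ptr<Path> p = std::make_shared<Path>(remote);
        std::lock_guard<std::mutex> lck(g_pathsLock);
        std::shared_ptr<Path> &slot = g_paths[key]; // default-constructs an empty slot
        if (slot)
            return slot; // another caller won the race; drop our copy
        slot = p;
        return p;
    }

    int main()
    {
        auto a = newPath(42, "10.0.0.1/9993");
        auto b = newPath(42, "10.0.0.2/9993"); // same key: the first insert wins
        std::printf("%s %s\n", a->remote.c_str(), b->remote.c_str());
        return 0;
    }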
|