/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2025-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#include "Topology.hpp"
#include "Defaults.hpp"

#include <algorithm>
  15. namespace ZeroTier {
  16. Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now) :
  17. RR(renv),
  18. m_lastRankedRoots(0)
  19. {
  20. char tmp[32];
  21. Dictionary d;
  22. Vector< uint8_t > trustData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256));
  23. if (trustData.empty() || (!d.decode(trustData.data(), (unsigned int)trustData.size()))) {
  24. if (!d.decode(Defaults::CERTIFICATES, Defaults::CERTIFICATES_BYTES))
  25. d.clear();
  26. }
  27. if (!d.empty()) {
  28. const unsigned long certCount = (unsigned long)d.getUI("c$");
  29. for (unsigned long idx = 0; idx < certCount; ++idx) {
  30. uint64_t id[6];
  31. const Vector< uint8_t > &serialNo = d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)];
  32. if (serialNo.size() == ZT_SHA384_DIGEST_SIZE) {
  33. Utils::copy< 48 >(id, serialNo.data());
  34. Certificate cert;
  35. Vector< uint8_t > enc(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_CERT, id));
  36. if (cert.decode(enc.data(), (unsigned int)enc.size()))
  37. addCertificate(tPtr, cert, now, (unsigned int)d.getUI(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx)), false, false, false);
  38. }
  39. }
  40. m_cleanCertificates(tPtr, now);
  41. m_updateRootPeers(tPtr, now);
  42. }
  43. }
  44. SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
  45. {
  46. RWMutex::Lock _l(m_peers_l);
  47. SharedPtr< Peer > &hp = m_peers[peer->address()];
  48. if (hp)
  49. return hp;
  50. m_loadCached(tPtr, peer->address(), hp);
  51. if (hp)
  52. return hp;
  53. hp = peer;
  54. return peer;
  55. }
  56. void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
  57. {
  58. allPeers.clear();
  59. {
  60. RWMutex::RLock l(m_peers_l);
  61. allPeers.reserve(m_peers.size());
  62. for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
  63. allPeers.push_back(i->second);
  64. }
  65. {
  66. RWMutex::RLock l(m_roots_l);
  67. rootPeers = m_roots;
  68. }
  69. }
  70. void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
  71. {
  72. // Clean any expired certificates, updating roots if they have changed.
  73. {
  74. Mutex::Lock l1(m_certs_l);
  75. if (m_cleanCertificates(tPtr, now)) {
  76. m_writeTrustStore(tPtr);
  77. {
  78. RWMutex::Lock l3(m_peers_l);
  79. RWMutex::Lock l2(m_roots_l);
  80. m_updateRootPeers(tPtr, now);
  81. }
  82. }
  83. }
  84. // Cleaning of peers and paths uses a two pass method to avoid write locking
  85. // m_peers or m_paths for any significant amount of time. This avoids pauses
  86. // on nodes with large numbers of peers or paths.
  87. // Delete peers that are stale or offline and are not roots. First pass: grab
  88. // peers to delete in read lock mode. Second pass: delete peers one by one,
  89. // acquiring hard write lock each time to avoid pauses.
  90. {
  91. Vector< uintptr_t > rootLookup;
  92. {
  93. RWMutex::RLock l2(m_roots_l);
  94. rootLookup.reserve(m_roots.size());
  95. for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
  96. rootLookup.push_back((uintptr_t)r->ptr());
  97. }
  98. Vector< Address > toDelete;
  99. {
  100. RWMutex::RLock l1(m_peers_l);
  101. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
  102. // TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
  103. // a network frame or non-trivial control packet.
  104. if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (std::find(rootLookup.begin(), rootLookup.end(), (uintptr_t)(i->second.ptr())) == rootLookup.end()))
  105. toDelete.push_back(i->first);
  106. }
  107. }
  108. if (!toDelete.empty()) {
  109. ZT_SPEW("garbage collecting %u offline or stale peer objects", (unsigned int)toDelete.size());
  110. for (Vector< Address >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
  111. SharedPtr< Peer > toSave;
  112. {
  113. RWMutex::Lock l1(m_peers_l);
  114. const Map< Address, SharedPtr< Peer > >::iterator p(m_peers.find(*i));
  115. if (p != m_peers.end()) {
  116. p->second.swap(toSave);
  117. m_peers.erase(p);
  118. }
  119. }
  120. if (toSave)
  121. toSave->save(tPtr);
  122. }
  123. }
  124. }
  125. // Delete paths that are no longer held by anyone else ("weak reference" type behavior).
  126. // First pass: make a list of paths with a reference count of 1 meaning they are likely
  127. // orphaned. Second pass: call weakGC() on each of these which does a hard compare/exchange
  128. // and delete those that actually are GC'd. Write lock is aquired only briefly on delete
  129. // just as with peers.
  130. {
  131. Vector< UniqueID > possibleDelete;
  132. {
  133. RWMutex::RLock l1(m_paths_l);
  134. for (Map< UniqueID, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end(); ++i) {
  135. if (i->second.references() <= 1)
  136. possibleDelete.push_back(i->first);
  137. }
  138. }
  139. if (!possibleDelete.empty()) {
  140. ZT_SPEW("garbage collecting (likely) %u orphaned paths", (unsigned int)possibleDelete.size());
  141. for (Vector< UniqueID >::const_iterator i(possibleDelete.begin()); i != possibleDelete.end(); ++i) {
  142. RWMutex::Lock l1(m_paths_l);
  143. Map< UniqueID, SharedPtr< Path > >::iterator p(m_paths.find(*i));
  144. if ((p != m_paths.end()) && p->second.weakGC())
  145. m_paths.erase(p);
  146. }
  147. }
  148. }
  149. }
  150. void Topology::saveAll(void *tPtr)
  151. {
  152. {
  153. RWMutex::RLock l(m_peers_l);
  154. for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
  155. i->second->save(tPtr);
  156. }
  157. {
  158. Mutex::Lock l(m_certs_l);
  159. m_writeTrustStore(tPtr);
  160. }
  161. }
  162. ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert, const int64_t now, const unsigned int localTrust, const bool writeToLocalStore, const bool refreshRootSets, const bool verify)
  163. {
  164. {
  165. const SHA384Hash serial(cert.serialNo);
  166. p_CertEntry certEntry;
  167. Mutex::Lock l1(m_certs_l);
  168. {
  169. Map< SHA384Hash, p_CertEntry >::iterator c(m_certs.find(serial));
  170. if (c != m_certs.end()) {
  171. if (c->second.localTrust == localTrust)
  172. return ZT_CERTIFICATE_ERROR_NONE;
  173. certEntry.certificate = c->second.certificate;
  174. }
  175. }
  176. if (!certEntry.certificate) {
  177. certEntry.certificate.set(new Certificate(cert));
  178. if (verify) {
  179. m_cleanCertificates(tPtr, now);
  180. const ZT_CertificateError err = m_verifyCertificate(cert, now, localTrust, false);
  181. if (err != ZT_CERTIFICATE_ERROR_NONE)
  182. return err;
  183. }
  184. }
  185. certEntry.localTrust = localTrust;
  186. if ((cert.subject.uniqueId) && (cert.subject.uniqueIdSize > 0)) {
  187. SHA384Hash uniqueIdHash;
  188. SHA384(uniqueIdHash.data, cert.subject.uniqueId, cert.subject.uniqueIdSize);
  189. p_CertEntry &bySubjectUniqueId = m_certsBySubjectUniqueID[uniqueIdHash];
  190. if (bySubjectUniqueId.certificate) {
  191. if (bySubjectUniqueId.certificate->subject.timestamp >= cert.subject.timestamp)
  192. return ZT_CERTIFICATE_ERROR_HAVE_NEWER_CERT;
  193. m_eraseCertificate(tPtr, bySubjectUniqueId.certificate, &uniqueIdHash);
  194. m_certsBySubjectUniqueID[uniqueIdHash] = certEntry;
  195. } else {
  196. bySubjectUniqueId = certEntry;
  197. }
  198. }
  199. for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
  200. const Identity *const ii = reinterpret_cast<const Identity *>(cert.subject.identities[i].identity);
  201. if (ii)
  202. m_certsBySubjectIdentity[ii->fingerprint()][certEntry.certificate] = localTrust;
  203. }
  204. m_certs[serial] = certEntry;
  205. if (refreshRootSets) {
  206. RWMutex::Lock l3(m_peers_l);
  207. RWMutex::Lock l2(m_roots_l);
  208. m_updateRootPeers(tPtr, now);
  209. }
  210. if (writeToLocalStore)
  211. m_writeTrustStore(tPtr);
  212. }
  213. if (writeToLocalStore) {
  214. Vector< uint8_t > certData(cert.encode());
  215. uint64_t id[6];
  216. Utils::copy< 48 >(id, cert.serialNo);
  217. RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_CERT, id, certData.data(), (unsigned int)certData.size());
  218. }
  219. return ZT_CERTIFICATE_ERROR_NONE;
  220. }
  221. unsigned int Topology::deleteCertificate(void *tPtr,const uint8_t serialNo[ZT_SHA384_DIGEST_SIZE])
  222. {
  223. Mutex::Lock l(m_certs_l);
  224. const unsigned long origCertCount = (unsigned long)m_certs.size();
  225. Map< SHA384Hash, p_CertEntry >::const_iterator c(m_certs.find(SHA384Hash(serialNo)));
  226. if (c != m_certs.end()) {
  227. if ((c->second.certificate->subject.uniqueId) && (c->second.certificate->subject.uniqueIdSize > 0)) {
  228. SHA384Hash uniqueIdHash;
  229. SHA384(uniqueIdHash.data, c->second.certificate->subject.uniqueId, c->second.certificate->subject.uniqueIdSize);
  230. m_eraseCertificate(tPtr, c->second.certificate, &uniqueIdHash);
  231. } else {
  232. m_eraseCertificate(tPtr, c->second.certificate, nullptr);
  233. }
  234. const int64_t now = RR->node->now();
  235. m_cleanCertificates(tPtr, now);
  236. m_writeTrustStore(tPtr);
  237. {
  238. RWMutex::Lock l3(m_peers_l);
  239. RWMutex::Lock l2(m_roots_l);
  240. m_updateRootPeers(tPtr, now);
  241. }
  242. }
  243. return (unsigned int)(origCertCount - (unsigned long)m_certs.size());
  244. }
  245. void Topology::allCerts(Vector< SharedPtr<const Certificate> > &c,Vector< unsigned int > &t) const noexcept
  246. {
  247. Mutex::Lock l(m_certs_l);
  248. const unsigned long cs = (unsigned long)m_certs.size();
  249. c.reserve(cs);
  250. t.reserve(cs);
  251. for(Map< SHA384Hash, p_CertEntry >::const_iterator i(m_certs.begin());i!=m_certs.end();++i) {
  252. c.push_back(i->second.certificate);
  253. t.push_back(i->second.localTrust);
  254. }
  255. }
  256. struct p_RootRankingComparisonOperator
  257. {
  258. ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
  259. {
  260. // Sort roots first in order of which root has spoken most recently, but
  261. // only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
  262. // means that living roots that seem responsive are ranked the same. Then
  263. // they're sorted in descending order of latency so that the apparently
  264. // fastest root is ranked first.
  265. const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
  266. const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
  267. if (alr < blr) {
  268. return true;
  269. } else if (blr == alr) {
  270. const int bb = b->latency();
  271. if (bb < 0)
  272. return true;
  273. return bb < a->latency();
  274. }
  275. return false;
  276. }
  277. };
  278. void Topology::m_rankRoots(const int64_t now)
  279. {
  280. // assumes m_roots is locked
  281. m_lastRankedRoots = now;
  282. std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
  283. }
  284. void Topology::m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash)
  285. {
  286. // assumes m_certs is locked for writing
  287. const SHA384Hash serialNo(cert->serialNo);
  288. m_certs.erase(serialNo);
  289. if (uniqueIdHash)
  290. m_certsBySubjectUniqueID.erase(*uniqueIdHash);
  291. for (unsigned int i = 0; i < cert->subject.identityCount; ++i) {
  292. const Identity *const ii = reinterpret_cast<const Identity *>(cert->subject.identities[i].identity);
  293. Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::iterator bySubjectIdentity(m_certsBySubjectIdentity.find(ii->fingerprint()));
  294. if (bySubjectIdentity != m_certsBySubjectIdentity.end()) {
  295. bySubjectIdentity->second.erase(cert);
  296. if (bySubjectIdentity->second.empty())
  297. m_certsBySubjectIdentity.erase(bySubjectIdentity);
  298. }
  299. }
  300. RR->node->stateObjectDelete(tPtr, ZT_STATE_OBJECT_CERT, serialNo.data);
  301. }
  302. bool Topology::m_cleanCertificates(void *tPtr, int64_t now)
  303. {
  304. // assumes m_certs is locked for writing
  305. bool deleted = false;
  306. Vector< SharedPtr< const Certificate >> toDelete;
  307. for (;;) {
  308. for (Map< SHA384Hash, p_CertEntry >::iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
  309. // Verify, but the last boolean option tells it to skip signature checks as this would
  310. // already have been done. This will therefore just check the path and validity times
  311. // of the certificate.
  312. const ZT_CertificateError err = m_verifyCertificate(*(c->second.certificate), now, c->second.localTrust, true);
  313. if (err != ZT_CERTIFICATE_ERROR_NONE)
  314. toDelete.push_back(c->second.certificate);
  315. }
  316. if (toDelete.empty())
  317. break;
  318. deleted = true;
  319. SHA384Hash uniqueIdHash;
  320. for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c) {
  321. if ((*c)->subject.uniqueId) {
  322. SHA384(uniqueIdHash.data, (*c)->subject.uniqueId, (*c)->subject.uniqueIdSize);
  323. m_eraseCertificate(tPtr, *c, &uniqueIdHash);
  324. } else {
  325. m_eraseCertificate(tPtr, *c, nullptr);
  326. }
  327. }
  328. toDelete.clear();
  329. }
  330. return deleted;
  331. }
  332. bool Topology::m_verifyCertificateChain(const Certificate *current, const int64_t now) const
  333. {
  334. // assumes m_certs is at least locked for reading
  335. Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectIdentity.find(reinterpret_cast<const Identity *>(current->issuer)->fingerprint()));
  336. if (c != m_certsBySubjectIdentity.end()) {
  337. for (Map< SharedPtr< const Certificate >, unsigned int >::const_iterator cc(c->second.begin()); cc != c->second.end(); ++cc) {
  338. if (
  339. (cc->first->maxPathLength > current->maxPathLength) &&
  340. (cc->first->validity[0] <= now) && // not before now
  341. (cc->first->validity[1] >= now) && // not after now
  342. (cc->first->validity[0] <= current->timestamp) && // not before child cert's timestamp
  343. (cc->first->validity[1] >= current->timestamp) // not after child cert's timestamp
  344. ) {
  345. if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) != 0)
  346. return true;
  347. if (m_verifyCertificateChain(cc->first.ptr(), now))
  348. return true;
  349. }
  350. }
  351. }
  352. return false;
  353. }
  354. ZT_CertificateError Topology::m_verifyCertificate(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
  355. {
  356. // assumes m_certs is at least locked for reading
  357. // Check certificate time window against current time.
  358. if ((cert.validity[0] > now) || (cert.validity[1] < now))
  359. return ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
  360. // Verify primary and internal signatures and other objects unless the caller
  361. // elected to skip, which is done to re-check certs already in the DB.
  362. if (!skipSignatureCheck) {
  363. const ZT_CertificateError err = cert.verify();
  364. if (err != ZT_CERTIFICATE_ERROR_NONE)
  365. return err;
  366. }
  367. // If this is a root CA, we can skip this as we're already there. Otherwise we
  368. // recurse up the tree until we hit a root CA.
  369. if ((localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) {
  370. if (!m_verifyCertificateChain(&cert, now))
  371. return ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
  372. }
  373. return ZT_CERTIFICATE_ERROR_NONE;
  374. }
  375. void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer)
  376. {
  377. // does not require any locks to be held
  378. try {
  379. uint64_t id[2];
  380. id[0] = zta.toInt();
  381. id[1] = 0;
  382. Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
  383. if (data.size() > 8) {
  384. const uint8_t *d = data.data();
  385. int dl = (int)data.size();
  386. const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
  387. Peer *const p = new Peer(RR);
  388. int n = p->unmarshal(d + 8, dl - 8);
  389. if (n < 0) {
  390. delete p;
  391. return;
  392. }
  393. if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
  394. // TODO: handle many peers, same address (?)
  395. peer.set(p);
  396. return;
  397. }
  398. }
  399. } catch (...) {
  400. peer.zero();
  401. }
  402. }
  403. SharedPtr< Peer > Topology::m_peerFromCached(void *tPtr, const Address &zta)
  404. {
  405. SharedPtr< Peer > p;
  406. m_loadCached(tPtr, zta, p);
  407. if (p) {
  408. RWMutex::Lock l(m_peers_l);
  409. SharedPtr< Peer > &hp = m_peers[zta];
  410. if (hp)
  411. return hp;
  412. hp = p;
  413. }
  414. return p;
  415. }
  416. SharedPtr< Path > Topology::m_newPath(const int64_t l, const InetAddress &r, const UniqueID &k)
  417. {
  418. SharedPtr< Path > p(new Path(l, r));
  419. RWMutex::Lock lck(m_paths_l);
  420. SharedPtr< Path > &p2 = m_paths[k];
  421. if (p2)
  422. return p2;
  423. p2 = p;
  424. return p;
  425. }
  426. void Topology::m_updateRootPeers(void *tPtr, const int64_t now)
  427. {
  428. // assumes m_certs_l, m_peers_l, and m_roots_l are locked for write
  429. Set< Identity > rootIdentities;
  430. for (Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectIdentity.begin()); c != m_certsBySubjectIdentity.end(); ++c) {
  431. for (Map< SharedPtr< const Certificate >, unsigned int >::const_iterator cc(c->second.begin()); cc != c->second.end(); ++cc) {
  432. if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET) != 0) {
  433. for (unsigned int i = 0; i < cc->first->subject.identityCount; ++i) {
  434. if (cc->first->subject.identities[i].identity)
  435. rootIdentities.insert(*reinterpret_cast<const Identity *>(cc->first->subject.identities[i].identity));
  436. }
  437. }
  438. }
  439. }
  440. m_roots.clear();
  441. for (Set< Identity >::const_iterator i(rootIdentities.begin()); i != rootIdentities.end(); ++i) {
  442. SharedPtr< Peer > &p = m_peers[i->address()];
  443. if ((!p) || (p->identity() != *i)) {
  444. p.set(new Peer(RR));
  445. p->init(*i);
  446. }
  447. m_roots.push_back(p);
  448. }
  449. m_rankRoots(now);
  450. }
  451. void Topology::m_writeTrustStore(void *tPtr)
  452. {
  453. // assumes m_certs is locked
  454. char tmp[32];
  455. Dictionary d;
  456. unsigned long idx = 0;
  457. d.add("c$", (uint64_t)m_certs.size());
  458. for (Map< SHA384Hash, p_CertEntry >::const_iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
  459. d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)].assign(c->first.data, c->first.data + ZT_SHA384_DIGEST_SIZE);
  460. d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx), (uint64_t)c->second.localTrust);
  461. ++idx;
  462. }
  463. Vector< uint8_t > trustStore;
  464. d.encode(trustStore);
  465. RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256, trustStore.data(), (unsigned int)trustStore.size());
  466. }
  467. } // namespace ZeroTier