Node.cpp
/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Constants.hpp"
#include "SharedPtr.hpp"
#include "Node.hpp"
#include "NetworkController.hpp"
#include "Topology.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
#include "Network.hpp"
#include "Trace.hpp"
#include "Locator.hpp"
#include "Expect.hpp"
#include "VL1.hpp"
#include "VL2.hpp"
#include "Buf.hpp"

namespace ZeroTier {

namespace {

// Structure containing all the core objects for a ZeroTier node to reduce memory allocations.
struct _NodeObjects
{
    ZT_INLINE _NodeObjects(RuntimeEnvironment *const RR, void *const tPtr) :
        t(RR),
        expect(),
        vl2(RR),
        vl1(RR),
        sa(RR),
        topology(RR, tPtr)
    {
        RR->t = &t;
        RR->expect = &expect;
        RR->vl2 = &vl2;
        RR->vl1 = &vl1;
        RR->sa = &sa;
        RR->topology = &topology;
    }

    Trace t;
    Expect expect;
    VL2 vl2;
    VL1 vl1;
    SelfAwareness sa;
    Topology topology;
};

struct _sortPeerPtrsByAddress
{
    ZT_INLINE bool operator()(const SharedPtr<Peer> &a, const SharedPtr<Peer> &b) const
    { return (a->address() < b->address()); }
};

} // anonymous namespace

Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, int64_t now) :
    m_RR(this),
    RR(&m_RR),
    m_objects(nullptr),
    m_cb(*callbacks),
    m_uPtr(uPtr),
    m_networks(),
    m_lastPeerPulse(0),
    m_lastHousekeepingRun(0),
    m_lastNetworkHousekeepingRun(0),
    m_now(now),
    m_online(false)
{
    // Load this node's identity.
    uint64_t idtmp[2];
    idtmp[0] = 0;
    idtmp[1] = 0;
    Vector<uint8_t> data(stateObjectGet(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp));
    bool haveIdentity = false;
    if (!data.empty()) {
        data.push_back(0); // zero-terminate string
        if (RR->identity.fromString((const char *) data.data())) {
            RR->identity.toString(false, RR->publicIdentityStr);
            RR->identity.toString(true, RR->secretIdentityStr);
            haveIdentity = true;
        }
    }

    // Generate a new identity if we don't have one.
    if (!haveIdentity) {
        RR->identity.generate(Identity::C25519);
        RR->identity.toString(false, RR->publicIdentityStr);
        RR->identity.toString(true, RR->secretIdentityStr);
        idtmp[0] = RR->identity.address();
        idtmp[1] = 0;
        stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp, RR->secretIdentityStr, (unsigned int) strlen(RR->secretIdentityStr));
        stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int) strlen(RR->publicIdentityStr));
    } else {
        idtmp[0] = RR->identity.address();
        idtmp[1] = 0;
        data = stateObjectGet(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp);
        if ((data.empty()) || (memcmp(data.data(), RR->publicIdentityStr, strlen(RR->publicIdentityStr)) != 0))
            stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int) strlen(RR->publicIdentityStr));
    }

    // 2X hash our identity private key(s) to obtain a symmetric key for encrypting
    // locally cached data at rest (as a defense in depth measure). This is not used
    // for any network level encryption or authentication.
    uint8_t tmph[ZT_SHA384_DIGEST_SIZE];
    RR->identity.hashWithPrivate(tmph);
    SHA384(tmph, tmph, ZT_SHA384_DIGEST_SIZE);
    RR->localCacheSymmetric.init(tmph);
    Utils::burn(tmph, ZT_SHA384_DIGEST_SIZE);

    // Generate a random sort order for privileged ports for use in NAT-t algorithms.
    for (unsigned int i = 0; i < 1023; ++i)
        RR->randomPrivilegedPortOrder[i] = (uint16_t) (i + 1);
    for (unsigned int i = 0; i < 512; ++i) {
        uint64_t rn = Utils::random();
        const unsigned int a = (unsigned int) rn % 1023;
        const unsigned int b = (unsigned int) (rn >> 32U) % 1023;
        if (a != b) {
            const uint16_t tmp = RR->randomPrivilegedPortOrder[a];
            RR->randomPrivilegedPortOrder[a] = RR->randomPrivilegedPortOrder[b];
            RR->randomPrivilegedPortOrder[b] = tmp;
        }
    }

    // This constructs all the components of the ZeroTier core within a single contiguous memory container,
    // which reduces memory fragmentation and may improve cache locality.
    m_objects = new _NodeObjects(RR, tPtr);

    postEvent(tPtr, ZT_EVENT_UP);
}
Node::~Node()
{
    m_networks_l.lock();
    m_networks_l.unlock();
    m_networks.clear();
    m_networks_l.lock();
    m_networks_l.unlock();

    if (m_objects)
        delete (_NodeObjects *) m_objects;

    // Let go of cached Buf objects. If other nodes happen to be running in this
    // same process space new Bufs will be allocated as needed, but this is almost
    // never the case. Calling this here saves RAM if we are running inside something
    // that wants to keep running after tearing down its ZeroTier core instance.
    Buf::freePool();
}
void Node::shutdown(void *tPtr)
{
    postEvent(tPtr, ZT_EVENT_DOWN);
    if (RR->topology)
        RR->topology->saveAll(tPtr);
}

ZT_ResultCode Node::processWirePacket(
    void *tPtr,
    int64_t now,
    int64_t localSocket,
    const struct sockaddr_storage *remoteAddress,
    SharedPtr<Buf> &packetData,
    unsigned int packetLength,
    volatile int64_t *nextBackgroundTaskDeadline)
{
    m_now = now;
    RR->vl1->onRemotePacket(tPtr, localSocket, (remoteAddress) ? *asInetAddress(remoteAddress) : InetAddress::NIL, packetData, packetLength);
    return ZT_RESULT_OK;
}

ZT_ResultCode Node::processVirtualNetworkFrame(
    void *tPtr,
    int64_t now,
    uint64_t nwid,
    uint64_t sourceMac,
    uint64_t destMac,
    unsigned int etherType,
    unsigned int vlanId,
    SharedPtr<Buf> &frameData,
    unsigned int frameLength,
    volatile int64_t *nextBackgroundTaskDeadline)
{
    m_now = now;
    SharedPtr<Network> nw(this->network(nwid));
    if (nw) {
        RR->vl2->onLocalEthernet(tPtr, nw, MAC(sourceMac), MAC(destMac), etherType, vlanId, frameData, frameLength);
        return ZT_RESULT_OK;
    } else {
        return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
    }
}

struct _processBackgroundTasks_eachPeer
{
    const int64_t now;
    void *const tPtr;
    bool online;

    ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_, void *const tPtr_) noexcept :
        now(now_),
        tPtr(tPtr_),
        online(false)
    {}

    ZT_INLINE void operator()(const SharedPtr<Peer> &peer, const bool isRoot) noexcept
    {
        peer->pulse(tPtr, now, isRoot);
        this->online |= (isRoot && peer->directlyConnected(now));
    }
};
ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int64_t *nextBackgroundTaskDeadline)
{
    m_now = now;
    Mutex::Lock bl(m_backgroundTasksLock);
    try {
        // Call peer pulse() method of all peers every ZT_PEER_PULSE_INTERVAL.
        if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
            m_lastPeerPulse = now;
            try {
                _processBackgroundTasks_eachPeer pf(now, tPtr);
                RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
                if (m_online.exchange(pf.online) != pf.online)
                    postEvent(tPtr, pf.online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
                RR->topology->rankRoots();
            } catch (...) {
                return ZT_RESULT_FATAL_ERROR_INTERNAL;
            }
        }

        // Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
        if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
            m_lastNetworkHousekeepingRun = now;
            RWMutex::RLock l(m_networks_l);
            for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i) {
                i->second->doPeriodicTasks(tPtr, now);
            }
        }

        // Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
        if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
            m_lastHousekeepingRun = now;

            // Clean up any old local controller auth memoizations. This is an
            // optimization for network controllers to know whether to accept
            // or trust nodes without doing an extra cert check.
            m_localControllerAuthorizations_l.lock();
            for (Map<p_LocalControllerAuth, int64_t>::iterator i(m_localControllerAuthorizations.begin()); i != m_localControllerAuthorizations.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
                if ((now - i->second) > (ZT_NETWORK_AUTOCONF_DELAY * 3))
                    m_localControllerAuthorizations.erase(i++);
                else ++i;
            }
            m_localControllerAuthorizations_l.unlock();

            RR->topology->doPeriodicTasks(tPtr, now);
            RR->sa->clean(now);
        }

        *nextBackgroundTaskDeadline = now + ZT_TIMER_TASK_INTERVAL;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
    return ZT_RESULT_OK;
}
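
/*
 * Illustrative sketch (not part of this file's build): how an embedding host might
 * drive background tasks through the public C API. The clockMs() and sleepMs()
 * helpers and the run flag are hypothetical placeholders for the host's own event
 * loop; the core's contract here is only that it updates *nextBackgroundTaskDeadline
 * as a hint for when it next wants to be called.
 *
 *   volatile int64_t nextDeadline = 0;
 *   while (run) {
 *       const int64_t now = clockMs();             // hypothetical millisecond clock
 *       if (now >= nextDeadline)
 *           ZT_Node_processBackgroundTasks(node, nullptr, now, &nextDeadline);
 *       sleepMs((nextDeadline > now) ? (nextDeadline - now) : 1); // hypothetical sleep
 *   }
 */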
ZT_ResultCode Node::join(uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, void *tptr)
{
    Fingerprint fp;
    if (controllerFingerprint)
        fp = *controllerFingerprint;

    RWMutex::Lock l(m_networks_l);
    SharedPtr<Network> &nw = m_networks[nwid];
    if (nw)
        return ZT_RESULT_OK;
    nw.set(new Network(RR, tptr, nwid, fp, uptr, nullptr));
    return ZT_RESULT_OK;
}

ZT_ResultCode Node::leave(uint64_t nwid, void **uptr, void *tptr)
{
    ZT_VirtualNetworkConfig ctmp;

    m_networks_l.lock();
    Map<uint64_t, SharedPtr<Network> >::iterator nwi(m_networks.find(nwid)); // NOLINT(hicpp-use-auto,modernize-use-auto)
    if (nwi == m_networks.end()) {
        m_networks_l.unlock();
        return ZT_RESULT_OK;
    }
    SharedPtr<Network> nw(nwi->second);
    m_networks.erase(nwi);
    m_networks_l.unlock();

    if (uptr)
        *uptr = *nw->userPtr();
    nw->externalConfig(&ctmp);
    RR->node->configureVirtualNetworkPort(tptr, nwid, uptr, ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY, &ctmp);
    nw->destroy();
    nw.zero();

    uint64_t tmp[2];
    tmp[0] = nwid;
    tmp[1] = 0;
    RR->node->stateObjectDelete(tptr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp);

    return ZT_RESULT_OK;
}

ZT_ResultCode Node::multicastSubscribe(void *tPtr, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    const SharedPtr<Network> nw(this->network(nwid));
    if (nw) {
        nw->multicastSubscribe(tPtr, MulticastGroup(MAC(multicastGroup), (uint32_t) (multicastAdi & 0xffffffff)));
        return ZT_RESULT_OK;
    } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    const SharedPtr<Network> nw(this->network(nwid));
    if (nw) {
        nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup), (uint32_t) (multicastAdi & 0xffffffff)));
        return ZT_RESULT_OK;
    } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::addRoot(void *tPtr, const ZT_Identity *id, const ZT_Locator *loc)
{
    if ((!id) || (!loc))
        return ZT_RESULT_ERROR_BAD_PARAMETER;
    const SharedPtr<const Locator> locator(new Locator(*reinterpret_cast<const Locator *>(loc)));
    // SECURITY: locator credential validation happens in Topology.cpp in addRoot().
    return RR->topology->addRoot(tPtr, *reinterpret_cast<const Identity *>(id), locator) ? ZT_RESULT_OK : ZT_RESULT_ERROR_INVALID_CREDENTIAL;
}

ZT_ResultCode Node::removeRoot(void *tPtr, const uint64_t address)
{
    RR->topology->removeRoot(tPtr, Address(address));
    return ZT_RESULT_OK;
}

uint64_t Node::address() const
{
    return RR->identity.address().toInt();
}

void Node::status(ZT_NodeStatus *status) const
{
    status->address = RR->identity.address().toInt();
    status->identity = reinterpret_cast<const ZT_Identity *>(&RR->identity);
    status->publicIdentity = RR->publicIdentityStr;
    status->secretIdentity = RR->secretIdentityStr;
    status->online = m_online ? 1 : 0;
}
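
// Note on peers(): everything returned to the API caller (the ZT_PeerList header, the
// ZT_Peer array, per-peer path arrays, identity copies, and locator endpoints) is packed
// into one malloc'd block so the caller can release it with a single ZT_freeQueryResult() call.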
ZT_PeerList *Node::peers() const
{
    Vector<SharedPtr<Peer> > peers;
    RR->topology->getAllPeers(peers);
    std::sort(peers.begin(), peers.end(), _sortPeerPtrsByAddress());

    const unsigned int bufSize =
        sizeof(ZT_PeerList) +
        (sizeof(ZT_Peer) * peers.size()) +
        ((sizeof(ZT_Path) * ZT_MAX_PEER_NETWORK_PATHS) * peers.size()) +
        (sizeof(Identity) * peers.size()) +
        ((sizeof(ZT_Endpoint) * ZT_LOCATOR_MAX_ENDPOINTS) * peers.size());
    char *buf = (char *) malloc(bufSize);
    if (!buf)
        return nullptr;
    Utils::zero(buf, bufSize);

    ZT_PeerList *pl = reinterpret_cast<ZT_PeerList *>(buf);
    buf += sizeof(ZT_PeerList);
    pl->peers = reinterpret_cast<ZT_Peer *>(buf);
    buf += sizeof(ZT_Peer) * peers.size();
    ZT_Path *peerPath = reinterpret_cast<ZT_Path *>(buf);
    buf += (sizeof(ZT_Path) * ZT_MAX_PEER_NETWORK_PATHS) * peers.size();
    Identity *identities = reinterpret_cast<Identity *>(buf);
    buf += sizeof(Identity) * peers.size();
    ZT_Endpoint *locatorEndpoint = reinterpret_cast<ZT_Endpoint *>(buf);

    const int64_t now = m_now;
    pl->peerCount = 0;
    for (Vector<SharedPtr<Peer> >::iterator pi(peers.begin()); pi != peers.end(); ++pi) {
        ZT_Peer *const p = pl->peers + pl->peerCount;

        p->address = (*pi)->address().toInt();
        identities[pl->peerCount] = (*pi)->identity(); // need to make a copy in case peer gets deleted
        p->identity = identities + pl->peerCount;
        p->fingerprint.address = p->address;
        Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(p->fingerprint.hash, (*pi)->identity().fingerprint().hash);
        if ((*pi)->remoteVersionKnown()) {
            p->versionMajor = (int) (*pi)->remoteVersionMajor();
            p->versionMinor = (int) (*pi)->remoteVersionMinor();
            p->versionRev = (int) (*pi)->remoteVersionRevision();
        } else {
            p->versionMajor = -1;
            p->versionMinor = -1;
            p->versionRev = -1;
        }
        p->latency = (*pi)->latency();
        p->root = RR->topology->isRoot((*pi)->identity()) ? 1 : 0;

        p->networkCount = 0;
        // TODO: enumerate network memberships

        Vector<SharedPtr<Path> > paths;
        (*pi)->getAllPaths(paths);
        p->pathCount = (unsigned int) paths.size();
        p->paths = peerPath;
        for (Vector<SharedPtr<Path> >::iterator path(paths.begin()); path != paths.end(); ++path) {
            ZT_Path *const pp = peerPath++;
            pp->endpoint.type = ZT_ENDPOINT_TYPE_IP_UDP; // only type supported right now
            Utils::copy<sizeof(sockaddr_storage)>(&pp->endpoint.value.ss, &((*path)->address().as.ss));
            pp->lastSend = (*path)->lastOut();
            pp->lastReceive = (*path)->lastIn();
            pp->alive = (*path)->alive(now) ? 1 : 0;
            pp->preferred = (path == paths.begin()) ? 1 : 0; // first (best) path is preferred
        }

        const SharedPtr<const Locator> loc((*pi)->locator());
        if (loc) {
            p->locatorTimestamp = loc->timestamp();
            p->locatorEndpointCount = (unsigned int) loc->endpoints().size();
            p->locatorEndpoints = locatorEndpoint;
            for (Vector<Endpoint>::const_iterator ep(loc->endpoints().begin()); ep != loc->endpoints().end(); ++ep)
                *(locatorEndpoint++) = *ep;
        }

        ++pl->peerCount;
    }

    return pl;
}
ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
    SharedPtr<Network> nw(network(nwid));
    if (nw) {
        ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *) ::malloc(sizeof(ZT_VirtualNetworkConfig));
        nw->externalConfig(nc);
        return nc;
    }
    return nullptr;
}

ZT_VirtualNetworkList *Node::networks() const
{
    RWMutex::RLock l(m_networks_l);

    char *const buf = (char *) ::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_networks.size()));
    if (!buf)
        return nullptr;
    ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *) buf; // NOLINT(modernize-use-auto,hicpp-use-auto)
    nl->networks = (ZT_VirtualNetworkConfig *) (buf + sizeof(ZT_VirtualNetworkList));

    nl->networkCount = 0;
    for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i) // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
        i->second->externalConfig(&(nl->networks[nl->networkCount++]));

    return nl;
}

void Node::setNetworkUserPtr(uint64_t nwid, void *ptr)
{
    SharedPtr<Network> nw(network(nwid));
    if (nw)
        *(nw->userPtr()) = ptr;
}

void Node::setInterfaceAddresses(const ZT_InterfaceAddress *addrs, unsigned int addrCount)
{
    Mutex::Lock _l(m_localInterfaceAddresses_m);
    m_localInterfaceAddresses.clear();
    for (unsigned int i = 0; i < addrCount; ++i) {
        bool dupe = false;
        for (unsigned int j = 0; j < i; ++j) {
            if (*(reinterpret_cast<const InetAddress *>(&addrs[j].address)) == *(reinterpret_cast<const InetAddress *>(&addrs[i].address))) {
                dupe = true;
                break;
            }
        }
        if (!dupe)
            m_localInterfaceAddresses.push_back(addrs[i]);
    }
}
int Node::sendUserMessage(void *tptr, uint64_t dest, uint64_t typeId, const void *data, unsigned int len)
{
    try {
        if (RR->identity.address().toInt() != dest) {
            // TODO
            /*
            Packet outp(Address(dest),RR->identity.address(),Packet::VERB_USER_MESSAGE);
            outp.append(typeId);
            outp.append(data,len);
            outp.compress();
            RR->sw->send(tptr,outp,true);
            */
            return 1;
        }
    } catch (...) {}
    return 0;
}

void Node::setController(void *networkControllerInstance)
{
    RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
    if (networkControllerInstance)
        RR->localNetworkController->init(RR->identity, this);
}
// Methods used only within the core ----------------------------------------------------------------------------------

Vector<uint8_t> Node::stateObjectGet(void *const tPtr, ZT_StateObjectType type, const uint64_t id[2])
{
    Vector<uint8_t> r;
    if (m_cb.stateGetFunction) {
        void *data = nullptr;
        void (*freeFunc)(void *) = nullptr;
        int l = m_cb.stateGetFunction(
            reinterpret_cast<ZT_Node *>(this),
            m_uPtr,
            tPtr,
            type,
            id,
            &data,
            &freeFunc);
        if ((l > 0) && (data) && (freeFunc)) {
            r.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + l);
            freeFunc(data);
        }
    }
    return r;
}

bool Node::shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, const int64_t localSocket, const InetAddress &remoteAddress)
{
    {
        RWMutex::RLock l(m_networks_l);
        for (Map<uint64_t, SharedPtr<Network> >::iterator i(m_networks.begin()); i != m_networks.end(); ++i) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
            for (unsigned int k = 0, j = i->second->config().staticIpCount; k < j; ++k) {
                if (i->second->config().staticIps[k].containsAddress(remoteAddress))
                    return false;
            }
        }
    }

    if (m_cb.pathCheckFunction) {
        return (m_cb.pathCheckFunction(
            reinterpret_cast<ZT_Node *>(this),
            m_uPtr,
            tPtr,
            id.address().toInt(),
            (const ZT_Identity *) &id,
            localSocket,
            reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0);
    }

    return true;
}
bool Node::externalPathLookup(void *tPtr, const Identity &id, int family, InetAddress &addr)
{
    if (m_cb.pathLookupFunction) {
        return (m_cb.pathLookupFunction(
            reinterpret_cast<ZT_Node *>(this),
            m_uPtr,
            tPtr,
            id.address().toInt(),
            reinterpret_cast<const ZT_Identity *>(&id),
            family,
            reinterpret_cast<sockaddr_storage *>(&addr)) == ZT_RESULT_OK);
    }
    return false;
}

bool Node::localControllerHasAuthorized(const int64_t now, const uint64_t nwid, const Address &addr) const
{
    m_localControllerAuthorizations_l.lock();
    const int64_t *const at = m_localControllerAuthorizations.get(p_LocalControllerAuth(nwid, addr));
    m_localControllerAuthorizations_l.unlock();
    if (at)
        return ((now - *at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
    return false;
}
// Implementation of NetworkController::Sender ------------------------------------------------------------------------

void Node::ncSendConfig(uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig)
{
    m_localControllerAuthorizations_l.lock();
    m_localControllerAuthorizations[p_LocalControllerAuth(nwid, destination)] = now();
    m_localControllerAuthorizations_l.unlock();

    if (destination == RR->identity.address()) {
        SharedPtr<Network> n(network(nwid));
        if (!n)
            return;
        n->setConfiguration((void *) 0, nc, true);
    } else {
        Dictionary dconf;
        if (nc.toDictionary(dconf)) {
            uint64_t configUpdateId = Utils::random();
            if (!configUpdateId)
                ++configUpdateId;

            Vector<uint8_t> ddata;
            dconf.encode(ddata);

            // TODO
            /*
            unsigned int chunkIndex = 0;
            while (chunkIndex < totalSize) {
                const unsigned int chunkLen = std::min(totalSize - chunkIndex,(unsigned int)(ZT_PROTO_MAX_PACKET_LENGTH - (ZT_PACKET_IDX_PAYLOAD + 256)));
                Packet outp(destination,RR->identity.address(),(requestPacketId) ? Packet::VERB_OK : Packet::VERB_NETWORK_CONFIG);
                if (requestPacketId) {
                    outp.append((unsigned char)Packet::VERB_NETWORK_CONFIG_REQUEST);
                    outp.append(requestPacketId);
                }
                const unsigned int sigStart = outp.size();
                outp.append(nwid);
                outp.append((uint16_t)chunkLen);
                outp.append((const void *)(dconf->data() + chunkIndex),chunkLen);
                outp.append((uint8_t)0); // no flags
                outp.append((uint64_t)configUpdateId);
                outp.append((uint32_t)totalSize);
                outp.append((uint32_t)chunkIndex);
                uint8_t sig[256];
                const unsigned int siglen = RR->identity.sign(reinterpret_cast<const uint8_t *>(outp.data()) + sigStart,outp.size() - sigStart,sig,sizeof(sig));
                outp.append((uint8_t)1);
                outp.append((uint16_t)siglen);
                outp.append(sig,siglen);
                outp.compress();
                RR->sw->send((void *)0,outp,true);
                chunkIndex += chunkLen;
            }
            */
        }
    }
}
void Node::ncSendRevocation(const Address &destination, const Revocation &rev)
{
    if (destination == RR->identity.address()) {
        SharedPtr<Network> n(network(rev.networkId()));
        if (!n) return;
        n->addCredential(nullptr, RR->identity, rev);
    } else {
        // TODO
        /*
        Packet outp(destination,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
        outp.append((uint8_t)0x00);
        outp.append((uint16_t)0);
        outp.append((uint16_t)0);
        outp.append((uint16_t)1);
        rev.serialize(outp);
        outp.append((uint16_t)0);
        RR->sw->send((void *)0,outp,true);
        */
    }
}

void Node::ncSendError(uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode)
{
    if (destination == RR->identity.address()) {
        SharedPtr<Network> n(network(nwid));
        if (!n) return;
        switch (errorCode) {
            case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
            case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:
                n->setNotFound();
                break;
            case NetworkController::NC_ERROR_ACCESS_DENIED:
                n->setAccessDenied();
                break;
            default:
                break;
        }
    } else if (requestPacketId) {
        // TODO
        /*
        Packet outp(destination,RR->identity.address(),Packet::VERB_ERROR);
        outp.append((unsigned char)Packet::VERB_NETWORK_CONFIG_REQUEST);
        outp.append(requestPacketId);
        switch(errorCode) {
            //case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
            //case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:
            default:
                outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
                break;
            case NetworkController::NC_ERROR_ACCESS_DENIED:
                outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
                break;
        }
        outp.append(nwid);
        RR->sw->send((void *)0,outp,true);
        */
    } // else we can't send an ERROR() in response to nothing, so discard
}

} // namespace ZeroTier
// C API --------------------------------------------------------------------------------------------------------------

extern "C" {

// These macros make the idiom of passing buffers to outside code via the API work properly even
// if the first address of Buf does not overlap with its data field, since the C++ standard does
// not absolutely guarantee this.
#define _ZT_PTRTOBUF(p) ((ZeroTier::Buf *)( ((uintptr_t)(p)) - ((uintptr_t)&(((ZeroTier::Buf *)0)->unsafeData[0])) ))
#define _ZT_BUFTOPTR(b) ((void *)(&((b)->unsafeData[0])))
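
// In other words, _ZT_PTRTOBUF() undoes _ZT_BUFTOPTR(): it subtracts the byte offset of
// Buf::unsafeData (an offsetof-style computation) from the externally visible data pointer
// to recover the address of the enclosing Buf object.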
void *ZT_getBuffer()
{
    // When external code requests a Buf, grab one from the pool (or freshly allocated)
    // and return it with its reference count left at zero. It's the responsibility of
    // external code to bring it back via freeBuffer() or one of the processX() calls.
    // When this occurs it's either sent back to the pool with Buf's delete operator or
    // wrapped in a SharedPtr<> to be passed into the core.
    try {
        return _ZT_BUFTOPTR(new ZeroTier::Buf());
    } catch (...) {
        return nullptr; // can only happen on out of memory condition
    }
}
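
/*
 * Illustrative sketch (not compiled here): the intended round trip for a pooled buffer.
 * The I/O details (reading a datagram into the buffer, the socket handle, the clock,
 * the source address) are the host's responsibility and appear only as hypothetical
 * placeholders.
 *
 *   void *buf = ZT_getBuffer();
 *   if (buf) {
 *       // ... read one UDP datagram into buf (up to Buf's fixed capacity) ...
 *       volatile int64_t nextDeadline = 0;
 *       // Passing isZtBuffer = 1 hands ownership of the buffer back to the core,
 *       // so the caller must not use or free it afterward.
 *       ZT_Node_processWirePacket(node, nullptr, nowMs, localSocket, &fromAddress,
 *                                 buf, bytesReceived, 1, &nextDeadline);
 *   }
 *
 * If the packet is never handed to the core, return the buffer with ZT_freeBuffer(buf).
 */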
void ZT_freeBuffer(void *b)
{
    if (b)
        delete _ZT_PTRTOBUF(b);
}

void ZT_freeQueryResult(void *qr)
{
    if (qr)
        free(qr);
}

enum ZT_ResultCode ZT_Node_new(ZT_Node **node, void *uptr, void *tptr, const struct ZT_Node_Callbacks *callbacks, int64_t now)
{
    *node = (ZT_Node *) 0;
    try {
        *node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(uptr, tptr, callbacks, now));
        return ZT_RESULT_OK;
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (std::runtime_error &exc) {
        return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

void ZT_Node_delete(ZT_Node *node, void *tPtr)
{
    try {
        reinterpret_cast<ZeroTier::Node *>(node)->shutdown(tPtr);
        delete (reinterpret_cast<ZeroTier::Node *>(node));
    } catch (...) {}
}
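
/*
 * Illustrative sketch (not compiled here) of the node lifecycle through this C API.
 * The callback assignments are elided because their exact set depends on the host;
 * hostUserPtr and nowMs are hypothetical placeholders, and only the calls shown
 * below come from this file.
 *
 *   struct ZT_Node_Callbacks cb;
 *   memset(&cb, 0, sizeof(cb));
 *   // ... assign the host's state, wire I/O, frame, and event callbacks here ...
 *
 *   ZT_Node *node = nullptr;
 *   if (ZT_Node_new(&node, hostUserPtr, nullptr, &cb, nowMs) == ZT_RESULT_OK) {
 *       // ... feed packets/frames and run background tasks while the service is up ...
 *       ZT_Node_delete(node, nullptr); // calls shutdown() and releases the core
 *   }
 */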
enum ZT_ResultCode ZT_Node_processWirePacket(
    ZT_Node *node,
    void *tptr,
    int64_t now,
    int64_t localSocket,
    const struct sockaddr_storage *remoteAddress,
    const void *packetData,
    unsigned int packetLength,
    int isZtBuffer,
    volatile int64_t *nextBackgroundTaskDeadline)
{
    try {
        ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? _ZT_PTRTOBUF(packetData) : new ZeroTier::Buf(packetData, packetLength & ZT_BUF_MEM_MASK));
        return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(tptr, now, localSocket, remoteAddress, buf, packetLength, nextBackgroundTaskDeadline);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_OK; // "OK" since invalid packets are simply dropped, but the system is still up
    }
}

enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
    ZT_Node *node,
    void *tptr,
    int64_t now,
    uint64_t nwid,
    uint64_t sourceMac,
    uint64_t destMac,
    unsigned int etherType,
    unsigned int vlanId,
    const void *frameData,
    unsigned int frameLength,
    int isZtBuffer,
    volatile int64_t *nextBackgroundTaskDeadline)
{
    try {
        ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? _ZT_PTRTOBUF(frameData) : new ZeroTier::Buf(frameData, frameLength & ZT_BUF_MEM_MASK));
        return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(tptr, now, nwid, sourceMac, destMac, etherType, vlanId, buf, frameLength, nextBackgroundTaskDeadline);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node, void *tptr, int64_t now, volatile int64_t *nextBackgroundTaskDeadline)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(tptr, now, nextBackgroundTaskDeadline);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_join(ZT_Node *node, uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, void *tptr)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid, controllerFingerprint, uptr, tptr);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_leave(ZT_Node *node, uint64_t nwid, void **uptr, void *tptr)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid, uptr, tptr);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}
enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node, void *tptr, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(tptr, nwid, multicastGroup, multicastAdi);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid, multicastGroup, multicastAdi);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_addRoot(ZT_Node *node, void *tptr, const ZT_Identity *id, const ZT_Locator *loc)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->addRoot(tptr, id, loc);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

enum ZT_ResultCode ZT_Node_removeRoot(ZT_Node *node, void *tptr, const uint64_t address)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->removeRoot(tptr, address);
    } catch (std::bad_alloc &exc) {
        return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}

uint64_t ZT_Node_address(ZT_Node *node)
{
    return reinterpret_cast<ZeroTier::Node *>(node)->address();
}

const ZT_Identity *ZT_Node_identity(ZT_Node *node)
{
    return (const ZT_Identity *) (&(reinterpret_cast<ZeroTier::Node *>(node)->identity()));
}

void ZT_Node_status(ZT_Node *node, ZT_NodeStatus *status)
{
    try {
        reinterpret_cast<ZeroTier::Node *>(node)->status(status);
    } catch (...) {}
}

ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->peers();
    } catch (...) {
        return (ZT_PeerList *) 0;
    }
}

ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node, uint64_t nwid)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
    } catch (...) {
        return (ZT_VirtualNetworkConfig *) 0;
    }
}

ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->networks();
    } catch (...) {
        return (ZT_VirtualNetworkList *) 0;
    }
}

void ZT_Node_setNetworkUserPtr(ZT_Node *node, uint64_t nwid, void *ptr)
{
    try {
        reinterpret_cast<ZeroTier::Node *>(node)->setNetworkUserPtr(nwid, ptr);
    } catch (...) {}
}

void ZT_Node_setInterfaceAddresses(ZT_Node *node, const ZT_InterfaceAddress *addrs, unsigned int addrCount)
{
    try {
        reinterpret_cast<ZeroTier::Node *>(node)->setInterfaceAddresses(addrs, addrCount);
    } catch (...) {}
}

int ZT_Node_sendUserMessage(ZT_Node *node, void *tptr, uint64_t dest, uint64_t typeId, const void *data, unsigned int len)
{
    try {
        return reinterpret_cast<ZeroTier::Node *>(node)->sendUserMessage(tptr, dest, typeId, data, len);
    } catch (...) {
        return 0;
    }
}

void ZT_Node_setController(ZT_Node *node, void *networkControllerInstance)
{
    try {
        reinterpret_cast<ZeroTier::Node *>(node)->setController(networkControllerInstance);
    } catch (...) {}
}

void ZT_version(int *major, int *minor, int *revision, int *build)
{
    if (major)
        *major = ZEROTIER_VERSION_MAJOR;
    if (minor)
        *minor = ZEROTIER_VERSION_MINOR;
    if (revision)
        *revision = ZEROTIER_VERSION_REVISION;
    if (build)
        *build = ZEROTIER_VERSION_BUILD;
}

} // extern "C"