Node.cpp
/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#include <cstdlib>
#include <cstring>
#include <cstdint>
#include <vector>
#include <map>
#include <algorithm>
#include <stdexcept>
#include <new>
#include "Constants.hpp"
#include "SharedPtr.hpp"
#include "Node.hpp"
#include "NetworkController.hpp"
#include "Topology.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
#include "Network.hpp"
#include "Trace.hpp"
#include "Locator.hpp"
#include "Protocol.hpp"
#include "Expect.hpp"
#include "VL1.hpp"
#include "VL2.hpp"
#include "Buf.hpp"
namespace ZeroTier {
namespace {
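// Aggregates the core subsystem objects (Trace, Expect, VL2, VL1, SelfAwareness, Topology)
// so they can be constructed with one allocation and wired into the RuntimeEnvironment.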
struct _NodeObjects
{
	ZT_ALWAYS_INLINE _NodeObjects(RuntimeEnvironment *const RR,void *const tPtr) :
		t(RR),
		expect(),
		vl2(RR),
		vl1(RR),
		sa(RR),
		topology(RR,tPtr)
	{
		RR->t = &t;
		RR->expect = &expect;
		RR->vl2 = &vl2;
		RR->vl1 = &vl1;
		RR->sa = &sa;
		RR->topology = &topology;
	}
	Trace t;
	Expect expect;
	VL2 vl2;
	VL1 vl1;
	SelfAwareness sa;
	Topology topology;
};
struct _sortPeerPtrsByAddress
{
	ZT_ALWAYS_INLINE bool operator()(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b) const { return (a->address() < b->address()); }
};
} // anonymous namespace
Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64_t now) :
	_RR(this),
	_objects(nullptr),
	RR(&_RR),
	_cb(*callbacks),
	_uPtr(uPtr),
	_networks(),
	_networksMask(15),
	_now(now),
	_lastPing(0),
	_lastHousekeepingRun(0),
	_lastNetworkHousekeepingRun(0),
	_lastPathKeepaliveCheck(0),
	_natMustDie(true),
	_online(false)
{
	_networks.resize(16); // _networksMask + 1, must be power of two
	uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
	std::vector<uint8_t> data(stateObjectGet(tPtr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp));
	bool haveIdentity = false;
	if (!data.empty()) {
		data.push_back(0); // zero-terminate string
		if (RR->identity.fromString((const char *)data.data())) {
			RR->identity.toString(false,RR->publicIdentityStr);
			RR->identity.toString(true,RR->secretIdentityStr);
			haveIdentity = true;
		}
	}
	if (!haveIdentity) {
		RR->identity.generate(Identity::C25519);
		RR->identity.toString(false,RR->publicIdentityStr);
		RR->identity.toString(true,RR->secretIdentityStr);
		idtmp[0] = RR->identity.address().toInt(); idtmp[1] = 0;
		stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp,RR->secretIdentityStr,(unsigned int)strlen(RR->secretIdentityStr));
		stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
	} else {
		idtmp[0] = RR->identity.address().toInt(); idtmp[1] = 0;
		data = stateObjectGet(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp);
		if ((data.size() < strlen(RR->publicIdentityStr)) || (memcmp(data.data(),RR->publicIdentityStr,strlen(RR->publicIdentityStr)) != 0))
			stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
	}
	uint8_t tmph[ZT_IDENTITY_HASH_SIZE];
	RR->identity.hashWithPrivate(tmph);
	RR->localCacheSymmetric.init(tmph);
	Utils::burn(tmph,sizeof(tmph));
	// This constructs all the components of the ZeroTier core within a single contiguous memory container,
	// which reduces memory fragmentation and may improve cache locality.
	_objects = new _NodeObjects(RR,tPtr);
	postEvent(tPtr, ZT_EVENT_UP);
}
Node::~Node()
{
	// Let go of all networks to leave them. Do it this way to avoid a deadlock in case
	// Network does anything in its destructor that takes the _networks lock.
	std::vector< SharedPtr<Network> > networks;
	{
		RWMutex::Lock _l(_networks_m);
		networks.swap(_networks);
	}
	networks.clear();
	_networks_m.lock();
	_networks_m.unlock();
	if (_objects)
		delete (_NodeObjects *)_objects;
	// Let go of cached Buf objects. If other nodes happen to be running in this
	// same process space new Bufs will be allocated as needed, but this is almost
	// never the case. Calling this here saves RAM if we are running inside something
	// that wants to keep running after tearing down its ZeroTier core instance.
	Buf::freePool();
}
void Node::shutdown(void *tPtr)
{
	if (RR->topology)
		RR->topology->saveAll(tPtr);
}
ZT_ResultCode Node::processWirePacket(
	void *tPtr,
	int64_t now,
	int64_t localSocket,
	const struct sockaddr_storage *remoteAddress,
	SharedPtr<Buf> &packetData,
	unsigned int packetLength,
	volatile int64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	RR->vl1->onRemotePacket(tPtr,localSocket,(remoteAddress) ? *asInetAddress(remoteAddress) : InetAddress::NIL,packetData,packetLength);
	return ZT_RESULT_OK;
}
ZT_ResultCode Node::processVirtualNetworkFrame(
	void *tPtr,
	int64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	SharedPtr<Buf> &frameData,
	unsigned int frameLength,
	volatile int64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		RR->vl2->onLocalEthernet(tPtr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
		return ZT_RESULT_OK;
	} else {
		return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
	}
}
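// Functor passed to Topology::eachPeerWithRoot() during the periodic ping pass: it pings
// every peer and records whether at least one root responded recently, which drives the
// node's online/offline status.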
struct _processBackgroundTasks_ping_eachPeer
{
	int64_t now;
	Node *parent;
	void *tPtr;
	bool online;
	std::vector<Address> rootsNotOnline;
	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Peer> &peer,const bool isRoot)
	{
		peer->ping(tPtr,now,isRoot);
		if (isRoot) {
			if (peer->active(now)) {
				online = true;
			} else {
				rootsNotOnline.push_back(peer->address());
			}
		}
	}
};
static uint8_t keepAlivePayload = 0; // junk payload for keepalive packets
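// Functor passed to Topology::eachPath(): sends a single junk byte on any path that has been
// idle longer than ZT_PATH_KEEPALIVE_PERIOD to keep stateful NAT and firewall mappings alive.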
struct _processBackgroundTasks_path_keepalive
{
	int64_t now;
	RuntimeEnvironment *RR;
	void *tPtr;
	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Path> &path)
	{
		if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
			++keepAlivePayload;
			path->send(RR,tPtr,&keepAlivePayload,1,now);
		}
	}
};
ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	Mutex::Lock bl(_backgroundTasksLock);
	if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
		_lastPing = now;
		try {
			_processBackgroundTasks_ping_eachPeer pf;
			pf.now = now;
			pf.parent = this;
			pf.tPtr = tPtr;
			pf.online = false;
			RR->topology->eachPeerWithRoot<_processBackgroundTasks_ping_eachPeer &>(pf);
			if (pf.online != _online) {
				_online = pf.online;
				postEvent(tPtr, _online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
			}
			RR->topology->rankRoots(now);
			if (pf.online) {
				// If we have at least one online root, request whois for roots not online.
				// This will give us updated locators for these roots which may contain new
				// IP addresses. It will also auto-discover IPs for roots that were not added
				// with an initial bootstrap address.
				// TODO
				//for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
				//	RR->sw->requestWhois(tPtr,now,*r);
			}
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	}
	if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
		_lastNetworkHousekeepingRun = now;
		{
			RWMutex::RLock l(_networks_m);
			for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
				if ((*i))
					(*i)->doPeriodicTasks(tPtr,now);
			}
		}
	}
	if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
		_lastHousekeepingRun = now;
		try {
			// Clean up any old local controller auth memoizations. This is an
			// optimization for network controllers to know whether to accept
			// or trust nodes without doing an extra cert check.
			{
				_localControllerAuthorizations_m.lock();
				Hashtable< _LocalControllerAuth,int64_t >::Iterator i(_localControllerAuthorizations);
				_LocalControllerAuth *k = (_LocalControllerAuth *)0;
				int64_t *v = (int64_t *)0;
				while (i.next(k,v)) {
					if ((now - *v) > (ZT_NETWORK_AUTOCONF_DELAY * 3)) {
						_localControllerAuthorizations.erase(*k);
					}
				}
				_localControllerAuthorizations_m.unlock();
			}
			RR->topology->doPeriodicTasks(tPtr, now);
			RR->sa->clean(now);
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	}
	if ((now - _lastPathKeepaliveCheck) >= ZT_PATH_KEEPALIVE_PERIOD) {
		_lastPathKeepaliveCheck = now;
		_processBackgroundTasks_path_keepalive pf;
		pf.now = now;
		pf.RR = RR;
		pf.tPtr = tPtr;
		RR->topology->eachPath<_processBackgroundTasks_path_keepalive &>(pf);
	}
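	// Process peer alarms: collect addresses whose alarm time has passed (upgrading to a
	// write lock only when an entry must be erased), then fire Peer::alarm() outside the lock.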
	int64_t earliestAlarmAt = 0x7fffffffffffffffLL;
	std::vector<Address> bzzt;
	{
		RWMutex::RMaybeWLock l(_peerAlarms_l);
		for(std::map<Address,int64_t>::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) {
			if (now >= a->second) {
				bzzt.push_back(a->first);
				l.writing();
				_peerAlarms.erase(a++);
			} else {
				if (a->second < earliestAlarmAt)
					earliestAlarmAt = a->second;
				++a;
			}
		}
	}
	for(std::vector<Address>::iterator a(bzzt.begin());a!=bzzt.end();++a) {
		const SharedPtr<Peer> p(RR->topology->peer(tPtr,*a,false));
		if (p)
			p->alarm(tPtr,now);
	}
	try {
		*nextBackgroundTaskDeadline = std::min(earliestAlarmAt,now + ZT_MAX_TIMER_TASK_INTERVAL);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
	return ZT_RESULT_OK;
}
ZT_ResultCode Node::join(uint64_t nwid,void *uptr,void *tptr)
{
	RWMutex::Lock l(_networks_m);
	const uint64_t nwidHashed = nwid + (nwid >> 32U);
	SharedPtr<Network> *nw = &(_networks[(unsigned long)(nwidHashed & _networksMask)]);
	// Enlarge flat hash table of networks until all networks fit without collisions.
	if (*nw) {
		unsigned long newNetworksSize = (unsigned long)_networks.size();
		std::vector< SharedPtr<Network> > newNetworks;
		uint64_t newNetworksMask,id;
		std::vector< SharedPtr<Network> >::const_iterator i;
try_larger_network_hashtable:
		newNetworksSize <<= 1U; // must remain a power of two
		newNetworks.clear();
		newNetworks.resize(newNetworksSize);
		newNetworksMask = (uint64_t)(newNetworksSize - 1);
		for(i=_networks.begin();i!=_networks.end();++i) {
			if (!(*i))
				continue; // skip empty slots in the flat hash table
			id = (*i)->id();
			nw = &(newNetworks[(unsigned long)((id + (id >> 32U)) & newNetworksMask)]);
			if (*nw)
				goto try_larger_network_hashtable;
			*nw = *i;
		}
		if (newNetworks[(unsigned long)(nwidHashed & newNetworksMask)])
			goto try_larger_network_hashtable;
		_networks.swap(newNetworks);
		_networksMask = newNetworksMask;
		nw = &(_networks[(unsigned long)(nwidHashed & newNetworksMask)]);
	}
	nw->set(new Network(RR,tptr,nwid,uptr,(const NetworkConfig *)0));
	return ZT_RESULT_OK;
}
ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
{
	const uint64_t nwidHashed = nwid + (nwid >> 32U);
	ZT_VirtualNetworkConfig ctmp;
	void **nUserPtr = (void **)0;
	{
		RWMutex::RLock l(_networks_m);
		SharedPtr<Network> &nw = _networks[(unsigned long)(nwidHashed & _networksMask)];
		if (!nw)
			return ZT_RESULT_OK;
		if (uptr)
			*uptr = nw->userPtr();
		nw->externalConfig(&ctmp);
		nw->destroy();
		nUserPtr = nw->userPtr();
	}
	if (nUserPtr)
		RR->node->configureVirtualNetworkPort(tptr,nwid,nUserPtr,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
	{
		RWMutex::Lock _l(_networks_m);
		_networks[(unsigned long)(nwidHashed & _networksMask)].zero();
	}
	uint64_t tmp[2];
	tmp[0] = nwid; tmp[1] = 0;
	RR->node->stateObjectDelete(tptr,ZT_STATE_OBJECT_NETWORK_CONFIG,tmp);
	return ZT_RESULT_OK;
}
ZT_ResultCode Node::multicastSubscribe(void *tPtr,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastSubscribe(tPtr,MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
ZT_ResultCode Node::addRoot(void *tPtr,const ZT_Identity *identity,const sockaddr_storage *bootstrap)
{
	if (!identity)
		return ZT_RESULT_ERROR_BAD_PARAMETER;
	InetAddress a;
	if (bootstrap)
		a = bootstrap;
	RR->topology->addRoot(tPtr,*reinterpret_cast<const Identity *>(identity),a);
	return ZT_RESULT_OK;
}
ZT_ResultCode Node::removeRoot(void *tPtr,const ZT_Identity *identity)
{
	if (!identity)
		return ZT_RESULT_ERROR_BAD_PARAMETER;
	RR->topology->removeRoot(*reinterpret_cast<const Identity *>(identity));
	return ZT_RESULT_OK;
}
uint64_t Node::address() const
{
	return RR->identity.address().toInt();
}
void Node::status(ZT_NodeStatus *status) const
{
	status->address = RR->identity.address().toInt();
	status->identity = reinterpret_cast<const ZT_Identity *>(&RR->identity);
	status->publicIdentity = RR->publicIdentityStr;
	status->secretIdentity = RR->secretIdentityStr;
	status->online = _online ? 1 : 0;
}
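// peers() returns a single malloc'd block laid out as the ZT_PeerList header, then the
// ZT_Peer array, then copies of each peer's Identity so the list stays valid even if a
// peer object is later deleted. Callers release it with freeQueryResult().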
ZT_PeerList *Node::peers() const
{
	std::vector< SharedPtr<Peer> > peers;
	RR->topology->getAllPeers(peers);
	std::sort(peers.begin(),peers.end(),_sortPeerPtrsByAddress());
	char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()) + (sizeof(Identity) * peers.size()));
	if (!buf)
		return (ZT_PeerList *)0;
	ZT_PeerList *pl = (ZT_PeerList *)buf;
	pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));
	Identity *identities = (Identity *)(buf + sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
	const int64_t now = _now;
	pl->peerCount = 0;
	for(std::vector< SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
		ZT_Peer *p = &(pl->peers[pl->peerCount]);
		p->address = (*pi)->address().toInt();
		identities[pl->peerCount] = (*pi)->identity(); // need to make a copy in case peer gets deleted
		p->identity = &identities[pl->peerCount];
		if ((*pi)->remoteVersionKnown()) {
			p->versionMajor = (int)(*pi)->remoteVersionMajor();
			p->versionMinor = (int)(*pi)->remoteVersionMinor();
			p->versionRev = (int)(*pi)->remoteVersionRevision();
		} else {
			p->versionMajor = -1;
			p->versionMinor = -1;
			p->versionRev = -1;
		}
		p->latency = (int)(*pi)->latency();
		if (p->latency >= 0xffff)
			p->latency = -1;
		p->root = RR->topology->isRoot((*pi)->identity()) ? 1 : 0;
		memcpy(&p->bootstrap,&((*pi)->bootstrap()),sizeof(sockaddr_storage));
		std::vector< SharedPtr<Path> > paths;
		(*pi)->getAllPaths(paths);
		p->pathCount = 0;
		for(std::vector< SharedPtr<Path> >::iterator path(paths.begin());path!=paths.end();++path) {
			memcpy(&(p->paths[p->pathCount].address),&((*path)->address()),sizeof(struct sockaddr_storage));
			p->paths[p->pathCount].lastSend = (*path)->lastOut();
			p->paths[p->pathCount].lastReceive = (*path)->lastIn();
			p->paths[p->pathCount].trustedPathId = RR->topology->getOutboundPathTrust((*path)->address());
			p->paths[p->pathCount].alive = (*path)->alive(now) ? 1 : 0;
			p->paths[p->pathCount].preferred = (p->pathCount == 0) ? 1 : 0;
			++p->pathCount;
		}
		++pl->peerCount;
	}
	return pl;
}
ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
	SharedPtr<Network> nw(network(nwid));
	if (nw) {
		ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
		nw->externalConfig(nc);
		return nc;
	}
	return (ZT_VirtualNetworkConfig *)0;
}
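// networks() uses the same single-allocation pattern: a ZT_VirtualNetworkList header followed
// immediately by its ZT_VirtualNetworkConfig array, also released via freeQueryResult().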
ZT_VirtualNetworkList *Node::networks() const
{
	RWMutex::RLock l(_networks_m);
	unsigned long networkCount = 0;
	for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
		if ((*i))
			++networkCount;
	}
	char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * networkCount));
	if (!buf)
		return (ZT_VirtualNetworkList *)0;
	ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
	nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
	nl->networkCount = 0;
	for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
		if ((*i))
			(*i)->externalConfig(&(nl->networks[nl->networkCount++]));
	}
	return nl;
}
void Node::setNetworkUserPtr(uint64_t nwid,void *ptr)
{
	SharedPtr<Network> nw(network(nwid));
	if (nw)
		*(nw->userPtr()) = ptr;
}
void Node::freeQueryResult(void *qr)
{
	if (qr)
		::free(qr);
}
void Node::setInterfaceAddresses(const ZT_InterfaceAddress *addrs,unsigned int addrCount)
{
	Mutex::Lock _l(_localInterfaceAddresses_m);
	_localInterfaceAddresses.clear();
	for(unsigned int i=0;i<addrCount;++i) {
		bool dupe = false;
		for(unsigned int j=0;j<i;++j) {
			if (*(reinterpret_cast<const InetAddress *>(&addrs[j].address)) == *(reinterpret_cast<const InetAddress *>(&addrs[i].address))) {
				dupe = true;
				break;
			}
		}
		if (!dupe)
			_localInterfaceAddresses.push_back(addrs[i]);
	}
}
int Node::sendUserMessage(void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len)
{
	try {
		if (RR->identity.address().toInt() != dest) {
			// TODO
			/*
			Packet outp(Address(dest),RR->identity.address(),Packet::VERB_USER_MESSAGE);
			outp.append(typeId);
			outp.append(data,len);
			outp.compress();
			RR->sw->send(tptr,outp,true);
			*/
			return 1;
		}
	} catch ( ... ) {}
	return 0;
}
void Node::setController(void *networkControllerInstance)
{
	RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
	if (networkControllerInstance)
		RR->localNetworkController->init(RR->identity,this);
}
// Methods used only within the core ----------------------------------------------------------------------------------
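// Reads a state object via the external stateGetFunction callback, which returns the object's
// length (or <= 0 if not found) and supplies a data pointer plus a function to free it.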
std::vector<uint8_t> Node::stateObjectGet(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2])
{
	std::vector<uint8_t> r;
	if (_cb.stateGetFunction) {
		void *data = 0;
		void (*freeFunc)(void *) = 0;
		int l = _cb.stateGetFunction(
			reinterpret_cast<ZT_Node *>(this),
			_uPtr,
			tPtr,
			type,
			id,
			&data,
			&freeFunc);
		if ((l > 0)&&(data)&&(freeFunc)) {
			r.assign(reinterpret_cast<const uint8_t *>(data),reinterpret_cast<const uint8_t *>(data) + l);
			freeFunc(data);
		}
	}
	return r;
}
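// A candidate path is refused if its remote address falls within any joined network's managed
// IP assignments (which would risk carrying ZeroTier traffic over ZeroTier itself); otherwise
// the decision is delegated to the external path check callback, if one is set.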
bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Identity &id,const int64_t localSocket,const InetAddress &remoteAddress)
{
	{
		RWMutex::RLock l(_networks_m);
		for (std::vector<SharedPtr<Network> >::iterator i(_networks.begin()); i != _networks.end(); ++i) {
			if ((*i)) {
				for (unsigned int k = 0,j = (*i)->config().staticIpCount; k < j; ++k) {
					if ((*i)->config().staticIps[k].containsAddress(remoteAddress))
						return false;
				}
			}
		}
	}
	if (_cb.pathCheckFunction) {
		return (_cb.pathCheckFunction(
			reinterpret_cast<ZT_Node *>(this),
			_uPtr,
			tPtr,
			id.address().toInt(),
			(const ZT_Identity *)&id,
			localSocket,
			reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0);
	}
	return true;
}
bool Node::externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr)
{
	if (_cb.pathLookupFunction) {
		return (_cb.pathLookupFunction(
			reinterpret_cast<ZT_Node *>(this),
			_uPtr,
			tPtr,
			id.address().toInt(),
			reinterpret_cast<const ZT_Identity *>(&id),
			family,
			reinterpret_cast<sockaddr_storage *>(&addr)) == ZT_RESULT_OK);
	}
	return false;
}
ZT_ResultCode Node::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork, const ZT_PhysicalPathConfiguration *pathConfig)
{
	RR->topology->setPhysicalPathConfiguration(pathNetwork,pathConfig);
	return ZT_RESULT_OK;
}
bool Node::localControllerHasAuthorized(const int64_t now,const uint64_t nwid,const Address &addr) const
{
	_localControllerAuthorizations_m.lock();
	const int64_t *const at = _localControllerAuthorizations.get(_LocalControllerAuth(nwid,addr));
	_localControllerAuthorizations_m.unlock();
	if (at)
		return ((now - *at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
	return false;
}
// Implementation of NetworkController::Sender ------------------------------------------------------------------------
void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig)
{
	_localControllerAuthorizations_m.lock();
	_localControllerAuthorizations[_LocalControllerAuth(nwid,destination)] = now();
	_localControllerAuthorizations_m.unlock();
	if (destination == RR->identity.address()) {
		SharedPtr<Network> n(network(nwid));
		if (!n) return;
		n->setConfiguration((void *)0,nc,true);
	} else {
		Dictionary dconf;
		if (nc.toDictionary(dconf,sendLegacyFormatConfig)) {
			uint64_t configUpdateId = Utils::random();
			if (!configUpdateId) ++configUpdateId;
			std::vector<uint8_t> ddata;
			dconf.encode(ddata);
			// TODO
			/*
			unsigned int chunkIndex = 0;
			while (chunkIndex < totalSize) {
				const unsigned int chunkLen = std::min(totalSize - chunkIndex,(unsigned int)(ZT_PROTO_MAX_PACKET_LENGTH - (ZT_PACKET_IDX_PAYLOAD + 256)));
				Packet outp(destination,RR->identity.address(),(requestPacketId) ? Packet::VERB_OK : Packet::VERB_NETWORK_CONFIG);
				if (requestPacketId) {
					outp.append((unsigned char)Packet::VERB_NETWORK_CONFIG_REQUEST);
					outp.append(requestPacketId);
				}
				const unsigned int sigStart = outp.size();
				outp.append(nwid);
				outp.append((uint16_t)chunkLen);
				outp.append((const void *)(dconf->data() + chunkIndex),chunkLen);
				outp.append((uint8_t)0); // no flags
				outp.append((uint64_t)configUpdateId);
				outp.append((uint32_t)totalSize);
				outp.append((uint32_t)chunkIndex);
				uint8_t sig[256];
				const unsigned int siglen = RR->identity.sign(reinterpret_cast<const uint8_t *>(outp.data()) + sigStart,outp.size() - sigStart,sig,sizeof(sig));
				outp.append((uint8_t)1);
				outp.append((uint16_t)siglen);
				outp.append(sig,siglen);
				outp.compress();
				RR->sw->send((void *)0,outp,true);
				chunkIndex += chunkLen;
			}
			*/
		}
	}
}
void Node::ncSendRevocation(const Address &destination,const Revocation &rev)
{
	if (destination == RR->identity.address()) {
		SharedPtr<Network> n(network(rev.networkId()));
		if (!n) return;
		n->addCredential((void *)0,RR->identity,rev);
	} else {
		// TODO
		/*
		Packet outp(destination,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
		outp.append((uint8_t)0x00);
		outp.append((uint16_t)0);
		outp.append((uint16_t)0);
		outp.append((uint16_t)1);
		rev.serialize(outp);
		outp.append((uint16_t)0);
		RR->sw->send((void *)0,outp,true);
		*/
	}
}
void Node::ncSendError(uint64_t nwid,uint64_t requestPacketId,const Address &destination,NetworkController::ErrorCode errorCode)
{
	if (destination == RR->identity.address()) {
		SharedPtr<Network> n(network(nwid));
		if (!n) return;
		switch(errorCode) {
			case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
			case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:
				n->setNotFound();
				break;
			case NetworkController::NC_ERROR_ACCESS_DENIED:
				n->setAccessDenied();
				break;
			default: break;
		}
	} else if (requestPacketId) {
		// TODO
		/*
		Packet outp(destination,RR->identity.address(),Packet::VERB_ERROR);
		outp.append((unsigned char)Packet::VERB_NETWORK_CONFIG_REQUEST);
		outp.append(requestPacketId);
		switch(errorCode) {
			//case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
			//case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:
			default:
				outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
				break;
			case NetworkController::NC_ERROR_ACCESS_DENIED:
				outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
				break;
		}
		outp.append(nwid);
		RR->sw->send((void *)0,outp,true);
		*/
	} // else we can't send an ERROR() in response to nothing, so discard
}
} // namespace ZeroTier
// C API --------------------------------------------------------------------------------------------------------------
extern "C" {
// These macros make the idiom of passing buffers to outside code via the API work properly even
// if the first address of Buf does not overlap with its data field, since the C++ standard does
// not absolutely guarantee this.
#define _ZT_PTRTOBUF(p) ((ZeroTier::Buf *)( ((uintptr_t)(p)) - ((uintptr_t)&(((ZeroTier::Buf *)0)->unsafeData[0])) ))
#define _ZT_BUFTOPTR(b) ((void *)(&((b)->unsafeData[0])))
void *ZT_getBuffer()
{
	// When external code requests a Buf, grab one from the pool (or freshly allocated)
	// and return it with its reference count left at zero. It's the responsibility of
	// external code to bring it back via freeBuffer() or one of the processX() calls.
	// When this occurs it's either sent back to the pool with Buf's delete operator or
	// wrapped in a SharedPtr<> to be passed into the core.
	try {
		return _ZT_BUFTOPTR(new ZeroTier::Buf());
	} catch ( ... ) {
		return nullptr; // can only happen on out of memory condition
	}
}
ZT_SDK_API void ZT_freeBuffer(void *b)
{
	if (b)
		delete _ZT_PTRTOBUF(b);
}
enum ZT_ResultCode ZT_Node_new(ZT_Node **node,void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,int64_t now)
{
	*node = (ZT_Node *)0;
	try {
		*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(uptr,tptr,callbacks,now));
		return ZT_RESULT_OK;
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch (std::runtime_error &exc) {
		return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
void ZT_Node_delete(ZT_Node *node,void *tPtr)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->shutdown(tPtr);
		delete (reinterpret_cast<ZeroTier::Node *>(node));
	} catch ( ... ) {}
}
enum ZT_ResultCode ZT_Node_processWirePacket(
	ZT_Node *node,
	void *tptr,
	int64_t now,
	int64_t localSocket,
	const struct sockaddr_storage *remoteAddress,
	const void *packetData,
	unsigned int packetLength,
	int isZtBuffer,
	volatile int64_t *nextBackgroundTaskDeadline)
{
	try {
		ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? _ZT_PTRTOBUF(packetData) : new ZeroTier::Buf(packetData,packetLength & ZT_BUF_MEM_MASK));
		return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(tptr,now,localSocket,remoteAddress,buf,packetLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_OK; // "OK" since invalid packets are simply dropped, but the system is still up
	}
}
enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
	ZT_Node *node,
	void *tptr,
	int64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	const void *frameData,
	unsigned int frameLength,
	int isZtBuffer,
	volatile int64_t *nextBackgroundTaskDeadline)
{
	try {
		ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? _ZT_PTRTOBUF(frameData) : new ZeroTier::Buf(frameData,frameLength & ZT_BUF_MEM_MASK));
		return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(tptr,now,nwid,sourceMac,destMac,etherType,vlanId,buf,frameLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,void *tptr,int64_t now,volatile int64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(tptr,now,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid,void *uptr,void *tptr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid,uptr,tptr);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid,void **uptr,void *tptr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid,uptr,tptr);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,void *tptr,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(tptr,nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_addRoot(ZT_Node *node,void *tptr,const ZT_Identity *identity,const struct sockaddr_storage *bootstrap)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->addRoot(tptr,identity,bootstrap);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
enum ZT_ResultCode ZT_Node_removeRoot(ZT_Node *node,void *tptr,const ZT_Identity *identity)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->removeRoot(tptr,identity);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
uint64_t ZT_Node_address(ZT_Node *node)
{
	return reinterpret_cast<ZeroTier::Node *>(node)->address();
}
const ZT_Identity *ZT_Node_identity(ZT_Node *node)
{
	return (const ZT_Identity *)(&(reinterpret_cast<ZeroTier::Node *>(node)->identity()));
}
void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->status(status);
	} catch ( ... ) {}
}
ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->peers();
	} catch ( ... ) {
		return (ZT_PeerList *)0;
	}
}
ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
	} catch ( ... ) {
		return (ZT_VirtualNetworkConfig *)0;
	}
}
ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networks();
	} catch ( ... ) {
		return (ZT_VirtualNetworkList *)0;
	}
}
void ZT_Node_setNetworkUserPtr(ZT_Node *node,uint64_t nwid,void *ptr)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->setNetworkUserPtr(nwid,ptr);
	} catch ( ... ) {}
}
void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->freeQueryResult(qr);
	} catch ( ... ) {}
}
void ZT_Node_setInterfaceAddresses(ZT_Node *node,const ZT_InterfaceAddress *addrs,unsigned int addrCount)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->setInterfaceAddresses(addrs,addrCount);
	} catch ( ... ) {}
}
int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->sendUserMessage(tptr,dest,typeId,data,len);
	} catch ( ... ) {
		return 0;
	}
}
void ZT_Node_setController(ZT_Node *node,void *networkControllerInstance)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->setController(networkControllerInstance);
	} catch ( ... ) {}
}
enum ZT_ResultCode ZT_Node_setPhysicalPathConfiguration(ZT_Node *node,const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->setPhysicalPathConfiguration(pathNetwork,pathConfig);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}
void ZT_version(int *major,int *minor,int *revision)
{
	if (major)
		*major = ZEROTIER_VERSION_MAJOR;
	if (minor)
		*minor = ZEROTIER_VERSION_MINOR;
	if (revision)
		*revision = ZEROTIER_VERSION_REVISION;
}
} // extern "C"