Node.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <stdint.h>

#include "../version.h"

#include "Constants.hpp"
#include "Node.hpp"
#include "RuntimeEnvironment.hpp"
#include "NetworkController.hpp"
#include "Switch.hpp"
#include "Multicaster.hpp"
#include "Topology.hpp"
#include "Buffer.hpp"
#include "Packet.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
#include "Cluster.hpp"
#include "DeferredPackets.hpp"

const struct sockaddr_storage ZT_SOCKADDR_NULL = {0};

namespace ZeroTier {

/****************************************************************************/
/* Public Node interface (C++, exposed via CAPI bindings)                   */
/****************************************************************************/

Node::Node(
	uint64_t now,
	void *uptr,
	ZT_DataStoreGetFunction dataStoreGetFunction,
	ZT_DataStorePutFunction dataStorePutFunction,
	ZT_WirePacketSendFunction wirePacketSendFunction,
	ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
	ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
	ZT_PathCheckFunction pathCheckFunction,
	ZT_EventCallback eventCallback) :
	_RR(this),
	RR(&_RR),
	_uPtr(uptr),
	_dataStoreGetFunction(dataStoreGetFunction),
	_dataStorePutFunction(dataStorePutFunction),
	_wirePacketSendFunction(wirePacketSendFunction),
	_virtualNetworkFrameFunction(virtualNetworkFrameFunction),
	_virtualNetworkConfigFunction(virtualNetworkConfigFunction),
	_pathCheckFunction(pathCheckFunction),
	_eventCallback(eventCallback),
	_networks(),
	_networks_m(),
	_prngStreamPtr(0),
	_now(now),
	_lastPingCheck(0),
	_lastHousekeepingRun(0)
{
	_online = false;

	// Use Salsa20 alone as a high-quality non-crypto PRNG
	{
		char foo[32];
		Utils::getSecureRandom(foo,32);
		_prng.init(foo,256,foo);
		memset(_prngStream,0,sizeof(_prngStream));
		_prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
	}

	{
		std::string idtmp(dataStoreGet("identity.secret"));
		if ((!idtmp.length())||(!RR->identity.fromString(idtmp))||(!RR->identity.hasPrivate())) {
			TRACE("identity.secret not found, generating...");
			RR->identity.generate();
			idtmp = RR->identity.toString(true);
			if (!dataStorePut("identity.secret",idtmp,true))
				throw std::runtime_error("unable to write identity.secret");
		}
		RR->publicIdentityStr = RR->identity.toString(false);
		RR->secretIdentityStr = RR->identity.toString(true);

		idtmp = dataStoreGet("identity.public");
		if (idtmp != RR->publicIdentityStr) {
			if (!dataStorePut("identity.public",RR->publicIdentityStr,false))
				throw std::runtime_error("unable to write identity.public");
		}
	}

	try {
		RR->sw = new Switch(RR);
		RR->mc = new Multicaster(RR);
		RR->topology = new Topology(RR);
		RR->sa = new SelfAwareness(RR);
		RR->dp = new DeferredPackets(RR);
	} catch ( ... ) {
		delete RR->dp;
		delete RR->sa;
		delete RR->topology;
		delete RR->mc;
		delete RR->sw;
		throw;
	}

	postEvent(ZT_EVENT_UP);
}

Node::~Node()
{
	Mutex::Lock _l(_networks_m);
	Mutex::Lock _l(_networks_m);
	_networks.clear(); // ensure that networks are destroyed before shutdown
	RR->dpEnabled = 0;
	delete RR->dp;
	delete RR->sa;
	delete RR->topology;
	delete RR->mc;
	delete RR->sw;
#ifdef ZT_ENABLE_CLUSTER
	delete RR->cluster;
#endif
}

ZT_ResultCode Node::processWirePacket(
	uint64_t now,
	const struct sockaddr_storage *localAddress,
	const struct sockaddr_storage *remoteAddress,
	const void *packetData,
	unsigned int packetLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(localAddress)),*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
	return ZT_RESULT_OK;
}
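
// Usage sketch (not compiled): how a host's UDP reader might hand received
// datagrams to the node through the ZT_Node_processWirePacket() wrapper
// declared later in this file. The POSIX socket calls and the 'udpSocket' /
// 'localAddress' variables are illustrative host-side assumptions.
#if 0
#include <sys/socket.h>

static void pumpOneDatagram(ZT_Node *node,int udpSocket,const struct sockaddr_storage &localAddress,uint64_t nowMs)
{
	char buf[2048];
	struct sockaddr_storage from;
	socklen_t fromLen = sizeof(from);
	ssize_t n = recvfrom(udpSocket,buf,sizeof(buf),0,(struct sockaddr *)&from,&fromLen);
	if (n > 0) {
		volatile uint64_t deadline = 0; // not used by processWirePacket() above, but required by the signature
		ZT_Node_processWirePacket(node,nowMs,&localAddress,&from,buf,(unsigned int)n,&deadline);
	}
}
#endif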

ZT_ResultCode Node::processVirtualNetworkFrame(
	uint64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	const void *frameData,
	unsigned int frameLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		RR->sw->onLocalEthernet(nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

class _PingPeersThatNeedPing
{
public:
	_PingPeersThatNeedPing(const RuntimeEnvironment *renv,uint64_t now,const std::vector< std::pair<Address,InetAddress> > &relays) :
		lastReceiveFromUpstream(0),
		RR(renv),
		_now(now),
		_relays(relays),
		_world(RR->topology->world())
	{
	}

	uint64_t lastReceiveFromUpstream; // tracks last time we got a packet from an 'upstream' peer like a root or a relay

	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
	{
		bool upstream = false;
		InetAddress stableEndpoint4,stableEndpoint6;

		// If this is a world root, pick (if possible) both an IPv4 and an IPv6 stable endpoint to use if link isn't currently alive.
		for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
			if (r->identity.address() == p->address()) {
				upstream = true;
				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)r->stableEndpoints.size();++k) {
					const InetAddress &addr = r->stableEndpoints[ptr++ % r->stableEndpoints.size()];
					if (!stableEndpoint4) {
						if (addr.ss_family == AF_INET)
							stableEndpoint4 = addr;
					}
					if (!stableEndpoint6) {
						if (addr.ss_family == AF_INET6)
							stableEndpoint6 = addr;
					}
				}
				break;
			}
		}

		if (!upstream) {
			// If I am a root server, only ping other root servers -- roots don't ping "down"
			// since that would just be a waste of bandwidth and could potentially cause route
			// flapping in Cluster mode.
			if (RR->topology->amRoot())
				return;

			// Check for network preferred relays, also considered 'upstream' and thus always
			// pinged to keep links up. If they have stable addresses we will try them there.
			for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(_relays.begin());r!=_relays.end();++r) {
				if (r->first == p->address()) {
					if (r->second.ss_family == AF_INET)
						stableEndpoint4 = r->second;
					else if (r->second.ss_family == AF_INET6)
						stableEndpoint6 = r->second;
					upstream = true;
					break;
				}
			}
		}

		if (upstream) {
			// "Upstream" devices are roots and relays and get special treatment -- they stay alive
			// forever and we try to keep (if available) both IPv4 and IPv6 channels open to them.
			bool needToContactIndirect = true;
			if (p->doPingAndKeepalive(_now,AF_INET)) {
				needToContactIndirect = false;
			} else {
				if (stableEndpoint4) {
					needToContactIndirect = false;
					p->sendHELLO(InetAddress(),stableEndpoint4,_now);
				}
			}
			if (p->doPingAndKeepalive(_now,AF_INET6)) {
				needToContactIndirect = false;
			} else {
				if (stableEndpoint6) {
					needToContactIndirect = false;
					p->sendHELLO(InetAddress(),stableEndpoint6,_now);
				}
			}

			if (needToContactIndirect) {
				// If this is an upstream and we have no stable endpoint for either IPv4 or IPv6,
				// send a NOP indirectly if possible to see if we can get to this peer in any
				// way whatsoever. This will e.g. find network preferred relays that lack
				// stable endpoints by using root servers.
				Packet outp(p->address(),RR->identity.address(),Packet::VERB_NOP);
				RR->sw->send(outp,true,0);
			}

			lastReceiveFromUpstream = std::max(p->lastReceive(),lastReceiveFromUpstream);
		} else if (p->activelyTransferringFrames(_now)) {
			// Normal nodes get their preferred link kept alive if the node has generated frame traffic recently
			p->doPingAndKeepalive(_now,0);
		}
	}

private:
	const RuntimeEnvironment *RR;
	uint64_t _now;
	const std::vector< std::pair<Address,InetAddress> > &_relays;
	World _world;
};

ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	Mutex::Lock bl(_backgroundTasksLock);

	unsigned long timeUntilNextPingCheck = ZT_PING_CHECK_INVERVAL;
	const uint64_t timeSinceLastPingCheck = now - _lastPingCheck;
	if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
		try {
			_lastPingCheck = now;

			// Get relays and networks that need config without leaving the mutex locked
			std::vector< std::pair<Address,InetAddress> > networkRelays;
			std::vector< SharedPtr<Network> > needConfig;
			{
				Mutex::Lock _l(_networks_m);
				for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
					SharedPtr<NetworkConfig> nc(n->second->config2());
					if (((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!nc))
						needConfig.push_back(n->second);
					if (nc)
						networkRelays.insert(networkRelays.end(),nc->relays().begin(),nc->relays().end());
				}
			}

			// Request updated configuration for networks that need it
			for(std::vector< SharedPtr<Network> >::const_iterator n(needConfig.begin());n!=needConfig.end();++n)
				(*n)->requestConfiguration();

			// Do pings and keepalives
			_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);

			// Update online status, post status change as event
			const bool oldOnline = _online;
			_online = (((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT)||(RR->topology->amRoot()));
			if (oldOnline != _online)
				postEvent(_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	} else {
		timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
	}

	if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
		try {
			_lastHousekeepingRun = now;
			RR->topology->clean(now);
			RR->sa->clean(now);
			RR->mc->clean(now);
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	}

	try {
#ifdef ZT_ENABLE_CLUSTER
		// If clustering is enabled we have to call cluster->doPeriodicTasks() very often, so we override normal timer deadline behavior
		if (RR->cluster) {
			RR->sw->doTimerTasks(now);
			RR->cluster->doPeriodicTasks();
			*nextBackgroundTaskDeadline = now + ZT_CLUSTER_PERIODIC_TASK_PERIOD; // this is really short so just tick at this rate
		} else {
#endif
			*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
#ifdef ZT_ENABLE_CLUSTER
		}
#endif
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}

	return ZT_RESULT_OK;
}
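
// Usage sketch (not compiled): a host service loop driving the node via the
// ZT_Node_processBackgroundTasks() wrapper declared later in this file and
// sleeping until the deadline that call writes back. The nowMs() clock and
// hostSleepMs() sleep primitive are hypothetical host-side helpers; only the
// CAPI call and the meaning of nextBackgroundTaskDeadline come from this file.
#if 0
static void serviceLoop(ZT_Node *node)
{
	volatile uint64_t nextDeadline = 0;
	for(;;) {
		uint64_t now = nowMs(); // hypothetical millisecond clock
		if (now >= nextDeadline) {
			if (ZT_Node_processBackgroundTasks(node,now,&nextDeadline) != ZT_RESULT_OK)
				break; // fatal result codes mean the node cannot continue
		}
		// Sleep (or poll sockets) until the earlier of nextDeadline and the next inbound packet.
		hostSleepMs((nextDeadline > now) ? (unsigned long)(nextDeadline - now) : 1); // hypothetical
	}
}
#endif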

ZT_ResultCode Node::join(uint64_t nwid,void *uptr)
{
	Mutex::Lock _l(_networks_m);
	SharedPtr<Network> nw = _network(nwid);
	if (!nw)
		_networks.push_back(std::pair< uint64_t,SharedPtr<Network> >(nwid,SharedPtr<Network>(new Network(RR,nwid,uptr))));
	std::sort(_networks.begin(),_networks.end()); // will sort by nwid since it's the first in a pair<>
	return ZT_RESULT_OK;
}

ZT_ResultCode Node::leave(uint64_t nwid,void **uptr)
{
	std::vector< std::pair< uint64_t,SharedPtr<Network> > > newn;
	Mutex::Lock _l(_networks_m);
	for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
		if (n->first != nwid)
			newn.push_back(*n);
		else {
			if (uptr)
				*uptr = n->second->userPtr();
			n->second->destroy();
		}
	}
	_networks.swap(newn);
	return ZT_RESULT_OK;
}
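
// Usage sketch (not compiled): joining and leaving a network from a host
// application via the ZT_Node_join() / ZT_Node_leave() wrappers declared
// later in this file. Parsing a 16-hex-digit network ID string with
// strtoull() is an illustrative assumption; the CAPI signatures come from
// this file.
#if 0
#include <stdlib.h>

static void joinByIdString(ZT_Node *node,const char *nwidHex)
{
	uint64_t nwid = (uint64_t)strtoull(nwidHex,(char **)0,16); // 16 hex digits
	ZT_Node_join(node,nwid,(void *)0); // uptr: optional per-network user pointer
}

static void leaveByIdString(ZT_Node *node,const char *nwidHex)
{
	uint64_t nwid = (uint64_t)strtoull(nwidHex,(char **)0,16);
	void *oldUptr = (void *)0;
	ZT_Node_leave(node,nwid,&oldUptr); // returns the user pointer set at join, if any
}
#endif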

ZT_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastSubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

uint64_t Node::address() const
{
	return RR->identity.address().toInt();
}

void Node::status(ZT_NodeStatus *status) const
{
	status->address = RR->identity.address().toInt();
	status->worldId = RR->topology->worldId();
	status->worldTimestamp = RR->topology->worldTimestamp();
	status->publicIdentity = RR->publicIdentityStr.c_str();
	status->secretIdentity = RR->secretIdentityStr.c_str();
	status->online = _online ? 1 : 0;
}

ZT_PeerList *Node::peers() const
{
	std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
	std::sort(peers.begin(),peers.end());

	char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
	if (!buf)
		return (ZT_PeerList *)0;
	ZT_PeerList *pl = (ZT_PeerList *)buf;
	pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));

	pl->peerCount = 0;
	for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
		ZT_Peer *p = &(pl->peers[pl->peerCount++]);
		p->address = pi->second->address().toInt();
		p->lastUnicastFrame = pi->second->lastUnicastFrame();
		p->lastMulticastFrame = pi->second->lastMulticastFrame();
		if (pi->second->remoteVersionKnown()) {
			p->versionMajor = pi->second->remoteVersionMajor();
			p->versionMinor = pi->second->remoteVersionMinor();
			p->versionRev = pi->second->remoteVersionRevision();
		} else {
			p->versionMajor = -1;
			p->versionMinor = -1;
			p->versionRev = -1;
		}
		p->latency = pi->second->latency();
		p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;

		std::vector<Path> paths(pi->second->paths());
		Path *bestPath = pi->second->getBestPath(_now);
		p->pathCount = 0;
		for(std::vector<Path>::iterator path(paths.begin());path!=paths.end();++path) {
			memcpy(&(p->paths[p->pathCount].address),&(path->address()),sizeof(struct sockaddr_storage));
			p->paths[p->pathCount].lastSend = path->lastSend();
			p->paths[p->pathCount].lastReceive = path->lastReceived();
			p->paths[p->pathCount].active = path->active(_now) ? 1 : 0;
			p->paths[p->pathCount].preferred = ((bestPath)&&(*path == *bestPath)) ? 1 : 0;
			++p->pathCount;
		}
	}

	return pl;
}
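
// Usage sketch (not compiled): iterating the peer list returned by the
// ZT_Node_peers() wrapper declared later in this file and releasing it with
// ZT_Node_freeQueryResult(). The printf formatting is illustrative; the
// ZT_PeerList / ZT_Peer fields read here are the ones populated above.
#if 0
#include <stdio.h>

static void dumpPeers(ZT_Node *node)
{
	ZT_PeerList *pl = ZT_Node_peers(node);
	if (!pl)
		return; // out of memory or internal error
	for(unsigned long i=0;i<pl->peerCount;++i) {
		const ZT_Peer *p = &(pl->peers[i]);
		printf("%.10llx role=%s latency=%u paths=%u\n",
			(unsigned long long)p->address,
			((p->role == ZT_PEER_ROLE_ROOT) ? "ROOT" : "LEAF"),
			p->latency,
			p->pathCount);
	}
	ZT_Node_freeQueryResult(node,(void *)pl);
}
#endif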

ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
	Mutex::Lock _l(_networks_m);
	SharedPtr<Network> nw = _network(nwid);
	if (nw) {
		ZT_VirtualNetworkConfig *nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
		nw->externalConfig(nc);
		return nc;
	}
	return (ZT_VirtualNetworkConfig *)0;
}

ZT_VirtualNetworkList *Node::networks() const
{
	Mutex::Lock _l(_networks_m);

	char *buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
	if (!buf)
		return (ZT_VirtualNetworkList *)0;
	ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
	nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));

	nl->networkCount = 0;
	for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n)
		n->second->externalConfig(&(nl->networks[nl->networkCount++]));

	return nl;
}

void Node::freeQueryResult(void *qr)
{
	if (qr)
		::free(qr);
}

int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr)
{
	if (Path::isAddressValidForPath(*(reinterpret_cast<const InetAddress *>(addr)))) {
		Mutex::Lock _l(_directPaths_m);
		_directPaths.push_back(*(reinterpret_cast<const InetAddress *>(addr)));
		std::sort(_directPaths.begin(),_directPaths.end());
		_directPaths.erase(std::unique(_directPaths.begin(),_directPaths.end()),_directPaths.end());
		return 1;
	}
	return 0;
}

void Node::clearLocalInterfaceAddresses()
{
	Mutex::Lock _l(_directPaths_m);
	_directPaths.clear();
}

void Node::setNetconfMaster(void *networkControllerInstance)
{
	RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
}

ZT_ResultCode Node::circuitTestBegin(ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
{
	if (test->hopCount > 0) {
		try {
			Packet outp(Address(),RR->identity.address(),Packet::VERB_CIRCUIT_TEST);
			RR->identity.address().appendTo(outp);
			outp.append((uint16_t)((test->reportAtEveryHop != 0) ? 0x03 : 0x02));
			outp.append((uint64_t)test->timestamp);
			outp.append((uint64_t)test->testId);
			outp.append((uint16_t)0); // originator credential length, updated later
			if (test->credentialNetworkId) {
				outp.append((uint8_t)0x01);
				outp.append((uint64_t)test->credentialNetworkId);
				outp.setAt<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 23,(uint16_t)9);
			}
			outp.append((uint16_t)0);
			C25519::Signature sig(RR->identity.sign(reinterpret_cast<const char *>(outp.data()) + ZT_PACKET_IDX_PAYLOAD,outp.size() - ZT_PACKET_IDX_PAYLOAD));
			outp.append((uint16_t)sig.size());
			outp.append(sig.data,(unsigned int)sig.size());
			outp.append((uint16_t)0); // originator doesn't need an extra credential, since it's the originator
			for(unsigned int h=1;h<test->hopCount;++h) {
				outp.append((uint8_t)0);
				outp.append((uint8_t)(test->hops[h].breadth & 0xff));
				for(unsigned int a=0;a<test->hops[h].breadth;++a)
					Address(test->hops[h].addresses[a]).appendTo(outp);
			}

			for(unsigned int a=0;a<test->hops[0].breadth;++a) {
				outp.newInitializationVector();
				outp.setDestination(Address(test->hops[0].addresses[a]));
				RR->sw->send(outp,true,0);
			}
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL; // probably indicates FIFO too big for packet
		}
	}

	{
		test->_internalPtr = reinterpret_cast<void *>(reportCallback);
		Mutex::Lock _l(_circuitTests_m);
		if (std::find(_circuitTests.begin(),_circuitTests.end(),test) == _circuitTests.end())
			_circuitTests.push_back(test);
	}

	return ZT_RESULT_OK;
}

void Node::circuitTestEnd(ZT_CircuitTest *test)
{
	Mutex::Lock _l(_circuitTests_m);
	for(;;) {
		std::vector< ZT_CircuitTest * >::iterator ct(std::find(_circuitTests.begin(),_circuitTests.end(),test));
		if (ct == _circuitTests.end())
			break;
		else _circuitTests.erase(ct);
	}
}

ZT_ResultCode Node::clusterInit(
	unsigned int myId,
	const struct sockaddr_storage *zeroTierPhysicalEndpoints,
	unsigned int numZeroTierPhysicalEndpoints,
	int x,
	int y,
	int z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		return ZT_RESULT_ERROR_BAD_PARAMETER;

	std::vector<InetAddress> eps;
	for(unsigned int i=0;i<numZeroTierPhysicalEndpoints;++i)
		eps.push_back(InetAddress(zeroTierPhysicalEndpoints[i]));
	std::sort(eps.begin(),eps.end());

	RR->cluster = new Cluster(RR,myId,eps,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);

	return ZT_RESULT_OK;
#else
	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
#endif
}

ZT_ResultCode Node::clusterAddMember(unsigned int memberId)
{
#ifdef ZT_ENABLE_CLUSTER
	if (!RR->cluster)
		return ZT_RESULT_ERROR_BAD_PARAMETER;
	RR->cluster->addMember((uint16_t)memberId);
	return ZT_RESULT_OK;
#else
	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
#endif
}

void Node::clusterRemoveMember(unsigned int memberId)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->removeMember((uint16_t)memberId);
#endif
}

void Node::clusterHandleIncomingMessage(const void *msg,unsigned int len)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->handleIncomingStateMessage(msg,len);
#endif
}

void Node::clusterStatus(ZT_ClusterStatus *cs)
{
	if (!cs)
		return;
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->status(*cs);
	else
#endif
	memset(cs,0,sizeof(ZT_ClusterStatus));
}

void Node::backgroundThreadMain()
{
	++RR->dpEnabled;
	for(;;) {
		try {
			if (RR->dp->process() < 0)
				break;
		} catch ( ... ) {} // sanity check -- should not throw
	}
	--RR->dpEnabled;
}

/****************************************************************************/
/* Node methods used only within node/                                      */
/****************************************************************************/

std::string Node::dataStoreGet(const char *name)
{
	char buf[1024];
	std::string r;
	unsigned long olen = 0;
	do {
		long n = _dataStoreGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
		if (n <= 0)
			return std::string();
		r.append(buf,n);
	} while (r.length() < olen);
	return r;
}
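
// Usage sketch (not compiled): a host-side data store "get" callback that
// satisfies the chunked read contract used by dataStoreGet() above -- return
// the number of bytes copied into the buffer for this chunk (a value <= 0 if
// the object is missing or an error occurs) and report the object's total
// length via the final pointer argument. The std::map-backed store and the
// callback name are illustrative assumptions; the parameter order mirrors
// the call made above.
#if 0
#include <map>
#include <string>
#include <cstring>

static std::map<std::string,std::string> hostObjectStore; // hypothetical persistent store

static long myDataStoreGet(ZT_Node *node,void *uptr,const char *name,void *buf,unsigned long bufSize,unsigned long readIndex,unsigned long *totalSize)
{
	std::map<std::string,std::string>::const_iterator o(hostObjectStore.find(name));
	if (o == hostObjectStore.end())
		return -1; // object not found
	*totalSize = (unsigned long)o->second.length();
	if (readIndex >= (unsigned long)o->second.length())
		return 0; // empty or fully read; the caller above treats <= 0 as end of data
	unsigned long chunk = (unsigned long)o->second.length() - readIndex;
	if (chunk > bufSize)
		chunk = bufSize;
	memcpy(buf,o->second.data() + readIndex,chunk);
	return (long)chunk;
}
#endif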

bool Node::shouldUsePathForZeroTierTraffic(const InetAddress &localAddress,const InetAddress &remoteAddress)
{
	{
		Mutex::Lock _l(_networks_m);
		for(std::vector< std::pair< uint64_t, SharedPtr<Network> > >::const_iterator i=_networks.begin();i!=_networks.end();++i) {
			SharedPtr<NetworkConfig> nc(i->second->config2());
			if (nc) {
				for(std::vector<InetAddress>::const_iterator a(nc->staticIps().begin());a!=nc->staticIps().end();++a) {
					if (a->containsAddress(remoteAddress)) {
						return false;
					}
				}
			}
		}
	}

	if (_pathCheckFunction)
		return (_pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,reinterpret_cast<const struct sockaddr_storage *>(&localAddress),reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0);
	else return true;
}

#ifdef ZT_TRACE
void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
{
	static Mutex traceLock;

	va_list ap;
	char tmp1[1024],tmp2[1024],tmp3[256];

	Mutex::Lock _l(traceLock);

	time_t now = (time_t)(_now / 1000ULL);
#ifdef __WINDOWS__
	ctime_s(tmp3,sizeof(tmp3),&now);
	char *nowstr = tmp3;
#else
	char *nowstr = ctime_r(&now,tmp3);
#endif
	unsigned long nowstrlen = (unsigned long)strlen(nowstr);
	if (nowstr[nowstrlen-1] == '\n')
		nowstr[--nowstrlen] = (char)0;
	if (nowstr[nowstrlen-1] == '\r')
		nowstr[--nowstrlen] = (char)0;

	va_start(ap,fmt);
	vsnprintf(tmp2,sizeof(tmp2),fmt,ap);
	va_end(ap);
	tmp2[sizeof(tmp2)-1] = (char)0;

	Utils::snprintf(tmp1,sizeof(tmp1),"[%s] %s:%u %s",nowstr,module,line,tmp2);
	postEvent(ZT_EVENT_TRACE,tmp1);
}
#endif // ZT_TRACE

uint64_t Node::prng()
{
	unsigned int p = (++_prngStreamPtr % (sizeof(_prngStream) / sizeof(uint64_t)));
	if (!p)
		_prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
	return _prngStream[p];
}

void Node::postCircuitTestReport(const ZT_CircuitTestReport *report)
{
	std::vector< ZT_CircuitTest * > toNotify;
	{
		Mutex::Lock _l(_circuitTests_m);
		for(std::vector< ZT_CircuitTest * >::iterator i(_circuitTests.begin());i!=_circuitTests.end();++i) {
			if ((*i)->testId == report->testId)
				toNotify.push_back(*i);
		}
	}
	for(std::vector< ZT_CircuitTest * >::iterator i(toNotify.begin());i!=toNotify.end();++i)
		(reinterpret_cast<void (*)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *)>((*i)->_internalPtr))(reinterpret_cast<ZT_Node *>(this),*i,report);
}

} // namespace ZeroTier

/****************************************************************************/
/* CAPI bindings                                                            */
/****************************************************************************/

extern "C" {

enum ZT_ResultCode ZT_Node_new(
	ZT_Node **node,
	void *uptr,
	uint64_t now,
	ZT_DataStoreGetFunction dataStoreGetFunction,
	ZT_DataStorePutFunction dataStorePutFunction,
	ZT_WirePacketSendFunction wirePacketSendFunction,
	ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
	ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
	ZT_PathCheckFunction pathCheckFunction,
	ZT_EventCallback eventCallback)
{
	*node = (ZT_Node *)0;
	try {
		*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,pathCheckFunction,eventCallback));
		return ZT_RESULT_OK;
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch (std::runtime_error &exc) {
		return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_delete(ZT_Node *node)
{
	try {
		delete (reinterpret_cast<ZeroTier::Node *>(node));
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_processWirePacket(
	ZT_Node *node,
	uint64_t now,
	const struct sockaddr_storage *localAddress,
	const struct sockaddr_storage *remoteAddress,
	const void *packetData,
	unsigned int packetLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,localAddress,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_OK; // "OK" since invalid packets are simply dropped, but the system is still up
	}
}

enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
	ZT_Node *node,
	uint64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	const void *frameData,
	unsigned int frameLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid,void *uptr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid,uptr);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid,void **uptr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid,uptr);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

uint64_t ZT_Node_address(ZT_Node *node)
{
	return reinterpret_cast<ZeroTier::Node *>(node)->address();
}

void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->status(status);
	} catch ( ... ) {}
}

ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->peers();
	} catch ( ... ) {
		return (ZT_PeerList *)0;
	}
}

ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
	} catch ( ... ) {
		return (ZT_VirtualNetworkConfig *)0;
	}
}

ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networks();
	} catch ( ... ) {
		return (ZT_VirtualNetworkList *)0;
	}
}

void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->freeQueryResult(qr);
	} catch ( ... ) {}
}

int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr);
	} catch ( ... ) {
		return 0;
	}
}

void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
	} catch ( ... ) {}
}

void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_circuitTestBegin(ZT_Node *node,ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->circuitTestBegin(test,reportCallback);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_circuitTestEnd(ZT_Node *node,ZT_CircuitTest *test)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->circuitTestEnd(test);
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_clusterInit(
	ZT_Node *node,
	unsigned int myId,
	const struct sockaddr_storage *zeroTierPhysicalEndpoints,
	unsigned int numZeroTierPhysicalEndpoints,
	int x,
	int y,
	int z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->clusterInit(myId,zeroTierPhysicalEndpoints,numZeroTierPhysicalEndpoints,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_clusterAddMember(ZT_Node *node,unsigned int memberId)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->clusterAddMember(memberId);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterRemoveMember(memberId);
	} catch ( ... ) {}
}

void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned int len)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterHandleIncomingMessage(msg,len);
	} catch ( ... ) {}
}

void ZT_Node_clusterStatus(ZT_Node *node,ZT_ClusterStatus *cs)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterStatus(cs);
	} catch ( ... ) {}
}

void ZT_Node_backgroundThreadMain(ZT_Node *node)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->backgroundThreadMain();
	} catch ( ... ) {}
}

void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
{
	if (major) *major = ZEROTIER_ONE_VERSION_MAJOR;
	if (minor) *minor = ZEROTIER_ONE_VERSION_MINOR;
	if (revision) *revision = ZEROTIER_ONE_VERSION_REVISION;
	if (featureFlags) {
		*featureFlags = (
			ZT_FEATURE_FLAG_THREAD_SAFE
		);
	}
}

} // extern "C"
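
// Usage sketch (not compiled): a minimal embedder lifecycle tying the CAPI
// calls above together. The callback implementations (myDataStoreGet,
// myDataStorePut, myWireSend, myVirtualFrame, myConfigChanged, myEvent), the
// nowMs() clock, hostStillRunning(), and the packet/timer pumping are
// hypothetical host-side pieces; the ZT_* calls and their argument order
// come from this file.
#if 0
static int runEmbeddedNode(void *hostState,uint64_t networkId)
{
	ZT_Node *node = (ZT_Node *)0;
	if (ZT_Node_new(&node,hostState,nowMs(),
		&myDataStoreGet,&myDataStorePut,&myWireSend,&myVirtualFrame,&myConfigChanged,
		(ZT_PathCheckFunction)0, // optional path filter may be null (see shouldUsePathForZeroTierTraffic())
		&myEvent) != ZT_RESULT_OK)
		return -1;

	ZT_Node_join(node,networkId,(void *)0);

	volatile uint64_t nextDeadline = 0;
	while (hostStillRunning()) { // hypothetical shutdown flag
		uint64_t now = nowMs();
		// Feed received UDP packets via ZT_Node_processWirePacket() and tap
		// frames via ZT_Node_processVirtualNetworkFrame() here.
		if (now >= nextDeadline)
			ZT_Node_processBackgroundTasks(node,now,&nextDeadline);
	}

	ZT_Node_leave(node,networkId,(void **)0);
	ZT_Node_delete(node);
	return 0;
}
#endif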