/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <stdint.h>

#include "../version.h"

#include "Constants.hpp"
#include "Node.hpp"
#include "RuntimeEnvironment.hpp"
#include "NetworkController.hpp"
#include "Switch.hpp"
#include "Multicaster.hpp"
#include "Topology.hpp"
#include "Buffer.hpp"
#include "Packet.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
#include "Cluster.hpp"
#include "DeferredPackets.hpp"

const struct sockaddr_storage ZT_SOCKADDR_NULL = {0};

namespace ZeroTier {

/****************************************************************************/
/* Public Node interface (C++, exposed via CAPI bindings) */
/****************************************************************************/

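// Constructor: loads this node's identity from the data store (generating and
// persisting a new one if "identity.secret" is missing or invalid), seeds the
// internal Salsa20-based PRNG, constructs the core subsystems (Switch,
// Multicaster, Topology, SelfAwareness, DeferredPackets), and posts ZT_EVENT_UP.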
Node::Node(
	uint64_t now,
	void *uptr,
	ZT_DataStoreGetFunction dataStoreGetFunction,
	ZT_DataStorePutFunction dataStorePutFunction,
	ZT_WirePacketSendFunction wirePacketSendFunction,
	ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
	ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
	ZT_PathCheckFunction pathCheckFunction,
	ZT_EventCallback eventCallback) :
	_RR(this),
	RR(&_RR),
	_uPtr(uptr),
	_dataStoreGetFunction(dataStoreGetFunction),
	_dataStorePutFunction(dataStorePutFunction),
	_wirePacketSendFunction(wirePacketSendFunction),
	_virtualNetworkFrameFunction(virtualNetworkFrameFunction),
	_virtualNetworkConfigFunction(virtualNetworkConfigFunction),
	_pathCheckFunction(pathCheckFunction),
	_eventCallback(eventCallback),
	_networks(),
	_networks_m(),
	_prngStreamPtr(0),
	_now(now),
	_lastPingCheck(0),
	_lastHousekeepingRun(0)
{
	_online = false;

	// Use Salsa20 alone as a high-quality non-crypto PRNG
	{
		char foo[32];
		Utils::getSecureRandom(foo,32);
		_prng.init(foo,256,foo);
		memset(_prngStream,0,sizeof(_prngStream));
		_prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
	}

	{
		std::string idtmp(dataStoreGet("identity.secret"));
		if ((!idtmp.length())||(!RR->identity.fromString(idtmp))||(!RR->identity.hasPrivate())) {
			TRACE("identity.secret not found, generating...");
			RR->identity.generate();
			idtmp = RR->identity.toString(true);
			if (!dataStorePut("identity.secret",idtmp,true))
				throw std::runtime_error("unable to write identity.secret");
		}
		RR->publicIdentityStr = RR->identity.toString(false);
		RR->secretIdentityStr = RR->identity.toString(true);

		idtmp = dataStoreGet("identity.public");
		if (idtmp != RR->publicIdentityStr) {
			if (!dataStorePut("identity.public",RR->publicIdentityStr,false))
				throw std::runtime_error("unable to write identity.public");
		}
	}

	try {
		RR->sw = new Switch(RR);
		RR->mc = new Multicaster(RR);
		RR->topology = new Topology(RR);
		RR->sa = new SelfAwareness(RR);
		RR->dp = new DeferredPackets(RR);
	} catch ( ... ) {
		delete RR->dp;
		delete RR->sa;
		delete RR->topology;
		delete RR->mc;
		delete RR->sw;
		throw;
	}

	postEvent(ZT_EVENT_UP);
}

Node::~Node()
{
	Mutex::Lock _l(_networks_m);

	_networks.clear(); // ensure that networks are destroyed before shutdown

	RR->dpEnabled = 0;
	delete RR->dp;
	delete RR->sa;
	delete RR->topology;
	delete RR->mc;
	delete RR->sw;
#ifdef ZT_ENABLE_CLUSTER
	delete RR->cluster;
#endif
}

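// Feed a single packet received from the physical wire into the switch for
// decoding and handling. Always returns ZT_RESULT_OK; invalid packets are
// simply dropped.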
ZT_ResultCode Node::processWirePacket(
	uint64_t now,
	const struct sockaddr_storage *localAddress,
	const struct sockaddr_storage *remoteAddress,
	const void *packetData,
	unsigned int packetLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(localAddress)),*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
	return ZT_RESULT_OK;
}

ZT_ResultCode Node::processVirtualNetworkFrame(
	uint64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	const void *frameData,
	unsigned int frameLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		RR->sw->onLocalEthernet(nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

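// Functor applied to each peer by processBackgroundTasks(): upstream peers
// (world roots and network preferred relays) are kept alive on both IPv4 and
// IPv6, falling back to a HELLO to a stable endpoint or an indirectly routed
// NOP if no live direct path exists; ordinary peers are only pinged while they
// are actively transferring frames.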
class _PingPeersThatNeedPing
{
public:
	_PingPeersThatNeedPing(const RuntimeEnvironment *renv,uint64_t now,const std::vector< std::pair<Address,InetAddress> > &relays) :
		lastReceiveFromUpstream(0),
		RR(renv),
		_now(now),
		_relays(relays),
		_world(RR->topology->world())
	{
	}

	uint64_t lastReceiveFromUpstream; // tracks last time we got a packet from an 'upstream' peer like a root or a relay

	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
	{
		bool upstream = false;
		InetAddress stableEndpoint4,stableEndpoint6;

		// If this is a world root, pick (if possible) both an IPv4 and an IPv6 stable endpoint to use if link isn't currently alive.
		for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
			if (r->identity.address() == p->address()) {
				upstream = true;
				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)r->stableEndpoints.size();++k) {
					const InetAddress &addr = r->stableEndpoints[ptr++ % r->stableEndpoints.size()];
					if (!stableEndpoint4) {
						if (addr.ss_family == AF_INET)
							stableEndpoint4 = addr;
					}
					if (!stableEndpoint6) {
						if (addr.ss_family == AF_INET6)
							stableEndpoint6 = addr;
					}
				}
				break;
			}
		}

		if (!upstream) {
			// If I am a root server, only ping other root servers -- roots don't ping "down"
			// since that would just be a waste of bandwidth and could potentially cause route
			// flapping in Cluster mode.
			if (RR->topology->amRoot())
				return;

			// Check for network preferred relays, also considered 'upstream' and thus always
			// pinged to keep links up. If they have stable addresses we will try them there.
			for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(_relays.begin());r!=_relays.end();++r) {
				if (r->first == p->address()) {
					if (r->second.ss_family == AF_INET)
						stableEndpoint4 = r->second;
					else if (r->second.ss_family == AF_INET6)
						stableEndpoint6 = r->second;
					upstream = true;
					break;
				}
			}
		}

		if (upstream) {
			// "Upstream" devices are roots and relays and get special treatment -- they stay alive
			// forever and we try to keep (if available) both IPv4 and IPv6 channels open to them.
			bool needToContactIndirect = true;
			if (p->doPingAndKeepalive(_now,AF_INET)) {
				needToContactIndirect = false;
			} else {
				if (stableEndpoint4) {
					needToContactIndirect = false;
					p->sendHELLO(InetAddress(),stableEndpoint4,_now);
				}
			}
			if (p->doPingAndKeepalive(_now,AF_INET6)) {
				needToContactIndirect = false;
			} else {
				if (stableEndpoint6) {
					needToContactIndirect = false;
					p->sendHELLO(InetAddress(),stableEndpoint6,_now);
				}
			}

			if (needToContactIndirect) {
				// If this is an upstream and we have no stable endpoint for either IPv4 or IPv6,
				// send a NOP indirectly if possible to see if we can get to this peer in any
				// way whatsoever. This will e.g. find network preferred relays that lack
				// stable endpoints by using root servers.
				Packet outp(p->address(),RR->identity.address(),Packet::VERB_NOP);
				RR->sw->send(outp,true,0);
			}

			lastReceiveFromUpstream = std::max(p->lastReceive(),lastReceiveFromUpstream);
		} else if (p->activelyTransferringFrames(_now)) {
			// Normal nodes get their preferred link kept alive if the node has generated frame traffic recently
			p->doPingAndKeepalive(_now,0);
		}
	}

private:
	const RuntimeEnvironment *RR;
	uint64_t _now;
	const std::vector< std::pair<Address,InetAddress> > &_relays;
	World _world;
};

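// Periodic maintenance, called by the host whenever *nextBackgroundTaskDeadline
// is reached: roughly every ZT_PING_CHECK_INVERVAL it requests updated configs
// for networks that need them, pings peers via _PingPeersThatNeedPing, and
// updates online/offline status; topology, self-awareness, and multicaster
// cleanup runs every ZT_HOUSEKEEPING_PERIOD.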
ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
	_now = now;
	Mutex::Lock bl(_backgroundTasksLock);

	unsigned long timeUntilNextPingCheck = ZT_PING_CHECK_INVERVAL;
	const uint64_t timeSinceLastPingCheck = now - _lastPingCheck;
	if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
		try {
			_lastPingCheck = now;

			// Get relays and networks that need config without leaving the mutex locked
			std::vector< std::pair<Address,InetAddress> > networkRelays;
			std::vector< SharedPtr<Network> > needConfig;
			{
				Mutex::Lock _l(_networks_m);
				for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
					SharedPtr<NetworkConfig> nc(n->second->config2());
					if (((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!nc))
						needConfig.push_back(n->second);
					if (nc)
						networkRelays.insert(networkRelays.end(),nc->relays().begin(),nc->relays().end());
				}
			}

			// Request updated configuration for networks that need it
			for(std::vector< SharedPtr<Network> >::const_iterator n(needConfig.begin());n!=needConfig.end();++n)
				(*n)->requestConfiguration();

			// Do pings and keepalives
			_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);

			// Update online status, post status change as event
			const bool oldOnline = _online;
			_online = (((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT)||(RR->topology->amRoot()));
			if (oldOnline != _online)
				postEvent(_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	} else {
		timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
	}

	if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
		try {
			_lastHousekeepingRun = now;
			RR->topology->clean(now);
			RR->sa->clean(now);
			RR->mc->clean(now);
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL;
		}
	}

	try {
#ifdef ZT_ENABLE_CLUSTER
		// If clustering is enabled we have to call cluster->doPeriodicTasks() very often, so we override normal timer deadline behavior
		if (RR->cluster) {
			RR->sw->doTimerTasks(now);
			RR->cluster->doPeriodicTasks();
			*nextBackgroundTaskDeadline = now + ZT_CLUSTER_PERIODIC_TASK_PERIOD; // this is really short so just tick at this rate
		} else {
#endif
			*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
#ifdef ZT_ENABLE_CLUSTER
		}
#endif
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}

	return ZT_RESULT_OK;
}

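// join() is idempotent: it creates the Network object only if the network ID is
// not already present; leave() destroys the network and removes it from the list.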
ZT_ResultCode Node::join(uint64_t nwid)
{
	Mutex::Lock _l(_networks_m);
	SharedPtr<Network> nw = _network(nwid);
	if(!nw)
		_networks.push_back(std::pair< uint64_t,SharedPtr<Network> >(nwid,SharedPtr<Network>(new Network(RR,nwid))));
	std::sort(_networks.begin(),_networks.end()); // will sort by nwid since it's the first in a pair<>
	return ZT_RESULT_OK;
}

ZT_ResultCode Node::leave(uint64_t nwid)
{
	std::vector< std::pair< uint64_t,SharedPtr<Network> > > newn;
	Mutex::Lock _l(_networks_m);
	for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
		if (n->first != nwid)
			newn.push_back(*n);
		else n->second->destroy();
	}
	_networks.swap(newn);
	return ZT_RESULT_OK;
}

ZT_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastSubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	SharedPtr<Network> nw(this->network(nwid));
	if (nw) {
		nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
		return ZT_RESULT_OK;
	} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

uint64_t Node::address() const
{
	return RR->identity.address().toInt();
}

void Node::status(ZT_NodeStatus *status) const
{
	status->address = RR->identity.address().toInt();
	status->worldId = RR->topology->worldId();
	status->worldTimestamp = RR->topology->worldTimestamp();
	status->publicIdentity = RR->publicIdentityStr.c_str();
	status->secretIdentity = RR->secretIdentityStr.c_str();
	status->online = _online ? 1 : 0;
}

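// Build a snapshot of all known peers and their paths in a single malloc()'d
// block; the caller is expected to release it with freeQueryResult().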
ZT_PeerList *Node::peers() const
{
	std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
	std::sort(peers.begin(),peers.end());

	char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
	if (!buf)
		return (ZT_PeerList *)0;
	ZT_PeerList *pl = (ZT_PeerList *)buf;
	pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));

	pl->peerCount = 0;
	for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
		ZT_Peer *p = &(pl->peers[pl->peerCount++]);
		p->address = pi->second->address().toInt();
		p->lastUnicastFrame = pi->second->lastUnicastFrame();
		p->lastMulticastFrame = pi->second->lastMulticastFrame();
		if (pi->second->remoteVersionKnown()) {
			p->versionMajor = pi->second->remoteVersionMajor();
			p->versionMinor = pi->second->remoteVersionMinor();
			p->versionRev = pi->second->remoteVersionRevision();
		} else {
			p->versionMajor = -1;
			p->versionMinor = -1;
			p->versionRev = -1;
		}
		p->latency = pi->second->latency();
		p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;

		std::vector<Path> paths(pi->second->paths());
		Path *bestPath = pi->second->getBestPath(_now);
		p->pathCount = 0;
		for(std::vector<Path>::iterator path(paths.begin());path!=paths.end();++path) {
			memcpy(&(p->paths[p->pathCount].address),&(path->address()),sizeof(struct sockaddr_storage));
			p->paths[p->pathCount].lastSend = path->lastSend();
			p->paths[p->pathCount].lastReceive = path->lastReceived();
			p->paths[p->pathCount].active = path->active(_now) ? 1 : 0;
			p->paths[p->pathCount].preferred = ((bestPath)&&(*path == *bestPath)) ? 1 : 0;
			++p->pathCount;
		}
	}

	return pl;
}

ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
	Mutex::Lock _l(_networks_m);
	SharedPtr<Network> nw = _network(nwid);
	if(nw) {
		ZT_VirtualNetworkConfig *nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
		nw->externalConfig(nc);
		return nc;
	}
	return (ZT_VirtualNetworkConfig *)0;
}

ZT_VirtualNetworkList *Node::networks() const
{
	Mutex::Lock _l(_networks_m);

	char *buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
	if (!buf)
		return (ZT_VirtualNetworkList *)0;
	ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
	nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));

	nl->networkCount = 0;
	for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n)
		n->second->externalConfig(&(nl->networks[nl->networkCount++]));

	return nl;
}

void Node::freeQueryResult(void *qr)
{
	if (qr)
		::free(qr);
}

int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr)
{
	if (Path::isAddressValidForPath(*(reinterpret_cast<const InetAddress *>(addr)))) {
		Mutex::Lock _l(_directPaths_m);
		_directPaths.push_back(*(reinterpret_cast<const InetAddress *>(addr)));
		std::sort(_directPaths.begin(),_directPaths.end());
		_directPaths.erase(std::unique(_directPaths.begin(),_directPaths.end()),_directPaths.end());
		return 1;
	}
	return 0;
}

void Node::clearLocalInterfaceAddresses()
{
	Mutex::Lock _l(_directPaths_m);
	_directPaths.clear();
}

void Node::setNetconfMaster(void *networkControllerInstance)
{
	RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
}

ZT_ResultCode Node::circuitTestBegin(ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
{
	if (test->hopCount > 0) {
		try {
			Packet outp(Address(),RR->identity.address(),Packet::VERB_CIRCUIT_TEST);
			RR->identity.address().appendTo(outp);
			outp.append((uint16_t)((test->reportAtEveryHop != 0) ? 0x03 : 0x02));
			outp.append((uint64_t)test->timestamp);
			outp.append((uint64_t)test->testId);
			outp.append((uint16_t)0); // originator credential length, updated later
			if (test->credentialNetworkId) {
				outp.append((uint8_t)0x01);
				outp.append((uint64_t)test->credentialNetworkId);
				outp.setAt<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 23,(uint16_t)9);
			}
			outp.append((uint16_t)0);
			C25519::Signature sig(RR->identity.sign(reinterpret_cast<const char *>(outp.data()) + ZT_PACKET_IDX_PAYLOAD,outp.size() - ZT_PACKET_IDX_PAYLOAD));
			outp.append((uint16_t)sig.size());
			outp.append(sig.data,(unsigned int)sig.size());
			outp.append((uint16_t)0); // originator doesn't need an extra credential, since it's the originator
			for(unsigned int h=1;h<test->hopCount;++h) {
				outp.append((uint8_t)0);
				outp.append((uint8_t)(test->hops[h].breadth & 0xff));
				for(unsigned int a=0;a<test->hops[h].breadth;++a)
					Address(test->hops[h].addresses[a]).appendTo(outp);
			}
			for(unsigned int a=0;a<test->hops[0].breadth;++a) {
				outp.newInitializationVector();
				outp.setDestination(Address(test->hops[0].addresses[a]));
				RR->sw->send(outp,true,0);
			}
		} catch ( ... ) {
			return ZT_RESULT_FATAL_ERROR_INTERNAL; // probably indicates FIFO too big for packet
		}
	}

	{
		test->_internalPtr = reinterpret_cast<void *>(reportCallback);
		Mutex::Lock _l(_circuitTests_m);
		if (std::find(_circuitTests.begin(),_circuitTests.end(),test) == _circuitTests.end())
			_circuitTests.push_back(test);
	}

	return ZT_RESULT_OK;
}

void Node::circuitTestEnd(ZT_CircuitTest *test)
{
	Mutex::Lock _l(_circuitTests_m);
	for(;;) {
		std::vector< ZT_CircuitTest * >::iterator ct(std::find(_circuitTests.begin(),_circuitTests.end(),test));
		if (ct == _circuitTests.end())
			break;
		else _circuitTests.erase(ct);
	}
}

ZT_ResultCode Node::clusterInit(
	unsigned int myId,
	const struct sockaddr_storage *zeroTierPhysicalEndpoints,
	unsigned int numZeroTierPhysicalEndpoints,
	int x,
	int y,
	int z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		return ZT_RESULT_ERROR_BAD_PARAMETER;

	std::vector<InetAddress> eps;
	for(unsigned int i=0;i<numZeroTierPhysicalEndpoints;++i)
		eps.push_back(InetAddress(zeroTierPhysicalEndpoints[i]));
	std::sort(eps.begin(),eps.end());

	RR->cluster = new Cluster(RR,myId,eps,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);

	return ZT_RESULT_OK;
#else
	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
#endif
}

ZT_ResultCode Node::clusterAddMember(unsigned int memberId)
{
#ifdef ZT_ENABLE_CLUSTER
	if (!RR->cluster)
		return ZT_RESULT_ERROR_BAD_PARAMETER;
	RR->cluster->addMember((uint16_t)memberId);
	return ZT_RESULT_OK;
#else
	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
#endif
}

void Node::clusterRemoveMember(unsigned int memberId)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->removeMember((uint16_t)memberId);
#endif
}

void Node::clusterHandleIncomingMessage(const void *msg,unsigned int len)
{
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->handleIncomingStateMessage(msg,len);
#endif
}

void Node::clusterStatus(ZT_ClusterStatus *cs)
{
	if (!cs)
		return;
#ifdef ZT_ENABLE_CLUSTER
	if (RR->cluster)
		RR->cluster->status(*cs);
	else
#endif
	memset(cs,0,sizeof(ZT_ClusterStatus));
}

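// Body of an optional background thread supplied by the host: increments the
// dpEnabled counter and loops on DeferredPackets::process(), returning when
// that call reports a negative value.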
void Node::backgroundThreadMain()
{
	++RR->dpEnabled;
	for(;;) {
		try {
			if (RR->dp->process() < 0)
				break;
		} catch ( ... ) {} // sanity check -- should not throw
	}
	--RR->dpEnabled;
}

/****************************************************************************/
/* Node methods used only within node/ */
/****************************************************************************/

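// Read an object from the data store via the host-supplied callback, appending
// 1024-byte chunks until the callback's reported total object length has been
// reached; returns an empty string on error or if the object does not exist.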
std::string Node::dataStoreGet(const char *name)
{
	char buf[1024];
	std::string r;
	unsigned long olen = 0;
	do {
		long n = _dataStoreGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
		if (n <= 0)
			return std::string();
		r.append(buf,n);
	} while (r.length() < olen);
	return r;
}

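// Returns false if the remote address falls within a static IP assignment of
// any joined network (so ZeroTier traffic is not routed over ZeroTier itself);
// otherwise defers to the host's path check callback if one was provided.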
bool Node::shouldUsePathForZeroTierTraffic(const InetAddress &localAddress,const InetAddress &remoteAddress)
{
	{
		Mutex::Lock _l(_networks_m);
		for(std::vector< std::pair< uint64_t, SharedPtr<Network> > >::const_iterator i=_networks.begin();i!=_networks.end();++i) {
			SharedPtr<NetworkConfig> nc(i->second->config2());
			if (nc) {
				for(std::vector<InetAddress>::const_iterator a(nc->staticIps().begin());a!=nc->staticIps().end();++a) {
					if (a->containsAddress(remoteAddress)) {
						return false;
					}
				}
			}
		}
	}

	if (_pathCheckFunction)
		return (_pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,reinterpret_cast<const struct sockaddr_storage *>(&localAddress),reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0);
	else return true;
}

#ifdef ZT_TRACE
void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
{
	static Mutex traceLock;

	va_list ap;
	char tmp1[1024],tmp2[1024],tmp3[256];

	Mutex::Lock _l(traceLock);

	time_t now = (time_t)(_now / 1000ULL);
#ifdef __WINDOWS__
	ctime_s(tmp3,sizeof(tmp3),&now);
	char *nowstr = tmp3;
#else
	char *nowstr = ctime_r(&now,tmp3);
#endif
	unsigned long nowstrlen = (unsigned long)strlen(nowstr);
	if (nowstr[nowstrlen-1] == '\n')
		nowstr[--nowstrlen] = (char)0;
	if (nowstr[nowstrlen-1] == '\r')
		nowstr[--nowstrlen] = (char)0;

	va_start(ap,fmt);
	vsnprintf(tmp2,sizeof(tmp2),fmt,ap);
	va_end(ap);
	tmp2[sizeof(tmp2)-1] = (char)0;

	Utils::snprintf(tmp1,sizeof(tmp1),"[%s] %s:%u %s",nowstr,module,line,tmp2);
	postEvent(ZT_EVENT_TRACE,tmp1);
}
#endif // ZT_TRACE

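// Fast non-cryptographic PRNG: returns successive 64-bit words from a buffered
// Salsa20/12 keystream, re-encrypting the buffer in place each time it wraps.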
uint64_t Node::prng()
{
	unsigned int p = (++_prngStreamPtr % (sizeof(_prngStream) / sizeof(uint64_t)));
	if (!p)
		_prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
	return _prngStream[p];
}

void Node::postCircuitTestReport(const ZT_CircuitTestReport *report)
{
	std::vector< ZT_CircuitTest * > toNotify;
	{
		Mutex::Lock _l(_circuitTests_m);
		for(std::vector< ZT_CircuitTest * >::iterator i(_circuitTests.begin());i!=_circuitTests.end();++i) {
			if ((*i)->testId == report->testId)
				toNotify.push_back(*i);
		}
	}
	for(std::vector< ZT_CircuitTest * >::iterator i(toNotify.begin());i!=toNotify.end();++i)
		(reinterpret_cast<void (*)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *)>((*i)->_internalPtr))(reinterpret_cast<ZT_Node *>(this),*i,report);
}

} // namespace ZeroTier

/****************************************************************************/
/* CAPI bindings */
/****************************************************************************/

extern "C" {

enum ZT_ResultCode ZT_Node_new(
	ZT_Node **node,
	void *uptr,
	uint64_t now,
	ZT_DataStoreGetFunction dataStoreGetFunction,
	ZT_DataStorePutFunction dataStorePutFunction,
	ZT_WirePacketSendFunction wirePacketSendFunction,
	ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
	ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
	ZT_PathCheckFunction pathCheckFunction,
	ZT_EventCallback eventCallback)
{
	*node = (ZT_Node *)0;
	try {
		*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,pathCheckFunction,eventCallback));
		return ZT_RESULT_OK;
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch (std::runtime_error &exc) {
		return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_delete(ZT_Node *node)
{
	try {
		delete (reinterpret_cast<ZeroTier::Node *>(node));
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_processWirePacket(
	ZT_Node *node,
	uint64_t now,
	const struct sockaddr_storage *localAddress,
	const struct sockaddr_storage *remoteAddress,
	const void *packetData,
	unsigned int packetLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,localAddress,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_OK; // "OK" since invalid packets are simply dropped, but the system is still up
	}
}

enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
	ZT_Node *node,
	uint64_t now,
	uint64_t nwid,
	uint64_t sourceMac,
	uint64_t destMac,
	unsigned int etherType,
	unsigned int vlanId,
	const void *frameData,
	unsigned int frameLength,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextBackgroundTaskDeadline);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid,multicastGroup,multicastAdi);
	} catch (std::bad_alloc &exc) {
		return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

uint64_t ZT_Node_address(ZT_Node *node)
{
	return reinterpret_cast<ZeroTier::Node *>(node)->address();
}

void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->status(status);
	} catch ( ... ) {}
}

ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->peers();
	} catch ( ... ) {
		return (ZT_PeerList *)0;
	}
}

ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
	} catch ( ... ) {
		return (ZT_VirtualNetworkConfig *)0;
	}
}

ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->networks();
	} catch ( ... ) {
		return (ZT_VirtualNetworkList *)0;
	}
}

void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->freeQueryResult(qr);
	} catch ( ... ) {}
}

int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr);
	} catch ( ... ) {
		return 0;
	}
}

void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
	} catch ( ... ) {}
}

void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_circuitTestBegin(ZT_Node *node,ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->circuitTestBegin(test,reportCallback);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_circuitTestEnd(ZT_Node *node,ZT_CircuitTest *test)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->circuitTestEnd(test);
	} catch ( ... ) {}
}

enum ZT_ResultCode ZT_Node_clusterInit(
	ZT_Node *node,
	unsigned int myId,
	const struct sockaddr_storage *zeroTierPhysicalEndpoints,
	unsigned int numZeroTierPhysicalEndpoints,
	int x,
	int y,
	int z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->clusterInit(myId,zeroTierPhysicalEndpoints,numZeroTierPhysicalEndpoints,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

enum ZT_ResultCode ZT_Node_clusterAddMember(ZT_Node *node,unsigned int memberId)
{
	try {
		return reinterpret_cast<ZeroTier::Node *>(node)->clusterAddMember(memberId);
	} catch ( ... ) {
		return ZT_RESULT_FATAL_ERROR_INTERNAL;
	}
}

void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterRemoveMember(memberId);
	} catch ( ... ) {}
}

void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned int len)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterHandleIncomingMessage(msg,len);
	} catch ( ... ) {}
}

void ZT_Node_clusterStatus(ZT_Node *node,ZT_ClusterStatus *cs)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->clusterStatus(cs);
	} catch ( ... ) {}
}

void ZT_Node_backgroundThreadMain(ZT_Node *node)
{
	try {
		reinterpret_cast<ZeroTier::Node *>(node)->backgroundThreadMain();
	} catch ( ... ) {}
}

void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
{
	if (major) *major = ZEROTIER_ONE_VERSION_MAJOR;
	if (minor) *minor = ZEROTIER_ONE_VERSION_MINOR;
	if (revision) *revision = ZEROTIER_ONE_VERSION_REVISION;
	if (featureFlags) {
		*featureFlags = (
			ZT_FEATURE_FLAG_THREAD_SAFE
		);
	}
}

} // extern "C"