Cluster.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <algorithm>
#include <utility>

#include "../version.h"
#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Identity.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Switch.hpp"
#include "Node.hpp"

namespace ZeroTier {
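
// Straight-line (Euclidean) distance between two points in 3-space; used by
// findBetterEndpoint() below to pick the cluster member geographically
// closest to a peer.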
static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
	throw()
{
	double dx = ((double)x2 - (double)x1);
	double dy = ((double)y2 - (double)y1);
	double dz = ((double)z2 - (double)z1);
	return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}

Cluster::Cluster(
	const RuntimeEnvironment *renv,
	uint16_t id,
	const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
	int32_t x,
	int32_t y,
	int32_t z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg) :
	RR(renv),
	_sendFunction(sendFunction),
	_sendFunctionArg(sendFunctionArg),
	_addressToLocationFunction(addressToLocationFunction),
	_addressToLocationFunctionArg(addressToLocationFunctionArg),
	_x(x),
	_y(y),
	_z(z),
	_id(id),
	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
	_peerAffinities(65536),
	_lastCleanedPeerAffinities(0),
	_lastCheckedPeersForAnnounce(0)
{
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

	// Generate master secret by hashing the secret from our Identity key pair
	RR->identity.sha512PrivateKey(_masterSecret);

	// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(id);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_key,stmp,sizeof(_key));
	Utils::burn(stmp,sizeof(stmp));
}

Cluster::~Cluster()
{
	Utils::burn(_masterSecret,sizeof(_masterSecret));
	Utils::burn(_key,sizeof(_key));
	delete [] _members;
}
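
// Authenticate and decrypt a state message from another cluster member, then
// walk its length-prefixed submessages and dispatch each one by type.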
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
	Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
	{
		// FORMAT: <[16] iv><[8] MAC><... data>
		if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
			return;

		// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
		char keytmp[32];
		memcpy(keytmp,_key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
		Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Compute 16-byte MAC
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

		// Check first 8 bytes of MAC against 64-bit MAC in stream
		if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
			return;

		// Decrypt!
		dmsg.setSize(len - 24);
		s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
	}

	if (dmsg.size() < 4)
		return;
	const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
	unsigned int ptr = 2;
	if (fromMemberId == _id) // sanity check: we don't talk to ourselves
		return;
	const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
	ptr += 2;
	if (toMemberId != _id) // sanity check: message not for us?
		return;

	{ // make sure sender is actually considered a member
		Mutex::Lock _l3(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
			return;
	}

	{
		_Member &m = _members[fromMemberId];
		Mutex::Lock mlck(m.lock);

		try {
			while (ptr < dmsg.size()) {
				const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
				const unsigned int nextPtr = ptr + mlen;
				if (nextPtr > dmsg.size())
					break;

				int mtype = -1;
				try {
					switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
						default:
							break;
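
						// ALIVE: periodic heartbeat carrying the sender's version, location,
						// load, and the physical endpoints to which peers can be redirected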
						case STATE_MESSAGE_ALIVE: {
							ptr += 7; // skip version stuff, not used yet
							m.x = dmsg.at<int32_t>(ptr); ptr += 4;
							m.y = dmsg.at<int32_t>(ptr); ptr += 4;
							m.z = dmsg.at<int32_t>(ptr); ptr += 4;
							ptr += 8; // skip local clock, not used
							m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
							ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
							std::string addrs;
#endif
							unsigned int physicalAddressCount = dmsg[ptr++];
							m.zeroTierPhysicalEndpoints.clear();
							for(unsigned int i=0;i<physicalAddressCount;++i) {
								m.zeroTierPhysicalEndpoints.push_back(InetAddress());
								ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
								if (!(m.zeroTierPhysicalEndpoints.back())) {
									m.zeroTierPhysicalEndpoints.pop_back();
								}
#ifdef ZT_TRACE
								else {
									if (addrs.length() > 0)
										addrs.push_back(',');
									addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
								}
#endif
							}
#ifdef ZT_TRACE
							if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
								TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
							}
#endif
							m.lastReceivedAliveAnnouncement = RR->node->now();
						} break;
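
						// HAVE_PEER: another member has a direct path to a peer; forget any
						// path we hold to that address and record the peer's new home member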
						case STATE_MESSAGE_HAVE_PEER: {
							const uint64_t now = RR->node->now();
							Identity id;
							InetAddress physicalAddress;
							ptr += id.deserialize(dmsg,ptr);
							ptr += physicalAddress.deserialize(dmsg,ptr);
							if (id) {
								// Forget any paths that we have to this peer at its address
								if (physicalAddress) {
									SharedPtr<Peer> myPeerRecord(RR->topology->getPeerNoCache(id.address(),now));
									if (myPeerRecord)
										myPeerRecord->removePathByAddress(physicalAddress);
								}

								// Always save identity to update file time
								RR->topology->saveIdentity(id);

								// Set peer affinity to its new home
								{
									Mutex::Lock _l2(_peerAffinities_m);
									_PA &pa = _peerAffinities[id.address()];
									pa.ts = now;
									pa.mid = fromMemberId;
								}

								TRACE("[%u] has %s @ %s",(unsigned int)fromMemberId,id.address().toString().c_str(),physicalAddress.toString().c_str());
							}
						} break;
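
						// MULTICAST_LIKE: mirror a peer's multicast group subscription into
						// our local multicast state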
						case STATE_MESSAGE_MULTICAST_LIKE: {
							const uint64_t nwid = dmsg.at<uint64_t>(ptr); ptr += 8;
							const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
							const MAC mac(dmsg.field(ptr,6),6); ptr += 6;
							const uint32_t adi = dmsg.at<uint32_t>(ptr); ptr += 4;
							RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address);
							TRACE("[%u] %s likes %s/%.8x on %.16llx",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid);
						} break;

						case STATE_MESSAGE_COM: {
							/* not currently used so not decoded yet
							CertificateOfMembership com;
							ptr += com.deserialize(dmsg,ptr);
							if (com) {
								TRACE("[%u] COM for %s on %.16llu rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision());
							}
							*/
						} break;
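
						// PROXY_UNITE: a peer of ours and a peer of the sender should talk
						// directly; send a RENDEZVOUS to our local peer and hand the sender
						// a RENDEZVOUS (via PROXY_SEND) to forward to its remote peer so the
						// two can attempt a direct connection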
						case STATE_MESSAGE_PROXY_UNITE: {
							const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
							const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
							const unsigned int numRemotePeerPaths = dmsg[ptr++];
							InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
							for(unsigned int i=0;i<numRemotePeerPaths;++i)
								ptr += remotePeerPaths[i].deserialize(dmsg,ptr);

							TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());

							const uint64_t now = RR->node->now();
							SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress,now));
							if ((localPeer)&&(numRemotePeerPaths > 0)) {
								InetAddress bestLocalV4,bestLocalV6;
								localPeer->getBestActiveAddresses(now,bestLocalV4,bestLocalV6);

								InetAddress bestRemoteV4,bestRemoteV6;
								for(unsigned int i=0;i<numRemotePeerPaths;++i) {
									if ((bestRemoteV4)&&(bestRemoteV6))
										break;
									switch(remotePeerPaths[i].ss_family) {
										case AF_INET:
											if (!bestRemoteV4)
												bestRemoteV4 = remotePeerPaths[i];
											break;
										case AF_INET6:
											if (!bestRemoteV6)
												bestRemoteV6 = remotePeerPaths[i];
											break;
									}
								}

								Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
								rendezvousForLocal.append((uint8_t)0);
								remotePeerAddress.appendTo(rendezvousForLocal);

								Buffer<2048> rendezvousForRemote;
								remotePeerAddress.appendTo(rendezvousForRemote);
								rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
								const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForRemote.size();
								rendezvousForRemote.addSize(2); // space for actual packet payload length
								rendezvousForRemote.append((uint8_t)0); // flags == 0
								localPeerAddress.appendTo(rendezvousForRemote);

								bool haveMatch = false;
								if ((bestLocalV6)&&(bestRemoteV6)) {
									haveMatch = true;

									rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
									rendezvousForLocal.append((uint8_t)16);
									rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);

									rendezvousForRemote.append((uint16_t)bestLocalV6.port());
									rendezvousForRemote.append((uint8_t)16);
									rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
									rendezvousForRemote.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16));
								} else if ((bestLocalV4)&&(bestRemoteV4)) {
									haveMatch = true;

									rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
									rendezvousForLocal.append((uint8_t)4);
									rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);

									rendezvousForRemote.append((uint16_t)bestLocalV4.port());
									rendezvousForRemote.append((uint8_t)4);
									rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
									rendezvousForRemote.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4));
								}

								if (haveMatch) {
									_send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
									_flush(fromMemberId); // we want this to go ASAP, since with port restricted cone NATs success can be timing-sensitive
									RR->sw->send(rendezvousForLocal,true,0);
								}
							}
						} break;
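
						// PROXY_SEND: send a packet to one of our directly connected peers
						// on behalf of the requesting member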
						case STATE_MESSAGE_PROXY_SEND: {
							const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
							const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
							const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
							Packet outp(rcpt,RR->identity.address(),verb);
							outp.append(dmsg.field(ptr,len),len); ptr += len;
							RR->sw->send(outp,true,0);
							TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
						} break;
					}
				} catch ( ... ) {
					TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
					// drop invalids
				}

				ptr = nextPtr;
			}
		} catch ( ... ) {
			TRACE("invalid message (outer loop), discarding");
			// drop invalids
		}
	}
}
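
// Relay a packet to whichever cluster member most recently claimed the target
// peer, optionally asking that member to unite the two peers directly.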
bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
{
	if (len > 16384) // sanity check
		return false;

	const uint64_t now = RR->node->now();
	unsigned int canHasPeer = 0;

	{ // Anyone got this peer?
		Mutex::Lock _l2(_peerAffinities_m);
		_PA *pa = _peerAffinities.get(toPeerAddress);
		if ((pa)&&(pa->mid != _id)&&((now - pa->ts) < ZT_PEER_ACTIVITY_TIMEOUT))
			canHasPeer = pa->mid;
		else return false;
	}

	Buffer<1024> buf;
	if (unite) {
		InetAddress v4,v6;
		if (fromPeerAddress) {
			SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress,now));
			if (fromPeer)
				fromPeer->getBestActiveAddresses(now,v4,v6);
		}
		uint8_t addrCount = 0;
		if (v4)
			++addrCount;
		if (v6)
			++addrCount;
		if (addrCount) {
			toPeerAddress.appendTo(buf);
			fromPeerAddress.appendTo(buf);
			buf.append(addrCount);
			if (v4)
				v4.serialize(buf);
			if (v6)
				v6.serialize(buf);
		}
	}

	{
		Mutex::Lock _l2(_members[canHasPeer].lock);
		if (buf.size() > 0)
			_send(canHasPeer,STATE_MESSAGE_PROXY_UNITE,buf.data(),buf.size());
		if (_members[canHasPeer].zeroTierPhysicalEndpoints.size() > 0)
			RR->node->putPacket(InetAddress(),_members[canHasPeer].zeroTierPhysicalEndpoints.front(),data,len);
	}

	TRACE("sendViaCluster(): relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)canHasPeer);
	return true;
}
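
// Tell all other members that we have a direct path to a peer; rate-limited
// to once per ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD per peer.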
void Cluster::replicateHavePeer(const Identity &peerId,const InetAddress &physicalAddress)
{
	const uint64_t now = RR->node->now();
	{
		Mutex::Lock _l2(_peerAffinities_m);
		_PA &pa = _peerAffinities[peerId.address()];
		if (pa.mid != _id) {
			pa.ts = now;
			pa.mid = _id;
		} else if ((now - pa.ts) < ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD) {
			return;
		} else {
			pa.ts = now;
		}
	}

	// announcement
	Buffer<4096> buf;
	peerId.serialize(buf,false);
	physicalAddress.serialize(buf);
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_HAVE_PEER,buf.data(),buf.size());
		}
	}
}
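
// Broadcast a peer's multicast group subscription (MULTICAST_LIKE) to all members.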
void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
{
	Buffer<1024> buf;
	buf.append((uint64_t)nwid);
	peerAddress.appendTo(buf);
	group.mac().appendTo(buf);
	buf.append((uint32_t)group.adi());
	TRACE("replicating %s MULTICAST_LIKE %.16llx/%s/%u to all members",peerAddress.toString().c_str(),nwid,group.mac().toString().c_str(),(unsigned int)group.adi());
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_MULTICAST_LIKE,buf.data(),buf.size());
		}
	}
}

void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
{
	Buffer<4096> buf;
	com.serialize(buf);
	TRACE("replicating %s COM for %.16llx to all members",com.issuedTo().toString().c_str(),com.networkId());
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_COM,buf.data(),buf.size());
		}
	}
}
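
// Functor passed to Topology::eachPeer() to announce every peer to which we
// currently have an active direct path.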
struct _ClusterAnnouncePeers
{
	_ClusterAnnouncePeers(const uint64_t now_,Cluster *parent_) : now(now_),parent(parent_) {}
	const uint64_t now;
	Cluster *const parent;
	inline void operator()(const Topology &t,const SharedPtr<Peer> &peer) const
	{
		Path *p = peer->getBestPath(now);
		if (p)
			parent->replicateHavePeer(peer->identity(),p->address());
	}
};
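
// Housekeeping: prune stale peer affinities, announce peers we have direct
// paths to, send ALIVE heartbeats, and flush queued outgoing state messages.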
void Cluster::doPeriodicTasks()
{
	const uint64_t now = RR->node->now();

	// Erase old peer affinity entries just to control table size
	if ((now - _lastCleanedPeerAffinities) >= (ZT_PEER_ACTIVITY_TIMEOUT * 5)) {
		_lastCleanedPeerAffinities = now;
		Address *k = (Address *)0;
		_PA *v = (_PA *)0;
		Mutex::Lock _l(_peerAffinities_m);
		Hashtable< Address,_PA >::Iterator i(_peerAffinities);
		while (i.next(k,v)) {
			if ((now - v->ts) >= (ZT_PEER_ACTIVITY_TIMEOUT * 5))
				_peerAffinities.erase(*k);
		}
	}

	// Announce peers that we have active direct paths to -- note that we forget paths
	// that other cluster members claim they have, which prevents us from fighting
	// with other cluster members (route flapping) over specific paths.
	if ((now - _lastCheckedPeersForAnnounce) >= (ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD / 4)) {
		_lastCheckedPeersForAnnounce = now;
		_ClusterAnnouncePeers func(now,this);
		RR->topology->eachPeer<_ClusterAnnouncePeers &>(func);
	}

	// Send ALIVE announcements when due, and flush each member's outgoing
	// message queue on every call to doPeriodicTasks()
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);

			if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
				Buffer<2048> alive;
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
				alive.append((uint8_t)ZT_PROTO_VERSION);
				if (_addressToLocationFunction) {
					alive.append((int32_t)_x);
					alive.append((int32_t)_y);
					alive.append((int32_t)_z);
				} else {
					alive.append((int32_t)0);
					alive.append((int32_t)0);
					alive.append((int32_t)0);
				}
				alive.append((uint64_t)now);
				alive.append((uint64_t)0); // TODO: compute and send load average
				alive.append((uint64_t)0); // unused/reserved flags
				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
					pe->serialize(alive);
				_send(*mid,STATE_MESSAGE_ALIVE,alive.data(),alive.size());
				_members[*mid].lastAnnouncedAliveTo = now;
			}

			_flush(*mid); // does nothing if nothing to flush
		}
	}
}
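
// Register another cluster member, deriving its message key from the shared
// master secret and initializing its outgoing message queue.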
void Cluster::addMember(uint16_t memberId)
{
	if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
		return;

	Mutex::Lock _l2(_members[memberId].lock);

	{
		Mutex::Lock _l(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
			return;
		_memberIds.push_back(memberId);
		std::sort(_memberIds.begin(),_memberIds.end());
	}

	_members[memberId].clear();

	// Generate this member's message key from the master and its ID
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(memberId);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
	Utils::burn(stmp,sizeof(stmp));

	// Prepare q
	_members[memberId].q.clear();
	char iv[16];
	Utils::getSecureRandom(iv,16);
	_members[memberId].q.append(iv,16);
	_members[memberId].q.addSize(8); // room for MAC
	_members[memberId].q.append((uint16_t)_id);
	_members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
	Mutex::Lock _l(_memberIds_m);
	std::vector<uint16_t> newMemberIds;
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		if (*mid != memberId)
			newMemberIds.push_back(*mid);
	}
	_memberIds = newMemberIds;
}
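
// Decide whether a peer would be better served by another cluster member by
// comparing the peer's geographic distance to us and to each live member.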
bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
	if (_addressToLocationFunction) {
		// Pick based on location if it can be determined
		int px = 0,py = 0,pz = 0;
		if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
			TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
			return false;
		}

		// Find member closest to this peer
		const uint64_t now = RR->node->now();
		std::vector<InetAddress> best;
		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
		double bestDistance = (offload ? 2147483648.0 : currentDistance);
		unsigned int bestMember = _id;
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				_Member &m = _members[*mid];
				Mutex::Lock _ml(m.lock);

				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
					const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
					if (mdist < bestDistance) {
						bestDistance = mdist;
						bestMember = *mid;
						best = m.zeroTierPhysicalEndpoints;
					}
				}
			}
		}

		// Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
			if (a->ss_family == peerPhysicalAddress.ss_family) {
				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
				redirectTo = *a;
				return true;
			}
		}

		TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
		return false;
	} else {
		// TODO: pick based on load if no location info?
		return false;
	}
}
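
// Fill in a ZT_ClusterStatus structure describing ourselves and every known
// member, including per-member peer counts from the affinity table.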
void Cluster::status(ZT_ClusterStatus &status) const
{
	const uint64_t now = RR->node->now();
	memset(&status,0,sizeof(ZT_ClusterStatus));
	ZT_ClusterMemberStatus *ms[ZT_CLUSTER_MAX_MEMBERS];
	memset(ms,0,sizeof(ms));

	status.myId = _id;

	ms[_id] = &(status.members[status.clusterSize++]);
	ms[_id]->id = _id;
	ms[_id]->alive = 1;
	ms[_id]->x = _x;
	ms[_id]->y = _y;
	ms[_id]->z = _z;
	ms[_id]->peers = RR->topology->countActive();
	for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
		if (ms[_id]->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
			break;
		memcpy(&(ms[_id]->zeroTierPhysicalEndpoints[ms[_id]->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
	}

	{
		Mutex::Lock _l1(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
				break;

			ZT_ClusterMemberStatus *s = ms[*mid] = &(status.members[status.clusterSize++]);
			_Member &m = _members[*mid];
			Mutex::Lock ml(m.lock);

			s->id = *mid;
			s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
			s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
			s->x = m.x;
			s->y = m.y;
			s->z = m.z;
			s->load = m.load;
			for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
				if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
					break;
				memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
			}
		}
	}

	{
		Mutex::Lock _l2(_peerAffinities_m);
		Address *k = (Address *)0;
		_PA *v = (_PA *)0;
		Hashtable< Address,_PA >::Iterator i(const_cast<Cluster *>(this)->_peerAffinities);
		while (i.next(k,v)) {
			if ( (ms[v->mid]) && (v->mid != _id) && ((now - v->ts) < ZT_PEER_ACTIVITY_TIMEOUT) )
				++ms[v->mid]->peers;
		}
	}
}
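
// Queue a <[2] length><[1] type><[...] payload> submessage for a member;
// the caller must hold that member's lock.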
void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
	if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
		return;
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
		_flush(memberId);
	m.q.append((uint16_t)(len + 1));
	m.q.append((uint8_t)type);
	m.q.append(msg,len);
}
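
// Encrypt (Salsa20/12), authenticate (Poly1305, encrypt-then-MAC), and hand a
// member's queued submessages to the send function, then reset the queue with
// a fresh random IV; the caller must hold that member's lock.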
void Cluster::_flush(uint16_t memberId)
{
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
		// Create key from member's key and IV
		char keytmp[32];
		memcpy(keytmp,m.key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= m.q[i];
		Salsa20 s20(keytmp,256,m.q.field(8,8));
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Encrypt m.q in place
		s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

		// Add MAC for authentication (encrypt-then-MAC)
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
		memcpy(m.q.field(16,8),mac,8);

		// Send!
		_sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

		// Prepare for more
		m.q.clear();
		char iv[16];
		Utils::getSecureRandom(iv,16);
		m.q.append(iv,16);
		m.q.addSize(8); // room for MAC
		m.q.append((uint16_t)_id); // from member ID
		m.q.append((uint16_t)memberId); // to member ID
	}
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER
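
/*
 * Usage sketch (illustrative, not part of the original file): how a hosting
 * application might wire up a Cluster. The names below (mySend, myGeoLookup,
 * and the surrounding setup) are hypothetical placeholders; the real
 * integration lives in the service code that embeds this class.
 *
 *   static void mySend(void *arg,unsigned int toMemberId,const void *data,unsigned int len)
 *   {
 *       // deliver 'data' to member 'toMemberId' over the cluster backplane
 *   }
 *
 *   static int myGeoLookup(void *arg,const struct sockaddr_storage *addr,int *x,int *y,int *z)
 *   {
 *       // resolve 'addr' to approximate 3-space coordinates; return nonzero
 *       // on success, zero if no geolocation data is available (yet)
 *       return 0;
 *   }
 *
 *   Cluster cluster(RR,myId,physicalEndpoints,x,y,z,&mySend,(void *)0,&myGeoLookup,(void *)0);
 *   cluster.addMember(otherMemberId);
 *   // ...then call cluster.doPeriodicTasks() regularly and feed messages
 *   // received from other members into cluster.handleIncomingStateMessage().
 */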