/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <algorithm>
#include <utility>

#include "../version.h"

#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Identity.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Switch.hpp"

namespace ZeroTier {

static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
	throw()
{
	double dx = ((double)x2 - (double)x1);
	double dy = ((double)y2 - (double)y1);
	double dz = ((double)z2 - (double)z1);
	return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}
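
// Cluster message keys are derived from this node's identity secret. For a
// given member ID, the first 16-bit word of the master secret is XORed with
// the (byte-swapped) ID and the result is SHA512-hashed twice; the first
// 256 bits of that digest become the Salsa20/Poly1305 key for traffic
// addressed *to* that member. Informally:
//
//   key(member) = SHA512(SHA512(masterSecret ^ memberId))[0..31]
//
// The constructor computes our own inbound key this way, and addMember()
// repeats the derivation for each remote member's outbound key.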
Cluster::Cluster(
	const RuntimeEnvironment *renv,
	uint16_t id,
	const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
	int32_t x,
	int32_t y,
	int32_t z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg) :
	RR(renv),
	_sendFunction(sendFunction),
	_sendFunctionArg(sendFunctionArg),
	_addressToLocationFunction(addressToLocationFunction),
	_addressToLocationFunctionArg(addressToLocationFunctionArg),
	_x(x),
	_y(y),
	_z(z),
	_id(id),
	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS])
{
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

	// Generate master secret by hashing the secret from our Identity key pair
	RR->identity.sha512PrivateKey(_masterSecret);

	// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(id);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_key,stmp,sizeof(_key));
	Utils::burn(stmp,sizeof(stmp));
}

Cluster::~Cluster()
{
	Utils::burn(_masterSecret,sizeof(_masterSecret));
	Utils::burn(_key,sizeof(_key));
	delete [] _members;
}
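
// On-the-wire envelope for all cluster state messages, as assembled by
// _flush() and unwrapped here:
//
//   <[16] IV><[8] MAC (first 8 bytes of Poly1305)><[2] from member ID>
//   <[2] to member ID><... message records>
//
// The first 8 bytes of the IV are XORed into the member key and the last
// 8 bytes seed Salsa20; everything after the 24-byte header is encrypted,
// and the MAC is computed over the ciphertext (encrypt-then-MAC).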
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
	Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
	{
		// FORMAT: <[16] iv><[8] MAC><... data>
		if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
			return;

		// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
		char keytmp[32];
		memcpy(keytmp,_key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
		Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Compute 16-byte MAC
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

		// Check first 8 bytes of MAC against 64-bit MAC in stream
		if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
			return;

		// Decrypt!
		dmsg.setSize(len - 24);
		s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
	}

	if (dmsg.size() < 4)
		return;
	const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
	unsigned int ptr = 2;
	if (fromMemberId == _id) // sanity check: we don't talk to ourselves
		return;
	const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
	ptr += 2;
	if (toMemberId != _id) // sanity check: message not for us?
		return;

	{ // make sure sender is actually considered a member
		Mutex::Lock _l3(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
			return;
	}
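
	// The decrypted payload is a back-to-back sequence of <[2] size><[1] type>
	// <[...] payload> records. Parse defensively: a record that would overrun
	// the buffer ends the loop, and an exception from an inner decode simply
	// drops that record.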
	{
		_Member &m = _members[fromMemberId];
		Mutex::Lock mlck(m.lock);

		try {
			while (ptr < dmsg.size()) {
				const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
				const unsigned int nextPtr = ptr + mlen;
				if (nextPtr > dmsg.size())
					break;

				int mtype = -1;
				try {
					switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
						default:
							break;

						case STATE_MESSAGE_ALIVE: {
							ptr += 7; // skip version stuff, not used yet
							m.x = dmsg.at<int32_t>(ptr); ptr += 4;
							m.y = dmsg.at<int32_t>(ptr); ptr += 4;
							m.z = dmsg.at<int32_t>(ptr); ptr += 4;
							ptr += 8; // skip local clock, not used
							m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
							ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
							std::string addrs;
#endif
							unsigned int physicalAddressCount = dmsg[ptr++];
							m.zeroTierPhysicalEndpoints.clear();
							for(unsigned int i=0;i<physicalAddressCount;++i) {
								m.zeroTierPhysicalEndpoints.push_back(InetAddress());
								ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
								if (!(m.zeroTierPhysicalEndpoints.back())) {
									m.zeroTierPhysicalEndpoints.pop_back();
								}
#ifdef ZT_TRACE
								else {
									if (addrs.length() > 0)
										addrs.push_back(',');
									addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
								}
#endif
							}
#ifdef ZT_TRACE
							if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
								TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
							}
#endif
							m.lastReceivedAliveAnnouncement = RR->node->now();
						} break;

						case STATE_MESSAGE_HAVE_PEER: {
							try {
								Identity id;
								ptr += id.deserialize(dmsg,ptr);
								if (id) {
									RR->topology->saveIdentity(id);

									{ // Add or update peer affinity entry
										_PeerAffinity pa(id.address(),fromMemberId,RR->node->now());
										Mutex::Lock _l2(_peerAffinities_m);
										std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
										if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
											i->timestamp = pa.timestamp;
										} else {
											_peerAffinities.push_back(pa);
											std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
										}
									}

									TRACE("[%u] has %s",(unsigned int)fromMemberId,id.address().toString().c_str());
								}
							} catch ( ... ) {
								// ignore invalid identities
							}
						} break;

						case STATE_MESSAGE_MULTICAST_LIKE: {
							const uint64_t nwid = dmsg.at<uint64_t>(ptr); ptr += 8;
							const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
							const MAC mac(dmsg.field(ptr,6),6); ptr += 6;
							const uint32_t adi = dmsg.at<uint32_t>(ptr); ptr += 4;
							RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address);
							TRACE("[%u] %s likes %s/%.8x on %.16llx",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid);
						} break;

						case STATE_MESSAGE_COM: {
							CertificateOfMembership com;
							ptr += com.deserialize(dmsg,ptr);
							if (com) {
								TRACE("[%u] COM for %s on %.16llx rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision());
							}
						} break;

						case STATE_MESSAGE_RELAY: {
							const unsigned int numRemotePeerPaths = dmsg[ptr++];
							InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
							for(unsigned int i=0;i<numRemotePeerPaths;++i)
								ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
							const unsigned int packetLen = dmsg.at<uint16_t>(ptr); ptr += 2;
							const void *packet = (const void *)dmsg.field(ptr,packetLen); ptr += packetLen;

							if (packetLen >= ZT_PROTO_MIN_FRAGMENT_LENGTH) { // ignore anything too short to contain a dest address
								const Address destinationAddress(reinterpret_cast<const char *>(packet) + 8,ZT_ADDRESS_LENGTH);
								TRACE("[%u] relay %u bytes to %s (%u remote paths included)",(unsigned int)fromMemberId,packetLen,destinationAddress.toString().c_str(),numRemotePeerPaths);

								SharedPtr<Peer> destinationPeer(RR->topology->getPeer(destinationAddress));
								if (destinationPeer) {
									if (
									    (destinationPeer->send(RR,packet,packetLen,RR->node->now()))&&
									    (numRemotePeerPaths > 0)&&
									    (packetLen >= 18)&&
									    (reinterpret_cast<const unsigned char *>(packet)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR)
									   ) {
										// If remote peer paths were sent with this relayed packet, attempt
										// RENDEZVOUS. It's handled here for cluster-relayed packets because
										// we don't hold Peer records for both ends, so this is a different
										// code path from ordinary rendezvous.
										const Address remotePeerAddress(reinterpret_cast<const char *>(packet) + 13,ZT_ADDRESS_LENGTH);

										InetAddress bestDestV4,bestDestV6;
										destinationPeer->getBestActiveAddresses(RR->node->now(),bestDestV4,bestDestV6);
										InetAddress bestRemoteV4,bestRemoteV6;
										for(unsigned int i=0;i<numRemotePeerPaths;++i) {
											if ((bestRemoteV4)&&(bestRemoteV6))
												break;
											switch(remotePeerPaths[i].ss_family) {
												case AF_INET:
													if (!bestRemoteV4)
														bestRemoteV4 = remotePeerPaths[i];
													break;
												case AF_INET6:
													if (!bestRemoteV6)
														bestRemoteV6 = remotePeerPaths[i];
													break;
											}
										}

										Packet rendezvousForDest(destinationAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
										rendezvousForDest.append((uint8_t)0);
										remotePeerAddress.appendTo(rendezvousForDest);

										Buffer<2048> rendezvousForOtherEnd;
										remotePeerAddress.appendTo(rendezvousForOtherEnd);
										rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS);
										const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size();
										rendezvousForOtherEnd.addSize(2); // space for actual packet payload length
										rendezvousForOtherEnd.append((uint8_t)0); // flags == 0
										destinationAddress.appendTo(rendezvousForOtherEnd);

										bool haveMatch = false;
										if ((bestDestV6)&&(bestRemoteV6)) {
											haveMatch = true;

											rendezvousForDest.append((uint16_t)bestRemoteV6.port());
											rendezvousForDest.append((uint8_t)16);
											rendezvousForDest.append(bestRemoteV6.rawIpData(),16);

											rendezvousForOtherEnd.append((uint16_t)bestDestV6.port());
											rendezvousForOtherEnd.append((uint8_t)16);
											rendezvousForOtherEnd.append(bestDestV6.rawIpData(),16);
											rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16));
										} else if ((bestDestV4)&&(bestRemoteV4)) {
											haveMatch = true;

											rendezvousForDest.append((uint16_t)bestRemoteV4.port());
											rendezvousForDest.append((uint8_t)4);
											rendezvousForDest.append(bestRemoteV4.rawIpData(),4);

											rendezvousForOtherEnd.append((uint16_t)bestDestV4.port());
											rendezvousForOtherEnd.append((uint8_t)4);
											rendezvousForOtherEnd.append(bestDestV4.rawIpData(),4);
											rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4));
										}

										if (haveMatch) {
											_send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size());
											RR->sw->send(rendezvousForDest,true,0);
										}
									}
								}
							}
						} break;

						case STATE_MESSAGE_PROXY_SEND: {
							const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH; // field() does not advance ptr
							const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
							const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
							Packet outp(rcpt,RR->identity.address(),verb);
							outp.append(dmsg.field(ptr,len),len);
							RR->sw->send(outp,true,0);
							TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
						} break;
					}
				} catch ( ... ) {
					TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
					// drop invalids
				}

				ptr = nextPtr;
			}
		} catch ( ... ) {
			TRACE("invalid message (outer loop), discarding");
			// drop invalids
		}
	}
}
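
// _peerAffinities is a flat vector kept sorted by key (ZeroTier address plus
// member ID packed together) so lookups can use std::lower_bound in O(log n);
// entries for the same address are therefore adjacent. Each entry records
// when a given member last claimed to have a direct path to a given peer.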
bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len)
{
	if (len > 16384) // sanity check
		return false;

	uint64_t mostRecentTimestamp = 0;
	uint16_t canHasPeer = 0;

	{ // Anyone got this peer?
		Mutex::Lock _l2(_peerAffinities_m);
		std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),_PeerAffinity(toPeerAddress,0,0))); // O(log(n))
		while ((i != _peerAffinities.end())&&(i->address() == toPeerAddress)) {
			const uint16_t mid = i->clusterMemberId();
			if ((mid != _id)&&(i->timestamp > mostRecentTimestamp)) {
				mostRecentTimestamp = i->timestamp;
				canHasPeer = mid;
			}
			++i;
		}
	}

	const uint64_t now = RR->node->now();
	if ((now - mostRecentTimestamp) < ZT_PEER_ACTIVITY_TIMEOUT) {
		Buffer<16384> buf;

		InetAddress v4,v6;
		if (fromPeerAddress) {
			SharedPtr<Peer> fromPeer(RR->topology->getPeer(fromPeerAddress));
			if (fromPeer)
				fromPeer->getBestActiveAddresses(now,v4,v6);
		}
		buf.append((uint8_t)( (v4) ? ((v6) ? 2 : 1) : ((v6) ? 1 : 0) ));
		if (v4)
			v4.serialize(buf);
		if (v6)
			v6.serialize(buf);
		buf.append((uint16_t)len);
		buf.append(data,len);

		{
			Mutex::Lock _l2(_members[canHasPeer].lock);
			_send(canHasPeer,STATE_MESSAGE_RELAY,buf.data(),buf.size());
		}

		TRACE("sendViaCluster(): relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)canHasPeer);
		return true;
	} else {
		TRACE("sendViaCluster(): unable to relay %u bytes from %s to %s since no cluster members seem to have it!",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
		return false;
	}
}
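
// Rate-limited HAVE_PEER broadcast: our own entry in the peer affinity table
// doubles as a record of when we last announced this peer, so we re-announce
// only after ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD has elapsed.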
void Cluster::replicateHavePeer(const Identity &peerId)
{
	{ // Use peer affinity table to track our own last announce time for peers
		_PeerAffinity pa(peerId.address(),_id,RR->node->now());
		Mutex::Lock _l2(_peerAffinities_m);
		std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
		if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
			if ((pa.timestamp - i->timestamp) >= ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD) {
				i->timestamp = pa.timestamp;
				// continue to announcement
			} else {
				// we've already announced this peer recently, so skip
				return;
			}
		} else {
			_peerAffinities.push_back(pa);
			std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
			// continue to announcement
		}
	}

	// announcement
	Buffer<4096> buf;
	peerId.serialize(buf,false);
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_HAVE_PEER,buf.data(),buf.size());
		}
	}
}

void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
{
	Buffer<2048> buf;
	buf.append((uint64_t)nwid);
	peerAddress.appendTo(buf);
	group.mac().appendTo(buf);
	buf.append((uint32_t)group.adi());
	TRACE("replicating %s MULTICAST_LIKE %.16llx/%s/%u to all members",peerAddress.toString().c_str(),nwid,group.mac().toString().c_str(),(unsigned int)group.adi());
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_MULTICAST_LIKE,buf.data(),buf.size());
		}
	}
}

void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
{
	Buffer<2048> buf;
	com.serialize(buf);
	TRACE("replicating %s COM for %.16llx to all members",com.issuedTo().toString().c_str(),com.networkId());
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);
			_send(*mid,STATE_MESSAGE_COM,buf.data(),buf.size());
		}
	}
}
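
// ALIVE announcements go out to each member roughly twice per
// ZT_CLUSTER_TIMEOUT so a live member never appears dead between heartbeats.
// Payload layout, matching the STATE_MESSAGE_ALIVE parser above:
//
//   <[2] major><[2] minor><[2] revision><[1] protocol version>
//   <[4] X><[4] Y><[4] Z><[8] local clock><[8] load><[8] flags>
//   <[1] endpoint count><[...] serialized physical endpoints>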
void Cluster::doPeriodicTasks()
{
	const uint64_t now = RR->node->now();
	{
		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);

			if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
				Buffer<2048> alive;
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
				alive.append((uint8_t)ZT_PROTO_VERSION);
				if (_addressToLocationFunction) {
					alive.append((int32_t)_x);
					alive.append((int32_t)_y);
					alive.append((int32_t)_z);
				} else {
					alive.append((int32_t)0);
					alive.append((int32_t)0);
					alive.append((int32_t)0);
				}
				alive.append((uint64_t)now);
				alive.append((uint64_t)0); // TODO: compute and send load average
				alive.append((uint64_t)0); // unused/reserved flags
				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
					pe->serialize(alive);
				_send(*mid,STATE_MESSAGE_ALIVE,alive.data(),alive.size());
				_members[*mid].lastAnnouncedAliveTo = now;
			}

			_flush(*mid); // does nothing if nothing to flush
		}
	}
}
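
// Register a remote member: derive its message key from the master secret and
// prime its outgoing queue with a fresh random IV. A second call for an
// already-known member ID is a no-op.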
void Cluster::addMember(uint16_t memberId)
{
	if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
		return;

	Mutex::Lock _l2(_members[memberId].lock);

	{
		Mutex::Lock _l(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
			return;
		_memberIds.push_back(memberId);
		std::sort(_memberIds.begin(),_memberIds.end());
	}

	_members[memberId].clear();

	// Generate this member's message key from the master and its ID
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(memberId);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
	Utils::burn(stmp,sizeof(stmp));

	// Prepare q
	_members[memberId].q.clear();
	char iv[16];
	Utils::getSecureRandom(iv,16);
	_members[memberId].q.append(iv,16);
	_members[memberId].q.addSize(8); // room for MAC
	_members[memberId].q.append((uint16_t)_id);
	_members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
	Mutex::Lock _l(_memberIds_m);
	std::vector<uint16_t> newMemberIds;
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		if (*mid != memberId)
			newMemberIds.push_back(*mid);
	}
	_memberIds = newMemberIds;
}
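
// Decide whether a peer at a given physical address would be better served by
// another cluster member. Distance is straight-line 3-space Euclidean
// distance between whatever coordinates the user-supplied geo-lookup callback
// returns, so callers should supply coordinates in a consistent 3-D space.
// With offload == true we redirect to the closest alive member even if we are
// closer; otherwise we redirect only if another member beats our own distance.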
InetAddress Cluster::findBetterEndpoint(const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
	if (!peerPhysicalAddress) // sanity check
		return InetAddress();

	if (_addressToLocationFunction) {
		// Pick based on location if it can be determined
		int px = 0,py = 0,pz = 0;
		if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
			TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
			return InetAddress();
		}

		// Find member closest to this peer
		const uint64_t now = RR->node->now();
		std::vector<InetAddress> best; // initial "best" is for peer to stay put
		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
		double bestDistance = (offload ? 2147483648.0 : currentDistance);
		unsigned int bestMember = _id;
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				_Member &m = _members[*mid];
				Mutex::Lock _ml(m.lock);

				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
					double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
					if (mdist < bestDistance) {
						bestDistance = mdist;
						bestMember = *mid;
						best = m.zeroTierPhysicalEndpoints;
					}
				}
			}
		}

		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
			if (a->ss_family == peerPhysicalAddress.ss_family) {
				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
				return *a;
			}
		}

		TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
		return InetAddress();
	} else {
		// TODO: pick based on load if no location info?
		return InetAddress();
	}
}

void Cluster::status(ZT_ClusterStatus &status) const
{
	const uint64_t now = RR->node->now();
	memset(&status,0,sizeof(ZT_ClusterStatus));
	ZT_ClusterMemberStatus *ms[ZT_CLUSTER_MAX_MEMBERS];
	memset(ms,0,sizeof(ms));

	status.myId = _id;

	ms[_id] = &(status.members[status.clusterSize++]);
	ms[_id]->id = _id;
	ms[_id]->alive = 1;
	ms[_id]->x = _x;
	ms[_id]->y = _y;
	ms[_id]->z = _z;
	ms[_id]->peers = RR->topology->countAlive();
	for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
		if (ms[_id]->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
			break;
		memcpy(&(ms[_id]->zeroTierPhysicalEndpoints[ms[_id]->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
	}

	{
		Mutex::Lock _l1(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
				break;
			ZT_ClusterMemberStatus *s = ms[*mid] = &(status.members[status.clusterSize++]);
			_Member &m = _members[*mid];
			Mutex::Lock ml(m.lock);
			s->id = *mid;
			s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
			s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
			s->x = m.x;
			s->y = m.y;
			s->z = m.z;
			s->load = m.load;
			for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
				if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
					break;
				memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
			}
		}
	}

	{
		Mutex::Lock _l2(_peerAffinities_m);
		for(std::vector<_PeerAffinity>::const_iterator pi(_peerAffinities.begin());pi!=_peerAffinities.end();++pi) {
			unsigned int mid = pi->clusterMemberId();
			if ((ms[mid])&&(mid != _id)&&((now - pi->timestamp) < ZT_PEER_ACTIVITY_TIMEOUT))
				++ms[mid]->peers;
		}
	}
}
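
// Queue one framed record (<[2] size><[1] type><[...] payload>) on a member's
// outgoing buffer, flushing first if appending would exceed the maximum
// message length. The caller must already hold the member's lock.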
void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
	if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
		return;
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
		_flush(memberId);
	m.q.append((uint16_t)(len + 1));
	m.q.append((uint8_t)type);
	m.q.append(msg,len);
}

void Cluster::_flush(uint16_t memberId)
{
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
		// Create key from member's key and IV
		char keytmp[32];
		memcpy(keytmp,m.key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= m.q[i];
		Salsa20 s20(keytmp,256,m.q.field(8,8));
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Encrypt m.q in place
		s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

		// Add MAC for authentication (encrypt-then-MAC)
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
		memcpy(m.q.field(16,8),mac,8);

		// Send!
		_sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

		// Prepare for more
		m.q.clear();
		char iv[16];
		Utils::getSecureRandom(iv,16);
		m.q.append(iv,16);
		m.q.addSize(8); // room for MAC
		m.q.append((uint16_t)_id); // from member ID
		m.q.append((uint16_t)memberId); // to member ID
	}
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER