Cluster.cpp
/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <algorithm>
#include <utility>

#include "../version.h"

#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Packet.hpp"
#include "Identity.hpp"
#include "Peer.hpp"
#include "Switch.hpp"
#include "Node.hpp"
// Headers for helpers referenced directly below (SHA512, Utils, RR->topology, RR->mc)
#include "SHA512.hpp"
#include "Utils.hpp"
#include "Topology.hpp"
#include "Multicaster.hpp"

namespace ZeroTier {
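
// Straight-line (Euclidean) distance between two points in the integer 3-D
// coordinate space used for cluster member locations.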
static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
    throw()
{
    double dx = ((double)x2 - (double)x1);
    double dy = ((double)y2 - (double)y1);
    double dz = ((double)z2 - (double)z1);
    return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}
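
/* Usage sketch: the embedding service supplies the send callback and,
 * optionally, a geo-lookup callback. Names below other than Cluster's own
 * API are hypothetical.
 *
 *   static void clusterSend(void *arg,unsigned int toMemberId,const void *data,unsigned int len)
 *   {
 *       // write 'data' to the backplane link connected to member 'toMemberId'
 *   }
 *
 *   Cluster *cluster = new Cluster(renv,myMemberId,myPhysicalEndpoints,x,y,z,
 *       &clusterSend,(void *)0,   // how encrypted frames reach other members
 *       (int (*)(void *,const struct sockaddr_storage *,int *,int *,int *))0,
 *       (void *)0);               // no geo-location function configured
 */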
Cluster::Cluster(
    const RuntimeEnvironment *renv,
    uint16_t id,
    const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
    int32_t x,
    int32_t y,
    int32_t z,
    void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
    void *sendFunctionArg,
    int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
    void *addressToLocationFunctionArg) :
    RR(renv),
    _sendFunction(sendFunction),
    _sendFunctionArg(sendFunctionArg),
    _addressToLocationFunction(addressToLocationFunction),
    _addressToLocationFunctionArg(addressToLocationFunctionArg),
    _x(x),
    _y(y),
    _z(z),
    _id(id),
    _zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
    _members(new _Member[ZT_CLUSTER_MAX_MEMBERS])
{
    uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

    // Generate master secret by hashing the secret from our Identity key pair
    RR->identity.sha512PrivateKey(_masterSecret);

    // Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
    memcpy(stmp,_masterSecret,sizeof(stmp));
    stmp[0] ^= Utils::hton(id);
    SHA512::hash(stmp,stmp,sizeof(stmp));
    SHA512::hash(stmp,stmp,sizeof(stmp));
    memcpy(_key,stmp,sizeof(_key));
    Utils::burn(stmp,sizeof(stmp));
}

Cluster::~Cluster()
{
    Utils::burn(_masterSecret,sizeof(_masterSecret));
    Utils::burn(_key,sizeof(_key));
    delete [] _members;
}
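
/* Wire format of a cluster state message, as decoded below and as built by
 * addMember()/_flush():
 *   <[16] IV><[8] first 8 bytes of Poly1305 MAC><[2] from member ID><[2] to member ID><... submessages>
 * Each submessage is <[2] length><[1] StateMessageType><[length - 1] payload>.
 * Everything after the IV and MAC is encrypted with Salsa20/12 and
 * authenticated encrypt-then-MAC style with Poly1305. */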
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
    Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
    {
        // FORMAT: <[16] iv><[8] MAC><... data>
        if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
            return;

        // 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
        char keytmp[32];
        memcpy(keytmp,_key,32);
        for(int i=0;i<8;++i)
            keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
        Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
        Utils::burn(keytmp,sizeof(keytmp));

        // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
        char polykey[ZT_POLY1305_KEY_LEN];
        memset(polykey,0,sizeof(polykey));
        s20.encrypt12(polykey,polykey,sizeof(polykey));

        // Compute 16-byte MAC
        char mac[ZT_POLY1305_MAC_LEN];
        Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

        // Check first 8 bytes of MAC against 64-bit MAC in stream
        if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
            return;

        // Decrypt!
        dmsg.setSize(len - 24);
        s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
    }

    if (dmsg.size() < 4)
        return;
    const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
    unsigned int ptr = 2;
    if ((fromMemberId == _id)||(fromMemberId >= ZT_CLUSTER_MAX_MEMBERS)) // sanity check: ignore our own messages and out-of-range member IDs
        return;
    const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
    ptr += 2;
    if (toMemberId != _id)
        return;

    _Member &m = _members[fromMemberId];
    Mutex::Lock mlck(m.lock);

    try {
        while (ptr < dmsg.size()) {
            const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
            const unsigned int nextPtr = ptr + mlen;

            int mtype = -1;
            try {
                switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {

                    default:
                        break;

                    case STATE_MESSAGE_ALIVE: {
                        ptr += 7; // skip version stuff, not used yet
                        m.x = dmsg.at<int32_t>(ptr); ptr += 4;
                        m.y = dmsg.at<int32_t>(ptr); ptr += 4;
                        m.z = dmsg.at<int32_t>(ptr); ptr += 4;
                        ptr += 8; // skip local clock, not used
                        m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
                        ptr += 8; // skip flags, unused
                        unsigned int physicalAddressCount = dmsg[ptr++];
                        m.zeroTierPhysicalEndpoints.clear(); // replace, don't accumulate, endpoints on each announcement
                        for(unsigned int i=0;i<physicalAddressCount;++i) {
                            m.zeroTierPhysicalEndpoints.push_back(InetAddress());
                            ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
                            if (!(m.zeroTierPhysicalEndpoints.back()))
                                m.zeroTierPhysicalEndpoints.pop_back();
                        }
                        m.lastReceivedAliveAnnouncement = RR->node->now();
                    } break;
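
                    // HAVE_PEER: another member announces that a given peer is
                    // connected to it. We record this in the peer affinity table
                    // so relayed traffic for that peer can be directed to the
                    // member that most recently saw it.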
                    case STATE_MESSAGE_HAVE_PEER: {
                        try {
                            Identity id;
                            ptr += id.deserialize(dmsg,ptr);
                            RR->topology->saveIdentity(id);

                            { // Add or update peer affinity entry
                                _PeerAffinity pa(id.address(),fromMemberId,RR->node->now());
                                Mutex::Lock _l2(_peerAffinities_m);
                                std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
                                if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
                                    i->timestamp = pa.timestamp;
                                } else {
                                    _peerAffinities.push_back(pa);
                                    std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
                                }
                            }
                        } catch ( ... ) {
                            // ignore invalid identities
                        }
                    } break;

                    case STATE_MESSAGE_MULTICAST_LIKE: {
                        const uint64_t nwid = dmsg.at<uint64_t>(ptr); ptr += 8;
                        const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
                        const MAC mac(dmsg.field(ptr,6),6); ptr += 6;
                        const uint32_t adi = dmsg.at<uint32_t>(ptr); ptr += 4;
                        RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address);
                    } break;

                    case STATE_MESSAGE_COM: {
                        // TODO: not used yet
                    } break;
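
                    // RELAY: another member forwards us a raw packet destined
                    // for a peer connected to this member. If the sender also
                    // included the remote peer's physical paths we additionally
                    // attempt a RENDEZVOUS so the two peers can connect directly
                    // and bypass the cluster.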
                    case STATE_MESSAGE_RELAY: {
                        const unsigned int numRemotePeerPaths = dmsg[ptr++];
                        InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
                        for(unsigned int i=0;i<numRemotePeerPaths;++i)
                            ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
                        const unsigned int packetLen = dmsg.at<uint16_t>(ptr); ptr += 2;
                        const void *packet = (const void *)dmsg.field(ptr,packetLen); ptr += packetLen;

                        if (packetLen >= ZT_PROTO_MIN_FRAGMENT_LENGTH) { // ignore anything too short to contain a dest address
                            const Address destinationAddress(reinterpret_cast<const char *>(packet) + 8,ZT_ADDRESS_LENGTH);
                            SharedPtr<Peer> destinationPeer(RR->topology->getPeer(destinationAddress));
                            if (destinationPeer) {
                                if (
                                    (destinationPeer->send(RR,packet,packetLen,RR->node->now()))&&
                                    (numRemotePeerPaths > 0)&&
                                    (packetLen >= 18)&&
                                    (reinterpret_cast<const unsigned char *>(packet)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] != ZT_PACKET_FRAGMENT_INDICATOR) // must be a whole packet, not a fragment, or the source address read below is not valid
                                ) {
                                    // If remote peer paths were sent with this relayed packet, we do
                                    // RENDEZVOUS. It's handled here for cluster-relayed packets since
                                    // we don't have both Peer records so this is a different path.

                                    const Address remotePeerAddress(reinterpret_cast<const char *>(packet) + 13,ZT_ADDRESS_LENGTH);

                                    InetAddress bestDestV4,bestDestV6;
                                    destinationPeer->getBestActiveAddresses(RR->node->now(),bestDestV4,bestDestV6);
                                    InetAddress bestRemoteV4,bestRemoteV6;
                                    for(unsigned int i=0;i<numRemotePeerPaths;++i) {
                                        if ((bestRemoteV4)&&(bestRemoteV6))
                                            break;
                                        switch(remotePeerPaths[i].ss_family) {
                                            case AF_INET:
                                                if (!bestRemoteV4)
                                                    bestRemoteV4 = remotePeerPaths[i];
                                                break;
                                            case AF_INET6:
                                                if (!bestRemoteV6)
                                                    bestRemoteV6 = remotePeerPaths[i];
                                                break;
                                        }
                                    }

                                    Packet rendezvousForDest(destinationAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
                                    rendezvousForDest.append((uint8_t)0);
                                    remotePeerAddress.appendTo(rendezvousForDest);

                                    Buffer<2048> rendezvousForOtherEnd;
                                    remotePeerAddress.appendTo(rendezvousForOtherEnd);
                                    rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS);
                                    const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size();
                                    rendezvousForOtherEnd.addSize(2); // space for actual packet payload length
                                    rendezvousForOtherEnd.append((uint8_t)0); // flags == 0
                                    destinationAddress.appendTo(rendezvousForOtherEnd);

                                    bool haveMatch = false;
                                    if ((bestDestV6)&&(bestRemoteV6)) {
                                        haveMatch = true;
                                        rendezvousForDest.append((uint16_t)bestRemoteV6.port());
                                        rendezvousForDest.append((uint8_t)16);
                                        rendezvousForDest.append(bestRemoteV6.rawIpData(),16);
                                        rendezvousForOtherEnd.append((uint16_t)bestDestV6.port());
                                        rendezvousForOtherEnd.append((uint8_t)16);
                                        rendezvousForOtherEnd.append(bestDestV6.rawIpData(),16);
                                        rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16));
                                    } else if ((bestDestV4)&&(bestRemoteV4)) {
                                        haveMatch = true;
                                        rendezvousForDest.append((uint16_t)bestRemoteV4.port());
                                        rendezvousForDest.append((uint8_t)4);
                                        rendezvousForDest.append(bestRemoteV4.rawIpData(),4);
                                        rendezvousForOtherEnd.append((uint16_t)bestDestV4.port());
                                        rendezvousForOtherEnd.append((uint8_t)4);
                                        rendezvousForOtherEnd.append(bestDestV4.rawIpData(),4);
                                        rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4));
                                    }

                                    if (haveMatch) {
                                        _send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size());
                                        RR->sw->send(rendezvousForDest,true,0);
                                    }
                                }
                            }
                        }
                    } break;
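
                    // PROXY_SEND: another member asks us to originate a packet
                    // to a peer connected to us (e.g. the other half of the
                    // RENDEZVOUS pair built in the RELAY handler above).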
                    case STATE_MESSAGE_PROXY_SEND: {
                        const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH; // advance past recipient address before reading verb
                        const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
                        const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
                        Packet outp(rcpt,RR->identity.address(),verb);
                        outp.append(dmsg.field(ptr,len),len);
                        RR->sw->send(outp,true,0);
                    } break;
                }
            } catch ( ... ) {
                TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
                // drop invalids
            }

            ptr = nextPtr;
        }
    } catch ( ... ) {
        TRACE("invalid message (outer loop), discarding");
        // drop invalids
    }
}
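
// Announce to every other member that the peer with this identity is connected
// to us, using the peer affinity table to rate-limit our own announcements to
// once per ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD per peer.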
void Cluster::replicateHavePeer(const Identity &peerId)
{
    { // Use peer affinity table to track our own last announce time for peers
        _PeerAffinity pa(peerId.address(),_id,RR->node->now());
        Mutex::Lock _l2(_peerAffinities_m);
        std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
        if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
            if ((pa.timestamp - i->timestamp) >= ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD) {
                i->timestamp = pa.timestamp;
                // continue to announcement
            } else {
                // we've already announced this peer recently, so skip
                return;
            }
        } else {
            _peerAffinities.push_back(pa);
            std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
            // continue to announcement
        }
    }

    // announcement
    Buffer<4096> buf;
    peerId.serialize(buf,false);
    {
        Mutex::Lock _l(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            Mutex::Lock _l2(_members[*mid].lock);
            _send(*mid,STATE_MESSAGE_HAVE_PEER,buf.data(),buf.size());
        }
    }
}

void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
{
    Buffer<4096> buf;
    buf.append((uint64_t)nwid);
    peerAddress.appendTo(buf);
    group.mac().appendTo(buf);
    buf.append((uint32_t)group.adi());
    {
        Mutex::Lock _l(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            Mutex::Lock _l2(_members[*mid].lock);
            _send(*mid,STATE_MESSAGE_MULTICAST_LIKE,buf.data(),buf.size());
        }
    }
}

void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
{
    Buffer<4096> buf;
    com.serialize(buf);
    {
        Mutex::Lock _l(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            Mutex::Lock _l2(_members[*mid].lock);
            _send(*mid,STATE_MESSAGE_COM,buf.data(),buf.size());
        }
    }
}
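
// Called periodically by the service: sends an ALIVE announcement (version,
// location, load, physical endpoints) to any member we haven't updated in
// roughly half the cluster timeout, then flushes each member's queued
// submessages.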
void Cluster::doPeriodicTasks()
{
    const uint64_t now = RR->node->now();
    {
        Mutex::Lock _l(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            Mutex::Lock _l2(_members[*mid].lock);

            if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
                Buffer<2048> alive;
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
                alive.append((uint8_t)ZT_PROTO_VERSION);
                if (_addressToLocationFunction) {
                    alive.append((int32_t)_x);
                    alive.append((int32_t)_y);
                    alive.append((int32_t)_z);
                } else {
                    alive.append((int32_t)0);
                    alive.append((int32_t)0);
                    alive.append((int32_t)0);
                }
                alive.append((uint64_t)now);
                alive.append((uint64_t)0); // TODO: compute and send load average
                alive.append((uint64_t)0); // unused/reserved flags
                alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
                for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
                    pe->serialize(alive);
                _send(*mid,STATE_MESSAGE_ALIVE,alive.data(),alive.size());
                _members[*mid].lastAnnouncedAliveTo = now;
            }

            _flush(*mid); // does nothing if nothing to flush
        }
    }
}
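
// Register a new cluster member: derive the key we use for traffic to it and
// initialize its outgoing queue with the standard frame header (random 16-byte
// IV, 8 reserved MAC bytes, then from/to member IDs).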
void Cluster::addMember(uint16_t memberId)
{
    if (memberId >= ZT_CLUSTER_MAX_MEMBERS)
        return;

    Mutex::Lock _l2(_members[memberId].lock);

    {
        Mutex::Lock _l(_memberIds_m);
        if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
            return;
        _memberIds.push_back(memberId);
        std::sort(_memberIds.begin(),_memberIds.end());
    }

    _members[memberId].clear();

    // Generate this member's message key from the master and its ID
    uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
    memcpy(stmp,_masterSecret,sizeof(stmp));
    stmp[0] ^= Utils::hton(memberId);
    SHA512::hash(stmp,stmp,sizeof(stmp));
    SHA512::hash(stmp,stmp,sizeof(stmp));
    memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
    Utils::burn(stmp,sizeof(stmp));

    // Prepare q
    _members[memberId].q.clear();
    char iv[16];
    Utils::getSecureRandom(iv,16);
    _members[memberId].q.append(iv,16);
    _members[memberId].q.addSize(8); // room for MAC
    _members[memberId].q.append((uint16_t)_id);
    _members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
    Mutex::Lock _l(_memberIds_m);
    std::vector<uint16_t> newMemberIds;
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        if (*mid != memberId)
            newMemberIds.push_back(*mid);
    }
    _memberIds = newMemberIds;
}
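
// Decide whether a peer should be handed off to another cluster member. If a
// geo-location function is configured we redirect to the member closest to the
// peer (or to any closer member when 'offload' is set). Returns true if a
// redirect was sent.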
bool Cluster::redirectPeer(const SharedPtr<Peer> &peer,const InetAddress &peerPhysicalAddress,bool offload)
{
    if (!peerPhysicalAddress) // sanity check
        return false;

    if (_addressToLocationFunction) {
        // Pick based on location if it can be determined
        int px = 0,py = 0,pz = 0;
        if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
            // No geo-info so no change
            return false;
        }

        // Find member closest to this peer
        const uint64_t now = RR->node->now();
        std::vector<InetAddress> best; // initial "best" is for peer to stay put
        const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
        double bestDistance = (offload ? 2147483648.0 : currentDistance);
        unsigned int bestMember = _id;
        {
            Mutex::Lock _l(_memberIds_m);
            for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
                _Member &m = _members[*mid];
                Mutex::Lock _ml(m.lock);

                // Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
                if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
                    double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
                    if (mdist < bestDistance) {
                        bestDistance = mdist; // track the running minimum so we end up with the closest member, not just any closer one
                        bestMember = *mid;
                        best = m.zeroTierPhysicalEndpoints;
                    }
                }
            }
        }

        if (best.size() > 0) {
            TRACE("peer %s is at [%d,%d,%d], distance to us is %f, sending to %u instead for better distance %f",peer->address().toString().c_str(),px,py,pz,currentDistance,bestMember,bestDistance);

            /* if (peer->remoteVersionProtocol() >= 5) {
                // If it's a newer peer send VERB_PUSH_DIRECT_PATHS which is more idiomatic
            } else { */
                // Otherwise send VERB_RENDEZVOUS for ourselves, which will trick peers into trying other endpoints for us even if they're too old for PUSH_DIRECT_PATHS
                for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
                    if ((a->ss_family == AF_INET)||(a->ss_family == AF_INET6)) {
                        Packet outp(peer->address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
                        outp.append((uint8_t)0); // no flags
                        RR->identity.address().appendTo(outp); // HACK: rendezvous with ourselves! with really old peers this will only work if I'm a root server!
                        outp.append((uint16_t)a->port());
                        if (a->ss_family == AF_INET) {
                            outp.append((uint8_t)4);
                            outp.append(a->rawIpData(),4);
                        } else {
                            outp.append((uint8_t)16);
                            outp.append(a->rawIpData(),16);
                        }
                        RR->sw->send(outp,true,0);
                    }
                }
            //}

            return true;
        } else {
            TRACE("peer %s is at [%d,%d,%d], distance to us is %f and this seems to be the best",peer->address().toString().c_str(),px,py,pz,currentDistance);
            return false;
        }
    } else {
        // TODO: pick based on load if no location info?
        return false;
    }
}
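
// Append one submessage (<[2] length><[1] type><payload>) to a member's
// outgoing queue, flushing first if the queued frame would grow beyond
// ZT_CLUSTER_MAX_MESSAGE_LENGTH. Caller must hold the member's lock.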
void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
    _Member &m = _members[memberId];
    // assumes m.lock is locked!
    if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
        _flush(memberId);
    m.q.append((uint16_t)(len + 1));
    m.q.append((uint8_t)type);
    m.q.append(msg,len);
}

void Cluster::_flush(uint16_t memberId)
{
    _Member &m = _members[memberId];
    // assumes m.lock is locked!
    if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
        // Create key from member's key and IV
        char keytmp[32];
        memcpy(keytmp,m.key,32);
        for(int i=0;i<8;++i)
            keytmp[i] ^= m.q[i];
        Salsa20 s20(keytmp,256,m.q.field(8,8));
        Utils::burn(keytmp,sizeof(keytmp));

        // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
        char polykey[ZT_POLY1305_KEY_LEN];
        memset(polykey,0,sizeof(polykey));
        s20.encrypt12(polykey,polykey,sizeof(polykey));

        // Encrypt m.q in place
        s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

        // Add MAC for authentication (encrypt-then-MAC)
        char mac[ZT_POLY1305_MAC_LEN];
        Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
        memcpy(m.q.field(16,8),mac,8);

        // Send!
        _sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

        // Prepare for more
        m.q.clear();
        char iv[16];
        Utils::getSecureRandom(iv,16);
        m.q.append(iv,16);
        m.q.addSize(8); // room for MAC
        m.q.append((uint16_t)_id); // from member ID
        m.q.append((uint16_t)memberId); // to member ID
    }
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER