Cluster.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2017 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <map>
#include <algorithm>
#include <set>
#include <utility>
#include <list>
#include <stdexcept>

#include "../version.h"

#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Identity.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Switch.hpp"
#include "Node.hpp"
#include "Network.hpp"
#include "Array.hpp"

namespace ZeroTier {

static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
	throw()
{
	double dx = ((double)x2 - (double)x1);
	double dy = ((double)y2 - (double)y1);
	double dz = ((double)z2 - (double)z1);
	return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}
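
// A note on coordinates: each member's (x,y,z) position is supplied by the
// embedding code (via the Cluster constructor and ALIVE announcements), and a
// peer's position is looked up through the user-supplied addressToLocationFunction
// (for example from a GeoIP database; the units are whatever the embedding code
// chooses). findBetterEndpoint() below simply compares positions by Euclidean
// distance using _dist3d().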

// An entry in _ClusterSendQueue
struct _ClusterSendQueueEntry
{
	uint64_t timestamp;
	Address fromPeerAddress;
	Address toPeerAddress;
	// if we ever support larger transport MTUs this must be increased
	unsigned char data[ZT_CLUSTER_SEND_QUEUE_DATA_MAX];
	unsigned int len;
	bool unite;
};

// A multi-index map with entry memory pooling -- this allows our queue to
// be O(log(N)) and is complex enough that it makes the code a lot cleaner
// to break it out from Cluster.
class _ClusterSendQueue
{
public:
	_ClusterSendQueue() :
		_poolCount(0) {}
	~_ClusterSendQueue() {} // memory is automatically freed when _chunks is destroyed

	inline void enqueue(uint64_t now,const Address &from,const Address &to,const void *data,unsigned int len,bool unite)
	{
		if (len > ZT_CLUSTER_SEND_QUEUE_DATA_MAX)
			return;

		Mutex::Lock _l(_lock);

		// Delete oldest queue entry for this sender if this enqueue() would take them over the per-sender limit
		{
			std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(from,(_ClusterSendQueueEntry *)0)));
			std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator oldest(qi);
			unsigned long countForSender = 0;
			while ((qi != _bySrc.end())&&(qi->first == from)) {
				if (qi->second->timestamp < oldest->second->timestamp)
					oldest = qi;
				++countForSender;
				++qi;
			}
			if (countForSender >= ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
				_byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(oldest->second->toPeerAddress,oldest->second));
				_pool[_poolCount++] = oldest->second;
				_bySrc.erase(oldest);
			}
		}

		_ClusterSendQueueEntry *e;
		if (_poolCount > 0) {
			e = _pool[--_poolCount];
		} else {
			if (_chunks.size() >= ZT_CLUSTER_MAX_QUEUE_CHUNKS)
				return; // queue is totally full!
			_chunks.push_back(Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE>());
			e = &(_chunks.back().data[0]);
			for(unsigned int i=1;i<ZT_CLUSTER_QUEUE_CHUNK_SIZE;++i)
				_pool[_poolCount++] = &(_chunks.back().data[i]);
		}

		e->timestamp = now;
		e->fromPeerAddress = from;
		e->toPeerAddress = to;
		memcpy(e->data,data,len);
		e->len = len;
		e->unite = unite;

		_bySrc.insert(std::pair<Address,_ClusterSendQueueEntry *>(from,e));
		_byDest.insert(std::pair<Address,_ClusterSendQueueEntry *>(to,e));
	}

	inline void expire(uint64_t now)
	{
		Mutex::Lock _l(_lock);
		for(std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.begin());qi!=_bySrc.end();) {
			if ((now - qi->second->timestamp) > ZT_CLUSTER_QUEUE_EXPIRATION) {
				_byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->toPeerAddress,qi->second));
				_pool[_poolCount++] = qi->second;
				_bySrc.erase(qi++);
			} else ++qi;
		}
	}

	/**
	 * Get and dequeue entries for a given destination address
	 *
	 * After use these entries must be returned with returnToPool()!
	 *
	 * @param dest Destination address
	 * @param results Array to fill with results
	 * @param maxResults Size of results[] in pointers
	 * @return Number of actual results returned
	 */
	inline unsigned int getByDest(const Address &dest,_ClusterSendQueueEntry **results,unsigned int maxResults)
	{
		unsigned int count = 0;
		Mutex::Lock _l(_lock);
		std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_byDest.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(dest,(_ClusterSendQueueEntry *)0)));
		while ((qi != _byDest.end())&&(qi->first == dest)) {
			_bySrc.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->fromPeerAddress,qi->second));
			results[count++] = qi->second;
			if (count == maxResults)
				break;
			_byDest.erase(qi++);
		}
		return count;
	}

	/**
	 * Return entries to pool after use
	 *
	 * @param entries Array of entries
	 * @param count Number of entries
	 */
	inline void returnToPool(_ClusterSendQueueEntry **entries,unsigned int count)
	{
		Mutex::Lock _l(_lock);
		for(unsigned int i=0;i<count;++i)
			_pool[_poolCount++] = entries[i];
	}

private:
	std::list< Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE> > _chunks;
	_ClusterSendQueueEntry *_pool[ZT_CLUSTER_QUEUE_CHUNK_SIZE * ZT_CLUSTER_MAX_QUEUE_CHUNKS];
	unsigned long _poolCount;
	std::set< std::pair<Address,_ClusterSendQueueEntry *> > _bySrc;
	std::set< std::pair<Address,_ClusterSendQueueEntry *> > _byDest;
	Mutex _lock;
};
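
// Typical usage within Cluster (see relayViaCluster() and the HAVE_PEER handler
// below): packets destined for peers we cannot yet place are enqueue()d, then
// getByDest() drains them when a HAVE_PEER for that address arrives, and
// returnToPool() recycles the entries once they have been retried.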

Cluster::Cluster(
	const RuntimeEnvironment *renv,
	uint16_t id,
	const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
	int32_t x,
	int32_t y,
	int32_t z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg) :
	RR(renv),
	_sendQueue(new _ClusterSendQueue()),
	_sendFunction(sendFunction),
	_sendFunctionArg(sendFunctionArg),
	_addressToLocationFunction(addressToLocationFunction),
	_addressToLocationFunctionArg(addressToLocationFunctionArg),
	_x(x),
	_y(y),
	_z(z),
	_id(id),
	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
	_lastFlushed(0),
	_lastCleanedRemotePeers(0),
	_lastCleanedQueue(0)
{
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

	// Generate master secret by hashing the secret from our Identity key pair
	RR->identity.sha512PrivateKey(_masterSecret);

	// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(id);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_key,stmp,sizeof(_key));
	Utils::burn(stmp,sizeof(stmp));
}
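
// Each member's inbound key is therefore SHA512(SHA512(masterSecret with its own
// 16-bit member ID XORed into the first word)), truncated to sizeof(_key).
// addMember() below derives the matching outbound key for every other member the
// same way, so the only secret the members need to share is the same identity.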

Cluster::~Cluster()
{
	Utils::burn(_masterSecret,sizeof(_masterSecret));
	Utils::burn(_key,sizeof(_key));
	delete [] _members;
	delete _sendQueue;
}

void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
	Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
	{
		// FORMAT: <[16] iv><[8] MAC><... data>
		if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
			return;

		// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
		char keytmp[32];
		memcpy(keytmp,_key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
		Salsa20 s20(keytmp,reinterpret_cast<const char *>(msg) + 8);
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.crypt12(polykey,polykey,sizeof(polykey));

		// Compute 16-byte MAC
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

		// Check first 8 bytes of MAC against 64-bit MAC in stream
		if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
			return;

		// Decrypt!
		dmsg.setSize(len - 24);
		s20.crypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
	}

	if (dmsg.size() < 4)
		return;
	const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
	unsigned int ptr = 2;
	if (fromMemberId == _id) // sanity check: we don't talk to ourselves
		return;
	const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
	ptr += 2;
	if (toMemberId != _id) // sanity check: message not for us?
		return;

	{ // make sure sender is actually considered a member
		Mutex::Lock _l3(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
			return;
	}
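
	// What remains in dmsg after the 2-byte from-member and to-member IDs is a
	// series of sub-messages, each framed as <[2] length><[1] type><payload>,
	// which is exactly what _send() appends on the sending side. The loop below
	// walks that framing and dispatches on the type byte.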
	try {
		while (ptr < dmsg.size()) {
			const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
			const unsigned int nextPtr = ptr + mlen;
			if (nextPtr > dmsg.size())
				break;

			int mtype = -1;
			try {
				switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
					default:
						break;

					case CLUSTER_MESSAGE_ALIVE: {
						_Member &m = _members[fromMemberId];
						Mutex::Lock mlck(m.lock);
						ptr += 7; // skip version stuff, not used yet
						m.x = dmsg.at<int32_t>(ptr); ptr += 4;
						m.y = dmsg.at<int32_t>(ptr); ptr += 4;
						m.z = dmsg.at<int32_t>(ptr); ptr += 4;
						ptr += 8; // skip local clock, not used
						m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
						m.peers = dmsg.at<uint64_t>(ptr); ptr += 8;
						ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
						std::string addrs;
#endif
						unsigned int physicalAddressCount = dmsg[ptr++];
						m.zeroTierPhysicalEndpoints.clear();
						for(unsigned int i=0;i<physicalAddressCount;++i) {
							m.zeroTierPhysicalEndpoints.push_back(InetAddress());
							ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
							if (!(m.zeroTierPhysicalEndpoints.back())) {
								m.zeroTierPhysicalEndpoints.pop_back();
							}
#ifdef ZT_TRACE
							else {
								if (addrs.length() > 0)
									addrs.push_back(',');
								addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
							}
#endif
						}
#ifdef ZT_TRACE
						if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
							TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
						}
#endif
						m.lastReceivedAliveAnnouncement = RR->node->now();
					} break;

					case CLUSTER_MESSAGE_HAVE_PEER: {
						Identity id;
						ptr += id.deserialize(dmsg,ptr);
						if (id) {
							{
								Mutex::Lock _l(_remotePeers_m);
								_RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(id.address(),(unsigned int)fromMemberId)];
								if (!rp.lastHavePeerReceived) {
									RR->topology->saveIdentity((void *)0,id);
									RR->identity.agree(id,rp.key,ZT_PEER_SECRET_KEY_LENGTH);
								}
								rp.lastHavePeerReceived = RR->node->now();
							}

							_ClusterSendQueueEntry *q[16384]; // 16384 is "tons"
							unsigned int qc = _sendQueue->getByDest(id.address(),q,16384);
							for(unsigned int i=0;i<qc;++i)
								this->relayViaCluster(q[i]->fromPeerAddress,q[i]->toPeerAddress,q[i]->data,q[i]->len,q[i]->unite);
							_sendQueue->returnToPool(q,qc);

							TRACE("[%u] has %s (retried %u queued sends)",(unsigned int)fromMemberId,id.address().toString().c_str(),qc);
						}
					} break;

					case CLUSTER_MESSAGE_WANT_PEER: {
						const Address zeroTierAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						SharedPtr<Peer> peer(RR->topology->getPeerNoCache(zeroTierAddress));
						if ( (peer) && (peer->hasLocalClusterOptimalPath(RR->node->now())) ) {
							Buffer<1024> buf;
							peer->identity().serialize(buf);
							Mutex::Lock _l2(_members[fromMemberId].lock);
							_send(fromMemberId,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
						}
					} break;

					case CLUSTER_MESSAGE_REMOTE_PACKET: {
						const unsigned int plen = dmsg.at<uint16_t>(ptr); ptr += 2;
						if (plen) {
							Packet remotep(dmsg.field(ptr,plen),plen); ptr += plen;
							//TRACE("remote %s from %s via %u (%u bytes)",Packet::verbString(remotep.verb()),remotep.source().toString().c_str(),fromMemberId,plen);
							switch(remotep.verb()) {
								case Packet::VERB_WHOIS: _doREMOTE_WHOIS(fromMemberId,remotep); break;
								case Packet::VERB_MULTICAST_GATHER: _doREMOTE_MULTICAST_GATHER(fromMemberId,remotep); break;
								default: break; // ignore things we don't care about across cluster
							}
						}
					} break;

					case CLUSTER_MESSAGE_PROXY_UNITE: {
						const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const unsigned int numRemotePeerPaths = dmsg[ptr++];
						InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
						for(unsigned int i=0;i<numRemotePeerPaths;++i)
							ptr += remotePeerPaths[i].deserialize(dmsg,ptr);

						TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());

						const uint64_t now = RR->node->now();
						SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress));
						if ((localPeer)&&(numRemotePeerPaths > 0)) {
							InetAddress bestLocalV4,bestLocalV6;
							localPeer->getRendezvousAddresses(now,bestLocalV4,bestLocalV6);

							InetAddress bestRemoteV4,bestRemoteV6;
							for(unsigned int i=0;i<numRemotePeerPaths;++i) {
								if ((bestRemoteV4)&&(bestRemoteV6))
									break;
								switch(remotePeerPaths[i].ss_family) {
									case AF_INET:
										if (!bestRemoteV4)
											bestRemoteV4 = remotePeerPaths[i];
										break;
									case AF_INET6:
										if (!bestRemoteV6)
											bestRemoteV6 = remotePeerPaths[i];
										break;
								}
							}

							Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
							rendezvousForLocal.append((uint8_t)0);
							remotePeerAddress.appendTo(rendezvousForLocal);

							Buffer<2048> rendezvousForRemote;
							remotePeerAddress.appendTo(rendezvousForRemote);
							rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
							rendezvousForRemote.addSize(2); // space for actual packet payload length
							rendezvousForRemote.append((uint8_t)0); // flags == 0
							localPeerAddress.appendTo(rendezvousForRemote);

							bool haveMatch = false;
							if ((bestLocalV6)&&(bestRemoteV6)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
								rendezvousForLocal.append((uint8_t)16);
								rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);

								rendezvousForRemote.append((uint16_t)bestLocalV6.port());
								rendezvousForRemote.append((uint8_t)16);
								rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 16));
							} else if ((bestLocalV4)&&(bestRemoteV4)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
								rendezvousForLocal.append((uint8_t)4);
								rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);

								rendezvousForRemote.append((uint16_t)bestLocalV4.port());
								rendezvousForRemote.append((uint8_t)4);
								rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 4));
							}

							if (haveMatch) {
								{
									Mutex::Lock _l2(_members[fromMemberId].lock);
									_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
								}
								RR->sw->send((void *)0,rendezvousForLocal,true);
							}
						}
					} break;

					case CLUSTER_MESSAGE_PROXY_SEND: {
						const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
						const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
						Packet outp(rcpt,RR->identity.address(),verb);
						outp.append(dmsg.field(ptr,len),len); ptr += len;
						RR->sw->send((void *)0,outp,true);
						//TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
					} break;

					case CLUSTER_MESSAGE_NETWORK_CONFIG: {
						const SharedPtr<Network> network(RR->node->network(dmsg.at<uint64_t>(ptr)));
						if (network) {
							// Copy into a Packet just to conform to Network API. Eventually
							// will want to refactor.
							network->handleConfigChunk((void *)0,0,Address(),Buffer<ZT_PROTO_MAX_PACKET_LENGTH>(dmsg),ptr);
						}
					} break;
				}
			} catch ( ... ) {
				TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
				// drop invalids
			}

			ptr = nextPtr;
		}
	} catch ( ... ) {
		TRACE("invalid message (outer loop), discarding");
		// drop invalids
	}
}

void Cluster::broadcastHavePeer(const Identity &id)
{
	Buffer<1024> buf;
	id.serialize(buf);
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		_send(*mid,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
	}
}

void Cluster::broadcastNetworkConfigChunk(const void *chunk,unsigned int len)
{
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		_send(*mid,CLUSTER_MESSAGE_NETWORK_CONFIG,chunk,len);
	}
}
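
// Peer affinity is tracked in _remotePeers, keyed by (ZeroTier address, member
// ID) and refreshed whenever that member sends HAVE_PEER for the address.
// checkSendViaCluster() below picks the member with the freshest HAVE_PEER
// (returning -1 if none is fresh enough) and, when the newest announcement is
// getting stale, rate-limits a WANT_PEER broadcast so some member re-announces it.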

int Cluster::checkSendViaCluster(const Address &toPeerAddress,uint64_t &mostRecentTs,void *peerSecret)
{
	const uint64_t now = RR->node->now();
	mostRecentTs = 0;
	int mostRecentMemberId = -1;
	{
		Mutex::Lock _l2(_remotePeers_m);
		std::map< std::pair<Address,unsigned int>,_RemotePeer >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
		for(;;) {
			if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
				break;
			else if (rpe->second.lastHavePeerReceived > mostRecentTs) {
				mostRecentTs = rpe->second.lastHavePeerReceived;
				memcpy(peerSecret,rpe->second.key,ZT_PEER_SECRET_KEY_LENGTH);
				mostRecentMemberId = (int)rpe->first.second;
			}
			++rpe;
		}
	}

	const uint64_t ageOfMostRecentHavePeerAnnouncement = now - mostRecentTs;
	if (ageOfMostRecentHavePeerAnnouncement >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
		if (ageOfMostRecentHavePeerAnnouncement >= ZT_PEER_ACTIVITY_TIMEOUT)
			mostRecentMemberId = -1;

		bool sendWantPeer = true;
		{
			Mutex::Lock _l(_remotePeers_m);
			_RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(toPeerAddress,(unsigned int)_id)];
			if ((now - rp.lastSentWantPeer) >= ZT_CLUSTER_WANT_PEER_EVERY) {
				rp.lastSentWantPeer = now;
			} else {
				sendWantPeer = false; // don't flood WANT_PEER
			}
		}

		if (sendWantPeer) {
			char tmp[ZT_ADDRESS_LENGTH];
			toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
			{
				Mutex::Lock _l(_memberIds_m);
				for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
					Mutex::Lock _l2(_members[*mid].lock);
					_send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
				}
			}
		}
	}

	return mostRecentMemberId;
}

bool Cluster::sendViaCluster(int mostRecentMemberId,const Address &toPeerAddress,const void *data,unsigned int len)
{
	if ((mostRecentMemberId < 0)||(mostRecentMemberId >= ZT_CLUSTER_MAX_MEMBERS)) // sanity check
		return false;
	Mutex::Lock _l2(_members[mostRecentMemberId].lock);
	for(std::vector<InetAddress>::const_iterator i1(_zeroTierPhysicalEndpoints.begin());i1!=_zeroTierPhysicalEndpoints.end();++i1) {
		for(std::vector<InetAddress>::const_iterator i2(_members[mostRecentMemberId].zeroTierPhysicalEndpoints.begin());i2!=_members[mostRecentMemberId].zeroTierPhysicalEndpoints.end();++i2) {
			if (i1->ss_family == i2->ss_family) {
				TRACE("sendViaCluster sending %u bytes to %s by way of %u (%s->%s)",len,toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId,i1->toString().c_str(),i2->toString().c_str());
				RR->node->putPacket((void *)0,*i1,*i2,data,len);
				return true;
			}
		}
	}
	return false;
}

void Cluster::relayViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
{
	if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
		return;

	const uint64_t now = RR->node->now();

	uint64_t mostRecentTs = 0;
	int mostRecentMemberId = -1;
	{
		Mutex::Lock _l2(_remotePeers_m);
		std::map< std::pair<Address,unsigned int>,_RemotePeer >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
		for(;;) {
			if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
				break;
			else if (rpe->second.lastHavePeerReceived > mostRecentTs) {
				mostRecentTs = rpe->second.lastHavePeerReceived;
				mostRecentMemberId = (int)rpe->first.second;
			}
			++rpe;
		}
	}

	const uint64_t ageOfMostRecentHavePeerAnnouncement = now - mostRecentTs;
	if (ageOfMostRecentHavePeerAnnouncement >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
		// Enqueue and wait if peer seems alive, but do WANT_PEER to refresh homing
		const bool enqueueAndWait = ((ageOfMostRecentHavePeerAnnouncement >= ZT_PEER_ACTIVITY_TIMEOUT)||(mostRecentMemberId < 0));

		// Poll everyone with WANT_PEER if the age of our most recent entry is
		// approaching expiration (or has expired, or does not exist).
		bool sendWantPeer = true;
		{
			Mutex::Lock _l(_remotePeers_m);
			_RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(toPeerAddress,(unsigned int)_id)];
			if ((now - rp.lastSentWantPeer) >= ZT_CLUSTER_WANT_PEER_EVERY) {
				rp.lastSentWantPeer = now;
			} else {
				sendWantPeer = false; // don't flood WANT_PEER
			}
		}
		if (sendWantPeer) {
			char tmp[ZT_ADDRESS_LENGTH];
			toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
			{
				Mutex::Lock _l(_memberIds_m);
				for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
					Mutex::Lock _l2(_members[*mid].lock);
					_send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
				}
			}
		}

		// If there isn't a good place to send via, then enqueue this for retrying
		// later and return after having broadcasted a WANT_PEER.
		if (enqueueAndWait) {
			TRACE("relayViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
			_sendQueue->enqueue(now,fromPeerAddress,toPeerAddress,data,len,unite);
			return;
		}
	}

	if (mostRecentMemberId >= 0) {
		Buffer<1024> buf;
		if (unite) {
			InetAddress v4,v6;
			if (fromPeerAddress) {
				SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress));
				if (fromPeer)
					fromPeer->getRendezvousAddresses(now,v4,v6);
			}
			uint8_t addrCount = 0;
			if (v4)
				++addrCount;
			if (v6)
				++addrCount;
			if (addrCount) {
				toPeerAddress.appendTo(buf);
				fromPeerAddress.appendTo(buf);
				buf.append(addrCount);
				if (v4)
					v4.serialize(buf);
				if (v6)
					v6.serialize(buf);
			}
		}

		{
			Mutex::Lock _l2(_members[mostRecentMemberId].lock);
			if (buf.size() > 0)
				_send(mostRecentMemberId,CLUSTER_MESSAGE_PROXY_UNITE,buf.data(),buf.size());

			for(std::vector<InetAddress>::const_iterator i1(_zeroTierPhysicalEndpoints.begin());i1!=_zeroTierPhysicalEndpoints.end();++i1) {
				for(std::vector<InetAddress>::const_iterator i2(_members[mostRecentMemberId].zeroTierPhysicalEndpoints.begin());i2!=_members[mostRecentMemberId].zeroTierPhysicalEndpoints.end();++i2) {
					if (i1->ss_family == i2->ss_family) {
						TRACE("relayViaCluster relaying %u bytes from %s to %s by way of %u (%s->%s)",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId,i1->toString().c_str(),i2->toString().c_str());
						RR->node->putPacket((void *)0,*i1,*i2,data,len);
						return;
					}
				}
			}

			TRACE("relayViaCluster relaying %u bytes from %s to %s by way of %u failed: no common endpoints with the same address family!",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId);
		}
	}
}

void Cluster::sendDistributedQuery(const Packet &pkt)
{
	Buffer<4096> buf;
	buf.append((uint16_t)pkt.size());
	buf.append(pkt.data(),pkt.size());
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		_send(*mid,CLUSTER_MESSAGE_REMOTE_PACKET,buf.data(),buf.size());
	}
}

void Cluster::doPeriodicTasks()
{
	const uint64_t now = RR->node->now();

	if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
		_lastFlushed = now;

		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);

			if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
				_members[*mid].lastAnnouncedAliveTo = now;

				Buffer<2048> alive;
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
				alive.append((uint8_t)ZT_PROTO_VERSION);
				if (_addressToLocationFunction) {
					alive.append((int32_t)_x);
					alive.append((int32_t)_y);
					alive.append((int32_t)_z);
				} else {
					alive.append((int32_t)0);
					alive.append((int32_t)0);
					alive.append((int32_t)0);
				}
				alive.append((uint64_t)now);
				alive.append((uint64_t)0); // TODO: compute and send load average
				alive.append((uint64_t)RR->topology->countActive(now));
				alive.append((uint64_t)0); // unused/reserved flags
				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
					pe->serialize(alive);
				_send(*mid,CLUSTER_MESSAGE_ALIVE,alive.data(),alive.size());
			}

			_flush(*mid);
		}
	}

	if ((now - _lastCleanedRemotePeers) >= (ZT_PEER_ACTIVITY_TIMEOUT * 2)) {
		_lastCleanedRemotePeers = now;

		Mutex::Lock _l(_remotePeers_m);
		for(std::map< std::pair<Address,unsigned int>,_RemotePeer >::iterator rp(_remotePeers.begin());rp!=_remotePeers.end();) {
			if ((now - rp->second.lastHavePeerReceived) >= ZT_PEER_ACTIVITY_TIMEOUT)
				_remotePeers.erase(rp++);
			else ++rp;
		}
	}

	if ((now - _lastCleanedQueue) >= ZT_CLUSTER_QUEUE_EXPIRATION) {
		_lastCleanedQueue = now;
		_sendQueue->expire(now);
	}
}

void Cluster::addMember(uint16_t memberId)
{
	if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
		return;

	Mutex::Lock _l2(_members[memberId].lock);

	{
		Mutex::Lock _l(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
			return;
		_memberIds.push_back(memberId);
		std::sort(_memberIds.begin(),_memberIds.end());
	}

	_members[memberId].clear();

	// Generate this member's message key from the master and its ID
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(memberId);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
	Utils::burn(stmp,sizeof(stmp));

	// Prepare q
	_members[memberId].q.clear();
	char iv[16];
	Utils::getSecureRandom(iv,16);
	_members[memberId].q.append(iv,16);
	_members[memberId].q.addSize(8); // room for MAC
	_members[memberId].q.append((uint16_t)_id);
	_members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
	Mutex::Lock _l(_memberIds_m);
	std::vector<uint16_t> newMemberIds;
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		if (*mid != memberId)
			newMemberIds.push_back(*mid);
	}
	_memberIds = newMemberIds;
}

bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
	if (_addressToLocationFunction) {
		// Pick based on location if it can be determined
		int px = 0,py = 0,pz = 0;
		if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
			TRACE("no geolocation data for %s",peerPhysicalAddress.toIpString().c_str());
			return false;
		}

		// Find member closest to this peer
		const uint64_t now = RR->node->now();
		std::vector<InetAddress> best;
		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
		double bestDistance = (offload ? 2147483648.0 : currentDistance);
#ifdef ZT_TRACE
		unsigned int bestMember = _id;
#endif
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				_Member &m = _members[*mid];
				Mutex::Lock _ml(m.lock);

				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
					const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
					if (mdist < bestDistance) {
						bestDistance = mdist;
#ifdef ZT_TRACE
						bestMember = *mid;
#endif
						best = m.zeroTierPhysicalEndpoints;
					}
				}
			}
		}

		// Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
			if (a->ss_family == peerPhysicalAddress.ss_family) {
				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
				redirectTo = *a;
				return true;
			}
		}
		TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
		return false;
	} else {
		// TODO: pick based on load if no location info?
		return false;
	}
}

bool Cluster::isClusterPeerFrontplane(const InetAddress &ip) const
{
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		for(std::vector<InetAddress>::const_iterator i2(_members[*mid].zeroTierPhysicalEndpoints.begin());i2!=_members[*mid].zeroTierPhysicalEndpoints.end();++i2) {
			if (ip == *i2)
				return true;
		}
	}
	return false;
}

void Cluster::status(ZT_ClusterStatus &status) const
{
	const uint64_t now = RR->node->now();
	memset(&status,0,sizeof(ZT_ClusterStatus));

	status.myId = _id;

	{
		ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
		s->id = _id;
		s->alive = 1;
		s->x = _x;
		s->y = _y;
		s->z = _z;
		s->load = 0; // TODO
		s->peers = RR->topology->countActive(now);
		for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
			if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
				break;
			memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
		}
	}

	{
		Mutex::Lock _l1(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
				break;

			_Member &m = _members[*mid];
			Mutex::Lock ml(m.lock);

			ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
			s->id = *mid;
			s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
			s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
			s->x = m.x;
			s->y = m.y;
			s->z = m.z;
			s->load = m.load;
			s->peers = m.peers;
			for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
				if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
					break;
				memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
			}
		}
	}
}
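
// Wire format produced by _send()/_flush() for each encrypted state message:
//   <[16] IV><[8] first 8 bytes of Poly1305 MAC><[2] from member ID><[2] to member ID>
//   followed by one or more <[2] length><[1] type><payload> sub-messages.
// Everything after the first 24 bytes is Salsa20/12 encrypted and the MAC is
// computed over the ciphertext (encrypt-then-MAC), which is exactly what
// handleIncomingStateMessage() verifies and unpacks above.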

void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
	if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
		return;
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
		_flush(memberId);
	m.q.append((uint16_t)(len + 1));
	m.q.append((uint8_t)type);
	m.q.append(msg,len);
}

void Cluster::_flush(uint16_t memberId)
{
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
		// Create key from member's key and IV
		char keytmp[32];
		memcpy(keytmp,m.key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= m.q[i];
		Salsa20 s20(keytmp,m.q.field(8,8));
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.crypt12(polykey,polykey,sizeof(polykey));

		// Encrypt m.q in place
		s20.crypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

		// Add MAC for authentication (encrypt-then-MAC)
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
		memcpy(m.q.field(16,8),mac,8);

		// Send!
		_sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

		// Prepare for more
		m.q.clear();
		char iv[16];
		Utils::getSecureRandom(iv,16);
		m.q.append(iv,16);
		m.q.addSize(8); // room for MAC
		m.q.append((uint16_t)_id); // from member ID
		m.q.append((uint16_t)memberId); // to member ID
	}
}
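
// The two _doREMOTE_*() handlers below answer CLUSTER_MESSAGE_REMOTE_PACKET
// queries (sent by sendDistributedQuery() on another member). Each builds a
// PROXY_SEND payload addressed to the original querying peer:
//   <[5] destination ZT address><[1] packet verb><[2] payload length><payload>
// which the receiving member turns back into a normal outbound packet in its
// CLUSTER_MESSAGE_PROXY_SEND handler above.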

void Cluster::_doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep)
{
	if (remotep.payloadLength() >= ZT_ADDRESS_LENGTH) {
		Identity queried(RR->topology->getIdentity((void *)0,Address(remotep.payload(),ZT_ADDRESS_LENGTH)));
		if (queried) {
			Buffer<1024> routp;
			remotep.source().appendTo(routp);
			routp.append((uint8_t)Packet::VERB_OK);
			routp.addSize(2); // space for length
			routp.append((uint8_t)Packet::VERB_WHOIS);
			routp.append(remotep.packetId());
			queried.serialize(routp);
			routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

			TRACE("responding to remote WHOIS from %s @ %u with identity of %s",remotep.source().toString().c_str(),(unsigned int)fromMemberId,queried.address().toString().c_str());
			Mutex::Lock _l2(_members[fromMemberId].lock);
			_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
		}
	}
}

void Cluster::_doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep)
{
	const uint64_t nwid = remotep.at<uint64_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_NETWORK_ID);
	const MulticastGroup mg(MAC(remotep.field(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_MAC,6),6),remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_ADI));
	unsigned int gatherLimit = remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_GATHER_LIMIT);
	const Address remotePeerAddress(remotep.source());

	if (gatherLimit) {
		Buffer<ZT_PROTO_MAX_PACKET_LENGTH> routp;
		remotePeerAddress.appendTo(routp);
		routp.append((uint8_t)Packet::VERB_OK);
		routp.addSize(2); // space for length
		routp.append((uint8_t)Packet::VERB_MULTICAST_GATHER);
		routp.append(remotep.packetId());
		routp.append(nwid);
		mg.mac().appendTo(routp);
		routp.append((uint32_t)mg.adi());

		if (gatherLimit > ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5))
			gatherLimit = ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5);
		if (RR->mc->gather(remotePeerAddress,nwid,mg,routp,gatherLimit)) {
			routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

			TRACE("responding to remote MULTICAST_GATHER from %s @ %u with %u bytes",remotePeerAddress.toString().c_str(),(unsigned int)fromMemberId,routp.size());
			Mutex::Lock _l2(_members[fromMemberId].lock);
			_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
		}
	}
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER