Cluster.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <map>
#include <algorithm>
#include <set>
#include <utility>
#include <list>
#include <stdexcept>

#include "../version.h"

#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Identity.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Switch.hpp"
#include "Node.hpp"
#include "Network.hpp"
#include "Array.hpp"

namespace ZeroTier {

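// Straight-line (Euclidean) distance between two points in integer 3-space;
// used by findBetterEndpoint() below to compare cluster member locations.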
static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
    throw()
{
    double dx = ((double)x2 - (double)x1);
    double dy = ((double)y2 - (double)y1);
    double dz = ((double)z2 - (double)z1);
    return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}
// An entry in _ClusterSendQueue
struct _ClusterSendQueueEntry
{
    uint64_t timestamp;
    Address fromPeerAddress;
    Address toPeerAddress;
    // if we ever support larger transport MTUs this must be increased
    unsigned char data[ZT_CLUSTER_SEND_QUEUE_DATA_MAX];
    unsigned int len;
    bool unite;
};

// A multi-index map with entry memory pooling -- this allows our queue to
// be O(log(N)) and is complex enough that it makes the code a lot cleaner
// to break it out from Cluster.
class _ClusterSendQueue
{
public:
    _ClusterSendQueue() :
        _poolCount(0) {}

    ~_ClusterSendQueue() {} // memory is automatically freed when _chunks is destroyed

    inline void enqueue(uint64_t now,const Address &from,const Address &to,const void *data,unsigned int len,bool unite)
    {
        if (len > ZT_CLUSTER_SEND_QUEUE_DATA_MAX)
            return;

        Mutex::Lock _l(_lock);

        // Delete oldest queue entry for this sender if this enqueue() would take them over the per-sender limit
        {
            std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(from,(_ClusterSendQueueEntry *)0)));
            std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator oldest(qi);
            unsigned long countForSender = 0;
            while ((qi != _bySrc.end())&&(qi->first == from)) {
                if (qi->second->timestamp < oldest->second->timestamp)
                    oldest = qi;
                ++countForSender;
                ++qi;
            }
            if (countForSender >= ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
                _byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(oldest->second->toPeerAddress,oldest->second));
                _pool[_poolCount++] = oldest->second;
                _bySrc.erase(oldest);
            }
        }

        _ClusterSendQueueEntry *e;
        if (_poolCount > 0) {
            e = _pool[--_poolCount];
        } else {
            if (_chunks.size() >= ZT_CLUSTER_MAX_QUEUE_CHUNKS)
                return; // queue is totally full!
            _chunks.push_back(Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE>());
            e = &(_chunks.back().data[0]);
            for(unsigned int i=1;i<ZT_CLUSTER_QUEUE_CHUNK_SIZE;++i)
                _pool[_poolCount++] = &(_chunks.back().data[i]);
        }

        e->timestamp = now;
        e->fromPeerAddress = from;
        e->toPeerAddress = to;
        memcpy(e->data,data,len);
        e->len = len;
        e->unite = unite;

        _bySrc.insert(std::pair<Address,_ClusterSendQueueEntry *>(from,e));
        _byDest.insert(std::pair<Address,_ClusterSendQueueEntry *>(to,e));
    }

    inline void expire(uint64_t now)
    {
        Mutex::Lock _l(_lock);
        for(std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.begin());qi!=_bySrc.end();) {
            if ((now - qi->second->timestamp) > ZT_CLUSTER_QUEUE_EXPIRATION) {
                _byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->toPeerAddress,qi->second));
                _pool[_poolCount++] = qi->second;
                _bySrc.erase(qi++);
            } else ++qi;
        }
    }

    /**
     * Get and dequeue entries for a given destination address
     *
     * After use these entries must be returned with returnToPool()!
     *
     * @param dest Destination address
     * @param results Array to fill with results
     * @param maxResults Size of results[] in pointers
     * @return Number of actual results returned
     */
    inline unsigned int getByDest(const Address &dest,_ClusterSendQueueEntry **results,unsigned int maxResults)
    {
        unsigned int count = 0;
        Mutex::Lock _l(_lock);
        std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_byDest.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(dest,(_ClusterSendQueueEntry *)0)));
        while ((qi != _byDest.end())&&(qi->first == dest)) {
            _bySrc.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->fromPeerAddress,qi->second));
            results[count++] = qi->second;
            if (count == maxResults)
                break;
            _byDest.erase(qi++);
        }
        return count;
    }

    /**
     * Return entries to pool after use
     *
     * @param entries Array of entries
     * @param count Number of entries
     */
    inline void returnToPool(_ClusterSendQueueEntry **entries,unsigned int count)
    {
        Mutex::Lock _l(_lock);
        for(unsigned int i=0;i<count;++i)
            _pool[_poolCount++] = entries[i];
    }

private:
    std::list< Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE> > _chunks;
    _ClusterSendQueueEntry *_pool[ZT_CLUSTER_QUEUE_CHUNK_SIZE * ZT_CLUSTER_MAX_QUEUE_CHUNKS];
    unsigned long _poolCount;
    std::set< std::pair<Address,_ClusterSendQueueEntry *> > _bySrc;
    std::set< std::pair<Address,_ClusterSendQueueEntry *> > _byDest;
    Mutex _lock;
};

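// Rough sketch of how the queue above is used elsewhere in this file (see
// relayViaCluster() and the CLUSTER_MESSAGE_HAVE_PEER handler): when a relay
// target is not yet known the packet is parked with enqueue(); when a
// HAVE_PEER later arrives for that address, getByDest() drains the parked
// entries, each one is retried via relayViaCluster(), and the entries are
// handed back with returnToPool(). Entries that never find a home are
// reclaimed by expire(), which doPeriodicTasks() calls periodically.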
Cluster::Cluster(
    const RuntimeEnvironment *renv,
    uint16_t id,
    const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
    int32_t x,
    int32_t y,
    int32_t z,
    void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
    void *sendFunctionArg,
    int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
    void *addressToLocationFunctionArg) :
    RR(renv),
    _sendQueue(new _ClusterSendQueue()),
    _sendFunction(sendFunction),
    _sendFunctionArg(sendFunctionArg),
    _addressToLocationFunction(addressToLocationFunction),
    _addressToLocationFunctionArg(addressToLocationFunctionArg),
    _x(x),
    _y(y),
    _z(z),
    _id(id),
    _zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
    _members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
    _lastFlushed(0),
    _lastCleanedRemotePeers(0),
    _lastCleanedQueue(0)
{
    uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

    // Generate master secret by hashing the secret from our Identity key pair
    RR->identity.sha512PrivateKey(_masterSecret);

    // Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
    memcpy(stmp,_masterSecret,sizeof(stmp));
    stmp[0] ^= Utils::hton(id);
    SHA512::hash(stmp,stmp,sizeof(stmp));
    SHA512::hash(stmp,stmp,sizeof(stmp));
    memcpy(_key,stmp,sizeof(_key));
    Utils::burn(stmp,sizeof(stmp));
}

Cluster::~Cluster()
{
    Utils::burn(_masterSecret,sizeof(_masterSecret));
    Utils::burn(_key,sizeof(_key));
    delete [] _members;
    delete _sendQueue;
}

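// State message framing, as parsed below and produced by _send()/_flush()
// (a sketch reconstructed from this file, not normative documentation):
//
//   <[16] IV><[8] first 8 bytes of Poly1305 MAC><... encrypted payload>
//
// where the encrypted payload is:
//
//   <[2] from member ID><[2] to member ID>
//   then one or more of: <[2] length (type byte + data)><[1] StateMessageType><... data>
//
// The first 8 bytes of the IV are XORed into the member's key and the last 8
// bytes are used as the Salsa20 IV; the payload is encrypted and then MACed
// (encrypt-then-MAC) with a one-time Poly1305 key taken from the start of the
// Salsa20 keystream.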
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
    Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
    {
        // FORMAT: <[16] iv><[8] MAC><... data>
        if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
            return;

        // 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
        char keytmp[32];
        memcpy(keytmp,_key,32);
        for(int i=0;i<8;++i)
            keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
        Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
        Utils::burn(keytmp,sizeof(keytmp));

        // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
        char polykey[ZT_POLY1305_KEY_LEN];
        memset(polykey,0,sizeof(polykey));
        s20.crypt12(polykey,polykey,sizeof(polykey));

        // Compute 16-byte MAC
        char mac[ZT_POLY1305_MAC_LEN];
        Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

        // Check first 8 bytes of MAC against 64-bit MAC in stream
        if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
            return;

        // Decrypt!
        dmsg.setSize(len - 24);
        s20.crypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
    }

    if (dmsg.size() < 4)
        return;
    const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
    unsigned int ptr = 2;
    if (fromMemberId == _id) // sanity check: we don't talk to ourselves
        return;
    const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
    ptr += 2;
    if (toMemberId != _id) // sanity check: message not for us?
        return;

    { // make sure sender is actually considered a member
        Mutex::Lock _l3(_memberIds_m);
        if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
            return;
    }

    try {
        while (ptr < dmsg.size()) {
            const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
            const unsigned int nextPtr = ptr + mlen;
            if (nextPtr > dmsg.size())
                break;

            int mtype = -1;
            try {
                switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {

                    default:
                        break;

                    case CLUSTER_MESSAGE_ALIVE: {
                        _Member &m = _members[fromMemberId];
                        Mutex::Lock mlck(m.lock);
                        ptr += 7; // skip version stuff, not used yet
                        m.x = dmsg.at<int32_t>(ptr); ptr += 4;
                        m.y = dmsg.at<int32_t>(ptr); ptr += 4;
                        m.z = dmsg.at<int32_t>(ptr); ptr += 4;
                        ptr += 8; // skip local clock, not used
                        m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
                        m.peers = dmsg.at<uint64_t>(ptr); ptr += 8;
                        ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
                        std::string addrs;
#endif
                        unsigned int physicalAddressCount = dmsg[ptr++];
                        m.zeroTierPhysicalEndpoints.clear();
                        for(unsigned int i=0;i<physicalAddressCount;++i) {
                            m.zeroTierPhysicalEndpoints.push_back(InetAddress());
                            ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
                            if (!(m.zeroTierPhysicalEndpoints.back())) {
                                m.zeroTierPhysicalEndpoints.pop_back();
                            }
#ifdef ZT_TRACE
                            else {
                                if (addrs.length() > 0)
                                    addrs.push_back(',');
                                addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
                            }
#endif
                        }
#ifdef ZT_TRACE
                        if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
                            TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
                        }
#endif
                        m.lastReceivedAliveAnnouncement = RR->node->now();
                    } break;

                    case CLUSTER_MESSAGE_HAVE_PEER: {
                        Identity id;
                        ptr += id.deserialize(dmsg,ptr);
                        if (id) {
                            {
                                Mutex::Lock _l(_remotePeers_m);
                                _RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(id.address(),(unsigned int)fromMemberId)];
                                if (!rp.lastHavePeerReceived) {
                                    RR->topology->saveIdentity(id);
                                    RR->identity.agree(id,rp.key,ZT_PEER_SECRET_KEY_LENGTH);
                                }
                                rp.lastHavePeerReceived = RR->node->now();
                            }

                            _ClusterSendQueueEntry *q[16384]; // 16384 is "tons"
                            unsigned int qc = _sendQueue->getByDest(id.address(),q,16384);
                            for(unsigned int i=0;i<qc;++i)
                                this->relayViaCluster(q[i]->fromPeerAddress,q[i]->toPeerAddress,q[i]->data,q[i]->len,q[i]->unite);
                            _sendQueue->returnToPool(q,qc);

                            TRACE("[%u] has %s (retried %u queued sends)",(unsigned int)fromMemberId,id.address().toString().c_str(),qc);
                        }
                    } break;

                    case CLUSTER_MESSAGE_WANT_PEER: {
                        const Address zeroTierAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
                        SharedPtr<Peer> peer(RR->topology->getPeerNoCache(zeroTierAddress));
                        if ( (peer) && (peer->hasLocalClusterOptimalPath(RR->node->now())) ) {
                            Buffer<1024> buf;
                            peer->identity().serialize(buf);
                            Mutex::Lock _l2(_members[fromMemberId].lock);
                            _send(fromMemberId,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
                        }
                    } break;

                    case CLUSTER_MESSAGE_REMOTE_PACKET: {
                        const unsigned int plen = dmsg.at<uint16_t>(ptr); ptr += 2;
                        if (plen) {
                            Packet remotep(dmsg.field(ptr,plen),plen); ptr += plen;
                            //TRACE("remote %s from %s via %u (%u bytes)",Packet::verbString(remotep.verb()),remotep.source().toString().c_str(),fromMemberId,plen);
                            switch(remotep.verb()) {
                                case Packet::VERB_WHOIS: _doREMOTE_WHOIS(fromMemberId,remotep); break;
                                case Packet::VERB_MULTICAST_GATHER: _doREMOTE_MULTICAST_GATHER(fromMemberId,remotep); break;
                                default: break; // ignore things we don't care about across cluster
                            }
                        }
                    } break;

                    case CLUSTER_MESSAGE_PROXY_UNITE: {
                        const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
                        const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
                        const unsigned int numRemotePeerPaths = dmsg[ptr++];
                        InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
                        for(unsigned int i=0;i<numRemotePeerPaths;++i)
                            ptr += remotePeerPaths[i].deserialize(dmsg,ptr);

                        TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());

                        const uint64_t now = RR->node->now();
                        SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress));
                        if ((localPeer)&&(numRemotePeerPaths > 0)) {
                            InetAddress bestLocalV4,bestLocalV6;
                            localPeer->getRendezvousAddresses(now,bestLocalV4,bestLocalV6);

                            InetAddress bestRemoteV4,bestRemoteV6;
                            for(unsigned int i=0;i<numRemotePeerPaths;++i) {
                                if ((bestRemoteV4)&&(bestRemoteV6))
                                    break;
                                switch(remotePeerPaths[i].ss_family) {
                                    case AF_INET:
                                        if (!bestRemoteV4)
                                            bestRemoteV4 = remotePeerPaths[i];
                                        break;
                                    case AF_INET6:
                                        if (!bestRemoteV6)
                                            bestRemoteV6 = remotePeerPaths[i];
                                        break;
                                }
                            }

                            Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
                            rendezvousForLocal.append((uint8_t)0);
                            remotePeerAddress.appendTo(rendezvousForLocal);

                            Buffer<2048> rendezvousForRemote;
                            remotePeerAddress.appendTo(rendezvousForRemote);
                            rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
                            rendezvousForRemote.addSize(2); // space for actual packet payload length
                            rendezvousForRemote.append((uint8_t)0); // flags == 0
                            localPeerAddress.appendTo(rendezvousForRemote);

                            bool haveMatch = false;
                            if ((bestLocalV6)&&(bestRemoteV6)) {
                                haveMatch = true;

                                rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
                                rendezvousForLocal.append((uint8_t)16);
                                rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);

                                rendezvousForRemote.append((uint16_t)bestLocalV6.port());
                                rendezvousForRemote.append((uint8_t)16);
                                rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
                                rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 16));
                            } else if ((bestLocalV4)&&(bestRemoteV4)) {
                                haveMatch = true;

                                rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
                                rendezvousForLocal.append((uint8_t)4);
                                rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);

                                rendezvousForRemote.append((uint16_t)bestLocalV4.port());
                                rendezvousForRemote.append((uint8_t)4);
                                rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
                                rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 4));
                            }

                            if (haveMatch) {
                                {
                                    Mutex::Lock _l2(_members[fromMemberId].lock);
                                    _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
                                }
                                RR->sw->send(rendezvousForLocal,true);
                            }
                        }
                    } break;

                    case CLUSTER_MESSAGE_PROXY_SEND: {
                        const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
                        const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
                        const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
                        Packet outp(rcpt,RR->identity.address(),verb);
                        outp.append(dmsg.field(ptr,len),len); ptr += len;
                        RR->sw->send(outp,true);
                        //TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
                    } break;

                    case CLUSTER_MESSAGE_NETWORK_CONFIG: {
                        const SharedPtr<Network> network(RR->node->network(dmsg.at<uint64_t>(ptr)));
                        if (network) {
                            // Copy into a Packet just to conform to Network API. Eventually
                            // will want to refactor.
                            network->handleConfigChunk(0,Address(),Buffer<ZT_PROTO_MAX_PACKET_LENGTH>(dmsg),ptr);
                        }
                    } break;
                }
            } catch ( ... ) {
                TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
                // drop invalids
            }

            ptr = nextPtr;
        }
    } catch ( ... ) {
        TRACE("invalid message (outer loop), discarding");
        // drop invalids
    }
}
void Cluster::broadcastHavePeer(const Identity &id)
{
    Buffer<1024> buf;
    id.serialize(buf);
    Mutex::Lock _l(_memberIds_m);
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        Mutex::Lock _l2(_members[*mid].lock);
        _send(*mid,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
    }
}

void Cluster::broadcastNetworkConfigChunk(const void *chunk,unsigned int len)
{
    Mutex::Lock _l(_memberIds_m);
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        Mutex::Lock _l2(_members[*mid].lock);
        _send(*mid,CLUSTER_MESSAGE_NETWORK_CONFIG,chunk,len);
    }
}

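// How the two calls below fit together (a sketch inferred from this file, not
// a definitive statement of the caller's contract): checkSendViaCluster()
// looks up which member most recently announced HAVE_PEER for the target,
// copies that peer's shared secret into peerSecret, and broadcasts WANT_PEER
// when the announcement is stale or missing; the member ID it returns (or -1)
// can then be handed to sendViaCluster(), which forwards the raw packet to
// that member over the first pair of cluster frontplane endpoints sharing an
// address family.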
int Cluster::checkSendViaCluster(const Address &toPeerAddress,uint64_t &mostRecentTs,void *peerSecret)
{
    const uint64_t now = RR->node->now();
    mostRecentTs = 0;
    int mostRecentMemberId = -1;
    {
        Mutex::Lock _l2(_remotePeers_m);
        std::map< std::pair<Address,unsigned int>,_RemotePeer >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
        for(;;) {
            if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
                break;
            else if (rpe->second.lastHavePeerReceived > mostRecentTs) {
                mostRecentTs = rpe->second.lastHavePeerReceived;
                memcpy(peerSecret,rpe->second.key,ZT_PEER_SECRET_KEY_LENGTH);
                mostRecentMemberId = (int)rpe->first.second;
            }
            ++rpe;
        }
    }

    const uint64_t ageOfMostRecentHavePeerAnnouncement = now - mostRecentTs;
    if (ageOfMostRecentHavePeerAnnouncement >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
        if (ageOfMostRecentHavePeerAnnouncement >= ZT_PEER_ACTIVITY_TIMEOUT)
            mostRecentMemberId = -1;

        bool sendWantPeer = true;
        {
            Mutex::Lock _l(_remotePeers_m);
            _RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(toPeerAddress,(unsigned int)_id)];
            if ((now - rp.lastSentWantPeer) >= ZT_CLUSTER_WANT_PEER_EVERY) {
                rp.lastSentWantPeer = now;
            } else {
                sendWantPeer = false; // don't flood WANT_PEER
            }
        }

        if (sendWantPeer) {
            char tmp[ZT_ADDRESS_LENGTH];
            toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
            {
                Mutex::Lock _l(_memberIds_m);
                for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
                    Mutex::Lock _l2(_members[*mid].lock);
                    _send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
                }
            }
        }
    }

    return mostRecentMemberId;
}

bool Cluster::sendViaCluster(int mostRecentMemberId,const Address &toPeerAddress,const void *data,unsigned int len)
{
    if ((mostRecentMemberId < 0)||(mostRecentMemberId >= ZT_CLUSTER_MAX_MEMBERS)) // sanity check
        return false;
    Mutex::Lock _l2(_members[mostRecentMemberId].lock);
    for(std::vector<InetAddress>::const_iterator i1(_zeroTierPhysicalEndpoints.begin());i1!=_zeroTierPhysicalEndpoints.end();++i1) {
        for(std::vector<InetAddress>::const_iterator i2(_members[mostRecentMemberId].zeroTierPhysicalEndpoints.begin());i2!=_members[mostRecentMemberId].zeroTierPhysicalEndpoints.end();++i2) {
            if (i1->ss_family == i2->ss_family) {
                TRACE("sendViaCluster sending %u bytes to %s by way of %u (%s->%s)",len,toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId,i1->toString().c_str(),i2->toString().c_str());
                RR->node->putPacket(*i1,*i2,data,len);
                return true;
            }
        }
    }
    return false;
}

void Cluster::relayViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
{
    if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
        return;

    const uint64_t now = RR->node->now();

    uint64_t mostRecentTs = 0;
    int mostRecentMemberId = -1;
    {
        Mutex::Lock _l2(_remotePeers_m);
        std::map< std::pair<Address,unsigned int>,_RemotePeer >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
        for(;;) {
            if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
                break;
            else if (rpe->second.lastHavePeerReceived > mostRecentTs) {
                mostRecentTs = rpe->second.lastHavePeerReceived;
                mostRecentMemberId = (int)rpe->first.second;
            }
            ++rpe;
        }
    }

    const uint64_t ageOfMostRecentHavePeerAnnouncement = now - mostRecentTs;
    if (ageOfMostRecentHavePeerAnnouncement >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
        // Enqueue and wait if peer seems alive, but do WANT_PEER to refresh homing
        const bool enqueueAndWait = ((ageOfMostRecentHavePeerAnnouncement >= ZT_PEER_ACTIVITY_TIMEOUT)||(mostRecentMemberId < 0));

        // Poll everyone with WANT_PEER if the age of our most recent entry is
        // approaching expiration (or has expired, or does not exist).
        bool sendWantPeer = true;
        {
            Mutex::Lock _l(_remotePeers_m);
            _RemotePeer &rp = _remotePeers[std::pair<Address,unsigned int>(toPeerAddress,(unsigned int)_id)];
            if ((now - rp.lastSentWantPeer) >= ZT_CLUSTER_WANT_PEER_EVERY) {
                rp.lastSentWantPeer = now;
            } else {
                sendWantPeer = false; // don't flood WANT_PEER
            }
        }
        if (sendWantPeer) {
            char tmp[ZT_ADDRESS_LENGTH];
            toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
            {
                Mutex::Lock _l(_memberIds_m);
                for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
                    Mutex::Lock _l2(_members[*mid].lock);
                    _send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
                }
            }
        }

        // If there isn't a good place to send via, then enqueue this for retrying
        // later and return after having broadcasted a WANT_PEER.
        if (enqueueAndWait) {
            TRACE("relayViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
            _sendQueue->enqueue(now,fromPeerAddress,toPeerAddress,data,len,unite);
            return;
        }
    }

    if (mostRecentMemberId >= 0) {
        Buffer<1024> buf;
        if (unite) {
            InetAddress v4,v6;
            if (fromPeerAddress) {
                SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress));
                if (fromPeer)
                    fromPeer->getRendezvousAddresses(now,v4,v6);
            }
            uint8_t addrCount = 0;
            if (v4)
                ++addrCount;
            if (v6)
                ++addrCount;
            if (addrCount) {
                toPeerAddress.appendTo(buf);
                fromPeerAddress.appendTo(buf);
                buf.append(addrCount);
                if (v4)
                    v4.serialize(buf);
                if (v6)
                    v6.serialize(buf);
            }
        }

        {
            Mutex::Lock _l2(_members[mostRecentMemberId].lock);
            if (buf.size() > 0)
                _send(mostRecentMemberId,CLUSTER_MESSAGE_PROXY_UNITE,buf.data(),buf.size());

            for(std::vector<InetAddress>::const_iterator i1(_zeroTierPhysicalEndpoints.begin());i1!=_zeroTierPhysicalEndpoints.end();++i1) {
                for(std::vector<InetAddress>::const_iterator i2(_members[mostRecentMemberId].zeroTierPhysicalEndpoints.begin());i2!=_members[mostRecentMemberId].zeroTierPhysicalEndpoints.end();++i2) {
                    if (i1->ss_family == i2->ss_family) {
                        TRACE("relayViaCluster relaying %u bytes from %s to %s by way of %u (%s->%s)",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId,i1->toString().c_str(),i2->toString().c_str());
                        RR->node->putPacket(*i1,*i2,data,len);
                        return;
                    }
                }
            }

            TRACE("relayViaCluster relaying %u bytes from %s to %s by way of %u failed: no common endpoints with the same address family!",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId);
        }
    }
}
void Cluster::sendDistributedQuery(const Packet &pkt)
{
    Buffer<4096> buf;
    buf.append((uint16_t)pkt.size());
    buf.append(pkt.data(),pkt.size());
    Mutex::Lock _l(_memberIds_m);
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        Mutex::Lock _l2(_members[*mid].lock);
        _send(*mid,CLUSTER_MESSAGE_REMOTE_PACKET,buf.data(),buf.size());
    }
}

void Cluster::doPeriodicTasks()
{
    const uint64_t now = RR->node->now();

    if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
        _lastFlushed = now;

        Mutex::Lock _l(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            Mutex::Lock _l2(_members[*mid].lock);

            if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
                _members[*mid].lastAnnouncedAliveTo = now;

                Buffer<2048> alive;
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
                alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
                alive.append((uint8_t)ZT_PROTO_VERSION);
                if (_addressToLocationFunction) {
                    alive.append((int32_t)_x);
                    alive.append((int32_t)_y);
                    alive.append((int32_t)_z);
                } else {
                    alive.append((int32_t)0);
                    alive.append((int32_t)0);
                    alive.append((int32_t)0);
                }
                alive.append((uint64_t)now);
                alive.append((uint64_t)0); // TODO: compute and send load average
                alive.append((uint64_t)RR->topology->countActive(now));
                alive.append((uint64_t)0); // unused/reserved flags
                alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
                for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
                    pe->serialize(alive);
                _send(*mid,CLUSTER_MESSAGE_ALIVE,alive.data(),alive.size());
            }

            _flush(*mid);
        }
    }

    if ((now - _lastCleanedRemotePeers) >= (ZT_PEER_ACTIVITY_TIMEOUT * 2)) {
        _lastCleanedRemotePeers = now;

        Mutex::Lock _l(_remotePeers_m);
        for(std::map< std::pair<Address,unsigned int>,_RemotePeer >::iterator rp(_remotePeers.begin());rp!=_remotePeers.end();) {
            if ((now - rp->second.lastHavePeerReceived) >= ZT_PEER_ACTIVITY_TIMEOUT)
                _remotePeers.erase(rp++);
            else ++rp;
        }
    }

    if ((now - _lastCleanedQueue) >= ZT_CLUSTER_QUEUE_EXPIRATION) {
        _lastCleanedQueue = now;
        _sendQueue->expire(now);
    }
}

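// Note on the key schedule used below (an observation about this code, not a
// spec): each member's message key is derived exactly as our own inbound key
// is derived in the constructor -- the shared master secret with the member's
// 16-bit ID XORed into the first word, hashed twice with SHA-512 and
// truncated to the key size. Keys are therefore per-recipient: any member
// holding the master secret can derive the key needed to send to any other.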
void Cluster::addMember(uint16_t memberId)
{
    if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
        return;

    Mutex::Lock _l2(_members[memberId].lock);

    {
        Mutex::Lock _l(_memberIds_m);
        if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
            return;
        _memberIds.push_back(memberId);
        std::sort(_memberIds.begin(),_memberIds.end());
    }

    _members[memberId].clear();

    // Generate this member's message key from the master and its ID
    uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
    memcpy(stmp,_masterSecret,sizeof(stmp));
    stmp[0] ^= Utils::hton(memberId);
    SHA512::hash(stmp,stmp,sizeof(stmp));
    SHA512::hash(stmp,stmp,sizeof(stmp));
    memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
    Utils::burn(stmp,sizeof(stmp));

    // Prepare q
    _members[memberId].q.clear();
    char iv[16];
    Utils::getSecureRandom(iv,16);
    _members[memberId].q.append(iv,16);
    _members[memberId].q.addSize(8); // room for MAC
    _members[memberId].q.append((uint16_t)_id);
    _members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
    Mutex::Lock _l(_memberIds_m);
    std::vector<uint16_t> newMemberIds;
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        if (*mid != memberId)
            newMemberIds.push_back(*mid);
    }
    _memberIds = newMemberIds;
}

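// findBetterEndpoint() chooses a redirect target by geographic distance. A
// reading of the logic below: with offload == false a member is only
// suggested if it is strictly closer to the peer than we are, while with
// offload == true the distance threshold starts effectively at infinity, so
// the closest live member wins even if that is not an improvement over us.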
bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
    if (_addressToLocationFunction) {
        // Pick based on location if it can be determined
        int px = 0,py = 0,pz = 0;
        if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
            TRACE("no geolocation data for %s",peerPhysicalAddress.toIpString().c_str());
            return false;
        }

        // Find member closest to this peer
        const uint64_t now = RR->node->now();
        std::vector<InetAddress> best;
        const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
        double bestDistance = (offload ? 2147483648.0 : currentDistance);
#ifdef ZT_TRACE
        unsigned int bestMember = _id;
#endif
        {
            Mutex::Lock _l(_memberIds_m);
            for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
                _Member &m = _members[*mid];
                Mutex::Lock _ml(m.lock);

                // Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
                if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
                    const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
                    if (mdist < bestDistance) {
                        bestDistance = mdist;
#ifdef ZT_TRACE
                        bestMember = *mid;
#endif
                        best = m.zeroTierPhysicalEndpoints;
                    }
                }
            }
        }

        // Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
        for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
            if (a->ss_family == peerPhysicalAddress.ss_family) {
                TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
                redirectTo = *a;
                return true;
            }
        }
        TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
        return false;
    } else {
        // TODO: pick based on load if no location info?
        return false;
    }
}

bool Cluster::isClusterPeerFrontplane(const InetAddress &ip) const
{
    Mutex::Lock _l(_memberIds_m);
    for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
        Mutex::Lock _l2(_members[*mid].lock);
        for(std::vector<InetAddress>::const_iterator i2(_members[*mid].zeroTierPhysicalEndpoints.begin());i2!=_members[*mid].zeroTierPhysicalEndpoints.end();++i2) {
            if (ip == *i2)
                return true;
        }
    }
    return false;
}

void Cluster::status(ZT_ClusterStatus &status) const
{
    const uint64_t now = RR->node->now();
    memset(&status,0,sizeof(ZT_ClusterStatus));

    status.myId = _id;

    {
        ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
        s->id = _id;
        s->alive = 1;
        s->x = _x;
        s->y = _y;
        s->z = _z;
        s->load = 0; // TODO
        s->peers = RR->topology->countActive(now);
        for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
            if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
                break;
            memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
        }
    }

    {
        Mutex::Lock _l1(_memberIds_m);
        for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
            if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
                break;

            _Member &m = _members[*mid];
            Mutex::Lock ml(m.lock);

            ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
            s->id = *mid;
            s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
            s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
            s->x = m.x;
            s->y = m.y;
            s->z = m.z;
            s->load = m.load;
            s->peers = m.peers;
            for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
                if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
                    break;
                memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
            }
        }
    }
}

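// _send() and _flush() together implement the outgoing side of the framing
// sketched above handleIncomingStateMessage(): _send() only appends a
// length-prefixed submessage to the member's plaintext queue (flushing first
// if the addition would overflow ZT_CLUSTER_MAX_MESSAGE_LENGTH), while
// _flush() seals the queued payload with Salsa20/Poly1305 and hands it to the
// configured send function. Both assume the caller already holds that
// member's lock.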
void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
    if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
        return;
    _Member &m = _members[memberId];
    // assumes m.lock is locked!
    if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
        _flush(memberId);
    m.q.append((uint16_t)(len + 1));
    m.q.append((uint8_t)type);
    m.q.append(msg,len);
}

void Cluster::_flush(uint16_t memberId)
{
    _Member &m = _members[memberId];
    // assumes m.lock is locked!
    if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
        // Create key from member's key and IV
        char keytmp[32];
        memcpy(keytmp,m.key,32);
        for(int i=0;i<8;++i)
            keytmp[i] ^= m.q[i];
        Salsa20 s20(keytmp,256,m.q.field(8,8));
        Utils::burn(keytmp,sizeof(keytmp));

        // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
        char polykey[ZT_POLY1305_KEY_LEN];
        memset(polykey,0,sizeof(polykey));
        s20.crypt12(polykey,polykey,sizeof(polykey));

        // Encrypt m.q in place
        s20.crypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

        // Add MAC for authentication (encrypt-then-MAC)
        char mac[ZT_POLY1305_MAC_LEN];
        Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
        memcpy(m.q.field(16,8),mac,8);

        // Send!
        _sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

        // Prepare for more
        m.q.clear();
        char iv[16];
        Utils::getSecureRandom(iv,16);
        m.q.append(iv,16);
        m.q.addSize(8); // room for MAC
        m.q.append((uint16_t)_id); // from member ID
        m.q.append((uint16_t)memberId); // to member ID
    }
}

void Cluster::_doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep)
{
    if (remotep.payloadLength() >= ZT_ADDRESS_LENGTH) {
        Identity queried(RR->topology->getIdentity(Address(remotep.payload(),ZT_ADDRESS_LENGTH)));
        if (queried) {
            Buffer<1024> routp;
            remotep.source().appendTo(routp);
            routp.append((uint8_t)Packet::VERB_OK);
            routp.addSize(2); // space for length
            routp.append((uint8_t)Packet::VERB_WHOIS);
            routp.append(remotep.packetId());
            queried.serialize(routp);
            routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

            TRACE("responding to remote WHOIS from %s @ %u with identity of %s",remotep.source().toString().c_str(),(unsigned int)fromMemberId,queried.address().toString().c_str());
            Mutex::Lock _l2(_members[fromMemberId].lock);
            _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
        }
    }
}

void Cluster::_doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep)
{
    const uint64_t nwid = remotep.at<uint64_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_NETWORK_ID);
    const MulticastGroup mg(MAC(remotep.field(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_MAC,6),6),remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_ADI));
    unsigned int gatherLimit = remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_GATHER_LIMIT);
    const Address remotePeerAddress(remotep.source());

    if (gatherLimit) {
        Buffer<ZT_PROTO_MAX_PACKET_LENGTH> routp;
        remotePeerAddress.appendTo(routp);
        routp.append((uint8_t)Packet::VERB_OK);
        routp.addSize(2); // space for length
        routp.append((uint8_t)Packet::VERB_MULTICAST_GATHER);
        routp.append(remotep.packetId());
        routp.append(nwid);
        mg.mac().appendTo(routp);
        routp.append((uint32_t)mg.adi());

        if (gatherLimit > ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5))
            gatherLimit = ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5);
        if (RR->mc->gather(remotePeerAddress,nwid,mg,routp,gatherLimit)) {
            routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

            TRACE("responding to remote MULTICAST_GATHER from %s @ %u with %u bytes",remotePeerAddress.toString().c_str(),(unsigned int)fromMemberId,routp.size());
            Mutex::Lock _l2(_members[fromMemberId].lock);
            _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
        }
    }
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER