Cluster.cpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#ifdef ZT_ENABLE_CLUSTER

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <map>
#include <algorithm>
#include <set>
#include <utility>
#include <list>
#include <stdexcept>

#include "../version.h"

#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Identity.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Switch.hpp"
#include "Node.hpp"
#include "Array.hpp"

/**
 * Chunk size for allocating queue entries
 *
 * Queue entries are allocated in chunks of this many and are added to a pool.
 * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
 */
#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32

/**
 * Maximum number of chunks to ever allocate
 *
 * This is a global sanity limit to prevent resource exhaustion attacks. It
 * works out to about 600 MB of RAM. You'll never see this on a normal edge
 * node. We're unlikely to see this on a root server unless someone is DoSing
 * us. In that case cluster relaying will be affected but other functions
 * should continue to operate normally.
 */
#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194

/**
 * Max data per queue entry
 *
 * If we ever support larger transport MTUs this must be increased. The plus
 * 16 is just a small margin and has no special meaning.
 */
#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)

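// Worst-case queue sizing implied by the limits above (illustrative arithmetic,
// not a new constraint): ZT_CLUSTER_MAX_QUEUE_CHUNKS (8194) chunks of
// ZT_CLUSTER_QUEUE_CHUNK_SIZE (32) entries each is 262,208 pooled entries, and
// every entry reserves ZT_CLUSTER_SEND_QUEUE_DATA_MAX bytes of packet data plus
// a few words of addressing metadata. The exact total depends on
// ZT_UDP_DEFAULT_PAYLOAD_MTU and struct padding, which is presumably where the
// "about 600 MB" figure above comes from.
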
namespace ZeroTier {

static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
	throw()
{
	double dx = ((double)x2 - (double)x1);
	double dy = ((double)y2 - (double)y1);
	double dz = ((double)z2 - (double)z1);
	return sqrt((dx * dx) + (dy * dy) + (dz * dz));
}

// An entry in _ClusterSendQueue
struct _ClusterSendQueueEntry
{
	uint64_t timestamp;
	Address fromPeerAddress;
	Address toPeerAddress;
	// if we ever support larger transport MTUs this must be increased
	unsigned char data[ZT_CLUSTER_SEND_QUEUE_DATA_MAX];
	unsigned int len;
	bool unite;
};

// A multi-index map with entry memory pooling -- this allows our queue to
// be O(log(N)) and is complex enough that it makes the code a lot cleaner
// to break it out from Cluster.
class _ClusterSendQueue
{
public:
	_ClusterSendQueue() :
		_poolCount(0)
	{
	}

	~_ClusterSendQueue() {} // memory is automatically freed when _chunks is destroyed

	inline void enqueue(uint64_t ts,const Address &from,const Address &to,const void *data,unsigned int len,bool unite)
	{
		if (len > ZT_CLUSTER_SEND_QUEUE_DATA_MAX)
			return;

		Mutex::Lock _l(_lock);

		// Delete oldest queue entry if sender has too many queued packets
		{
			std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(from,(_ClusterSendQueueEntry *)0)));
			std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator oldest(_bySrc.end());
			unsigned long countForSender = 0;
			while ((qi != _bySrc.end())&&(qi->first == from)) {
				if (++countForSender > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
					_byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(oldest->second->toPeerAddress,oldest->second));
					_pool[_poolCount++] = oldest->second;
					_bySrc.erase(oldest);
					break;
				} else if (oldest == _bySrc.end())
					oldest = qi;
				++qi;
			}
		}

		_ClusterSendQueueEntry *e;
		if (_poolCount > 0) {
			e = _pool[--_poolCount];
		} else {
			if (_chunks.size() >= ZT_CLUSTER_MAX_QUEUE_CHUNKS)
				return; // queue is totally full!
			_chunks.push_back(Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE>());
			e = &(_chunks.back().data[0]);
			for(unsigned int i=1;i<ZT_CLUSTER_QUEUE_CHUNK_SIZE;++i)
				_pool[_poolCount++] = &(_chunks.back().data[i]);
		}

		e->timestamp = ts;
		e->fromPeerAddress = from;
		e->toPeerAddress = to;
		memcpy(e->data,data,len);
		e->len = len;
		e->unite = unite;

		_bySrc.insert(std::pair<Address,_ClusterSendQueueEntry *>(from,e));
		_byDest.insert(std::pair<Address,_ClusterSendQueueEntry *>(to,e));
	}

	inline void expire(uint64_t now)
	{
		Mutex::Lock _l(_lock);
		for(std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.begin());qi!=_bySrc.end();) {
			if ((now - qi->second->timestamp) > ZT_CLUSTER_QUEUE_EXPIRATION) {
				_byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->toPeerAddress,qi->second));
				_pool[_poolCount++] = qi->second;
				_bySrc.erase(qi++);
			} else ++qi;
		}
	}

	/**
	 * Get and dequeue entries for a given destination address
	 *
	 * After use these entries must be returned with returnToPool()!
	 *
	 * @param dest Destination address
	 * @param results Array to fill with results
	 * @param maxResults Size of results[] in pointers
	 * @return Number of actual results returned
	 */
	inline unsigned int getByDest(const Address &dest,_ClusterSendQueueEntry **results,unsigned int maxResults)
	{
		unsigned int count = 0;
		Mutex::Lock _l(_lock);
		std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_byDest.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(dest,(_ClusterSendQueueEntry *)0)));
		while ((qi != _byDest.end())&&(qi->first == dest)) {
			_bySrc.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->fromPeerAddress,qi->second));
			results[count++] = qi->second;
			if (count == maxResults)
				break;
			_byDest.erase(qi++);
		}
		return count;
	}

	/**
	 * Return entries to pool after use
	 *
	 * @param entries Array of entries
	 * @param count Number of entries
	 */
	inline void returnToPool(_ClusterSendQueueEntry **entries,unsigned int count)
	{
		Mutex::Lock _l(_lock);
		for(unsigned int i=0;i<count;++i)
			_pool[_poolCount++] = entries[i];
	}

private:
	std::list< Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE> > _chunks;
	_ClusterSendQueueEntry *_pool[ZT_CLUSTER_QUEUE_CHUNK_SIZE * ZT_CLUSTER_MAX_QUEUE_CHUNKS];
	unsigned long _poolCount;
	std::set< std::pair<Address,_ClusterSendQueueEntry *> > _bySrc;
	std::set< std::pair<Address,_ClusterSendQueueEntry *> > _byDest;
	Mutex _lock;
};

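// Typical lifecycle of a queued packet (illustrative sketch only; it mirrors the
// calls made in sendViaCluster() and the CLUSTER_MESSAGE_HAVE_PEER handler below):
//
//   _sendQueue->enqueue(now,from,to,data,len,unite);      // no member claims 'to' yet
//   ...
//   _ClusterSendQueueEntry *q[16384];
//   unsigned int qc = _sendQueue->getByDest(to,q,16384);  // HAVE_PEER told us who has 'to'
//   for(unsigned int i=0;i<qc;++i)
//       sendViaCluster(q[i]->fromPeerAddress,q[i]->toPeerAddress,q[i]->data,q[i]->len,q[i]->unite);
//   _sendQueue->returnToPool(q,qc);                       // entries must always be returned
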
Cluster::Cluster(
	const RuntimeEnvironment *renv,
	uint16_t id,
	const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
	int32_t x,
	int32_t y,
	int32_t z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg) :
	RR(renv),
	_sendQueue(new _ClusterSendQueue()),
	_sendFunction(sendFunction),
	_sendFunctionArg(sendFunctionArg),
	_addressToLocationFunction(addressToLocationFunction),
	_addressToLocationFunctionArg(addressToLocationFunctionArg),
	_x(x),
	_y(y),
	_z(z),
	_id(id),
	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
	_lastFlushed(0),
	_lastCleanedRemotePeers(0),
	_lastCleanedQueue(0)
{
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

	// Generate master secret by hashing the secret from our Identity key pair
	RR->identity.sha512PrivateKey(_masterSecret);

	// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(id);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_key,stmp,sizeof(_key));
	Utils::burn(stmp,sizeof(stmp));
}

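// Key schedule in brief (descriptive only; it restates the constructor above and
// addMember() below): the master secret is the SHA-512 of this node's identity
// private key. A member's message key is derived by XORing that member's 16-bit
// ID into the first word of the master secret, hashing the result twice with
// SHA-512, and keeping the first 32 bytes as the Salsa20 key. Our own inbound
// key (_key) is derived the same way from our own ID, so members sharing the
// same identity secret can each compute every other member's key.
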
Cluster::~Cluster()
{
	Utils::burn(_masterSecret,sizeof(_masterSecret));
	Utils::burn(_key,sizeof(_key));
	delete [] _members;
	delete _sendQueue;
}

void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
	Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
	{
		// FORMAT: <[16] iv><[8] MAC><... data>
		if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
			return;

		// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
		char keytmp[32];
		memcpy(keytmp,_key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
		Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Compute 16-byte MAC
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

		// Check first 8 bytes of MAC against 64-bit MAC in stream
		if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
			return;

		// Decrypt!
		dmsg.setSize(len - 24);
		s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
	}

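	// Layout of the decrypted payload (matches what _send()/_flush() build on the
	// sending side): <[2] from member ID><[2] to member ID> followed by zero or
	// more submessages, each <[2] length (type byte + payload)><[1] type><payload>.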
	if (dmsg.size() < 4)
		return;
	const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
	unsigned int ptr = 2;
	if (fromMemberId == _id) // sanity check: we don't talk to ourselves
		return;
	const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
	ptr += 2;
	if (toMemberId != _id) // sanity check: message not for us?
		return;

	{ // make sure sender is actually considered a member
		Mutex::Lock _l3(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
			return;
	}

	try {
		while (ptr < dmsg.size()) {
			const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
			const unsigned int nextPtr = ptr + mlen;
			if (nextPtr > dmsg.size())
				break;

			int mtype = -1;
			try {
				switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
					default:
						break;

					case CLUSTER_MESSAGE_ALIVE: {
						_Member &m = _members[fromMemberId];
						Mutex::Lock mlck(m.lock);
						ptr += 7; // skip version stuff, not used yet
						m.x = dmsg.at<int32_t>(ptr); ptr += 4;
						m.y = dmsg.at<int32_t>(ptr); ptr += 4;
						m.z = dmsg.at<int32_t>(ptr); ptr += 4;
						ptr += 8; // skip local clock, not used
						m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
						m.peers = dmsg.at<uint64_t>(ptr); ptr += 8;
						ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
						std::string addrs;
#endif
						unsigned int physicalAddressCount = dmsg[ptr++];
						m.zeroTierPhysicalEndpoints.clear();
						for(unsigned int i=0;i<physicalAddressCount;++i) {
							m.zeroTierPhysicalEndpoints.push_back(InetAddress());
							ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
							if (!(m.zeroTierPhysicalEndpoints.back())) {
								m.zeroTierPhysicalEndpoints.pop_back();
							}
#ifdef ZT_TRACE
							else {
								if (addrs.length() > 0)
									addrs.push_back(',');
								addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
							}
#endif
						}
#ifdef ZT_TRACE
						if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
							TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
						}
#endif
						m.lastReceivedAliveAnnouncement = RR->node->now();
					} break;

					case CLUSTER_MESSAGE_HAVE_PEER: {
						Identity id;
						ptr += id.deserialize(dmsg,ptr);
						if (id) {
							RR->topology->saveIdentity(id);

							{
								Mutex::Lock _l(_remotePeers_m);
								_remotePeers[std::pair<Address,unsigned int>(id.address(),(unsigned int)fromMemberId)] = RR->node->now();
							}

							_ClusterSendQueueEntry *q[16384]; // 16384 is "tons"
							unsigned int qc = _sendQueue->getByDest(id.address(),q,16384);
							for(unsigned int i=0;i<qc;++i)
								this->sendViaCluster(q[i]->fromPeerAddress,q[i]->toPeerAddress,q[i]->data,q[i]->len,q[i]->unite);
							_sendQueue->returnToPool(q,qc);

							TRACE("[%u] has %s (retried %u queued sends)",(unsigned int)fromMemberId,id.address().toString().c_str(),qc);
						}
					} break;

					case CLUSTER_MESSAGE_WANT_PEER: {
						const Address zeroTierAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						SharedPtr<Peer> peer(RR->topology->getPeerNoCache(zeroTierAddress));
						if ( (peer) && (peer->hasClusterOptimalPath(RR->node->now())) ) {
							Buffer<1024> buf;
							peer->identity().serialize(buf);
							Mutex::Lock _l2(_members[fromMemberId].lock);
							_send(fromMemberId,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
						}
					} break;

					case CLUSTER_MESSAGE_REMOTE_PACKET: {
						const unsigned int plen = dmsg.at<uint16_t>(ptr); ptr += 2;
						if (plen) {
							Packet remotep(dmsg.field(ptr,plen),plen); ptr += plen;
							//TRACE("remote %s from %s via %u (%u bytes)",Packet::verbString(remotep.verb()),remotep.source().toString().c_str(),fromMemberId,plen);
							switch(remotep.verb()) {
								case Packet::VERB_WHOIS: _doREMOTE_WHOIS(fromMemberId,remotep); break;
								case Packet::VERB_MULTICAST_GATHER: _doREMOTE_MULTICAST_GATHER(fromMemberId,remotep); break;
								default: break; // ignore things we don't care about across cluster
							}
						}
					} break;

					case CLUSTER_MESSAGE_PROXY_UNITE: {
						const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const unsigned int numRemotePeerPaths = dmsg[ptr++];
						InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
						for(unsigned int i=0;i<numRemotePeerPaths;++i)
							ptr += remotePeerPaths[i].deserialize(dmsg,ptr);

						TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());

						const uint64_t now = RR->node->now();
						SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress));
						if ((localPeer)&&(numRemotePeerPaths > 0)) {
							InetAddress bestLocalV4,bestLocalV6;
							localPeer->getBestActiveAddresses(now,bestLocalV4,bestLocalV6);

							InetAddress bestRemoteV4,bestRemoteV6;
							for(unsigned int i=0;i<numRemotePeerPaths;++i) {
								if ((bestRemoteV4)&&(bestRemoteV6))
									break;
								switch(remotePeerPaths[i].ss_family) {
									case AF_INET:
										if (!bestRemoteV4)
											bestRemoteV4 = remotePeerPaths[i];
										break;
									case AF_INET6:
										if (!bestRemoteV6)
											bestRemoteV6 = remotePeerPaths[i];
										break;
								}
							}

							Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
							rendezvousForLocal.append((uint8_t)0);
							remotePeerAddress.appendTo(rendezvousForLocal);

							Buffer<2048> rendezvousForRemote;
							remotePeerAddress.appendTo(rendezvousForRemote);
							rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
							rendezvousForRemote.addSize(2); // space for actual packet payload length
							rendezvousForRemote.append((uint8_t)0); // flags == 0
							localPeerAddress.appendTo(rendezvousForRemote);

							bool haveMatch = false;
							if ((bestLocalV6)&&(bestRemoteV6)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
								rendezvousForLocal.append((uint8_t)16);
								rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);

								rendezvousForRemote.append((uint16_t)bestLocalV6.port());
								rendezvousForRemote.append((uint8_t)16);
								rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 16));
							} else if ((bestLocalV4)&&(bestRemoteV4)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
								rendezvousForLocal.append((uint8_t)4);
								rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);

								rendezvousForRemote.append((uint16_t)bestLocalV4.port());
								rendezvousForRemote.append((uint8_t)4);
								rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 4));
							}

							if (haveMatch) {
								{
									Mutex::Lock _l2(_members[fromMemberId].lock);
									_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
								}
								RR->sw->send(rendezvousForLocal,true,0);
							}
						}
					} break;

					case CLUSTER_MESSAGE_PROXY_SEND: {
						const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
						const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
						Packet outp(rcpt,RR->identity.address(),verb);
						outp.append(dmsg.field(ptr,len),len); ptr += len;
						RR->sw->send(outp,true,0);
						//TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
					} break;
				}
			} catch ( ... ) {
				TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
				// drop invalids
			}

			ptr = nextPtr;
		}
	} catch ( ... ) {
		TRACE("invalid message (outer loop), discarding");
		// drop invalids
	}
}

void Cluster::broadcastHavePeer(const Identity &id)
{
	Buffer<1024> buf;
	id.serialize(buf);
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		_send(*mid,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
	}
}

void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
{
	if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
		return;

	const uint64_t now = RR->node->now();

	uint64_t mostRecentTs = 0;
	unsigned int mostRecentMemberId = 0xffffffff;
	{
		Mutex::Lock _l2(_remotePeers_m);
		std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
		for(;;) {
			if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
				break;
			else if (rpe->second > mostRecentTs) {
				mostRecentTs = rpe->second;
				mostRecentMemberId = rpe->first.second;
			}
			++rpe;
		}
	}

	const uint64_t age = now - mostRecentTs;
	if (age >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
		const bool enqueueAndWait = ((age >= ZT_PEER_ACTIVITY_TIMEOUT)||(mostRecentMemberId > 0xffff));

		// Poll everyone with WANT_PEER if the age of our most recent entry is
		// approaching expiration (or has expired, or does not exist).
		char tmp[ZT_ADDRESS_LENGTH];
		toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				Mutex::Lock _l2(_members[*mid].lock);
				_send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
			}
		}

		// If there isn't a good place to send via, then enqueue this for retrying
		// later and return after having broadcasted a WANT_PEER.
		if (enqueueAndWait) {
			TRACE("sendViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
			_sendQueue->enqueue(now,fromPeerAddress,toPeerAddress,data,len,unite);
			return;
		}
	}

	Buffer<1024> buf;
	if (unite) {
		InetAddress v4,v6;
		if (fromPeerAddress) {
			SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress));
			if (fromPeer)
				fromPeer->getBestActiveAddresses(now,v4,v6);
		}
		uint8_t addrCount = 0;
		if (v4)
			++addrCount;
		if (v6)
			++addrCount;
		if (addrCount) {
			toPeerAddress.appendTo(buf);
			fromPeerAddress.appendTo(buf);
			buf.append(addrCount);
			if (v4)
				v4.serialize(buf);
			if (v6)
				v6.serialize(buf);
		}
	}

	{
		Mutex::Lock _l2(_members[mostRecentMemberId].lock);
		if (buf.size() > 0)
			_send(mostRecentMemberId,CLUSTER_MESSAGE_PROXY_UNITE,buf.data(),buf.size());
		if (_members[mostRecentMemberId].zeroTierPhysicalEndpoints.size() > 0) {
			TRACE("sendViaCluster relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId);
			RR->node->putPacket(InetAddress(),_members[mostRecentMemberId].zeroTierPhysicalEndpoints.front(),data,len);
		}
	}
}

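// How a cross-member relay plays out (summary of the logic above and of the
// WANT_PEER/HAVE_PEER handlers in handleIncomingStateMessage(); no new behavior):
//
//   1. sendViaCluster() checks _remotePeers for a member that recently claimed
//      the destination. If the claim is missing or stale it broadcasts
//      CLUSTER_MESSAGE_WANT_PEER and, when no usable member is known, parks the
//      packet in _sendQueue.
//   2. A member with a cluster-optimal path to that peer answers with
//      CLUSTER_MESSAGE_HAVE_PEER, which refreshes _remotePeers and drains the
//      queued packets for that destination via getByDest().
//   3. The retried sendViaCluster() relays the raw packet to the chosen member's
//      first ZeroTier physical endpoint and, if 'unite' is set, also asks that
//      member (PROXY_UNITE) to issue RENDEZVOUS packets so the two peers can
//      connect directly next time.
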
void Cluster::sendDistributedQuery(const Packet &pkt)
{
	Buffer<4096> buf;
	buf.append((uint16_t)pkt.size());
	buf.append(pkt.data(),pkt.size());
	Mutex::Lock _l(_memberIds_m);
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		Mutex::Lock _l2(_members[*mid].lock);
		_send(*mid,CLUSTER_MESSAGE_REMOTE_PACKET,buf.data(),buf.size());
	}
}

void Cluster::doPeriodicTasks()
{
	const uint64_t now = RR->node->now();

	if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
		_lastFlushed = now;

		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);

			if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
				_members[*mid].lastAnnouncedAliveTo = now;

				Buffer<2048> alive;
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
				alive.append((uint8_t)ZT_PROTO_VERSION);
				if (_addressToLocationFunction) {
					alive.append((int32_t)_x);
					alive.append((int32_t)_y);
					alive.append((int32_t)_z);
				} else {
					alive.append((int32_t)0);
					alive.append((int32_t)0);
					alive.append((int32_t)0);
				}
				alive.append((uint64_t)now);
				alive.append((uint64_t)0); // TODO: compute and send load average
				alive.append((uint64_t)RR->topology->countActive());
				alive.append((uint64_t)0); // unused/reserved flags
				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
					pe->serialize(alive);
				_send(*mid,CLUSTER_MESSAGE_ALIVE,alive.data(),alive.size());
			}

			_flush(*mid);
		}
	}

	if ((now - _lastCleanedRemotePeers) >= (ZT_PEER_ACTIVITY_TIMEOUT * 2)) {
		_lastCleanedRemotePeers = now;

		Mutex::Lock _l(_remotePeers_m);
		for(std::map< std::pair<Address,unsigned int>,uint64_t >::iterator rp(_remotePeers.begin());rp!=_remotePeers.end();) {
			if ((now - rp->second) >= ZT_PEER_ACTIVITY_TIMEOUT)
				_remotePeers.erase(rp++);
			else ++rp;
		}
	}

	if ((now - _lastCleanedQueue) >= ZT_CLUSTER_QUEUE_EXPIRATION) {
		_lastCleanedQueue = now;
		_sendQueue->expire(now);
	}
}

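// Timing recap for the periodic work above (restates the constants used there):
// queued state messages are flushed to every member at most every
// ZT_CLUSTER_FLUSH_PERIOD milliseconds; an ALIVE announcement (version, location,
// load, peer count, physical endpoints) is re-sent to a member once roughly half
// of ZT_CLUSTER_TIMEOUT has elapsed since the last one, so a member only looks
// dead after missing several announcements; _remotePeers entries and the send
// queue are garbage collected on their own, longer intervals.
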
void Cluster::addMember(uint16_t memberId)
{
	if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
		return;

	Mutex::Lock _l2(_members[memberId].lock);

	{
		Mutex::Lock _l(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
			return;
		_memberIds.push_back(memberId);
		std::sort(_memberIds.begin(),_memberIds.end());
	}

	_members[memberId].clear();

	// Generate this member's message key from the master and its ID
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(memberId);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
	Utils::burn(stmp,sizeof(stmp));

	// Prepare q
	_members[memberId].q.clear();
	char iv[16];
	Utils::getSecureRandom(iv,16);
	_members[memberId].q.append(iv,16);
	_members[memberId].q.addSize(8); // room for MAC
	_members[memberId].q.append((uint16_t)_id);
	_members[memberId].q.append((uint16_t)memberId);
}

void Cluster::removeMember(uint16_t memberId)
{
	Mutex::Lock _l(_memberIds_m);
	std::vector<uint16_t> newMemberIds;
	for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
		if (*mid != memberId)
			newMemberIds.push_back(*mid);
	}
	_memberIds = newMemberIds;
}

bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
	if (_addressToLocationFunction) {
		// Pick based on location if it can be determined
		int px = 0,py = 0,pz = 0;
		if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
			TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
			return false;
		}

		// Find member closest to this peer
		const uint64_t now = RR->node->now();
		std::vector<InetAddress> best;
		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
		double bestDistance = (offload ? 2147483648.0 : currentDistance);
		unsigned int bestMember = _id;
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				_Member &m = _members[*mid];
				Mutex::Lock _ml(m.lock);

				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
					const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
					if (mdist < bestDistance) {
						bestDistance = mdist;
						bestMember = *mid;
						best = m.zeroTierPhysicalEndpoints;
					}
				}
			}
		}

		// Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
			if (a->ss_family == peerPhysicalAddress.ss_family) {
				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
				redirectTo = *a;
				return true;
			}
		}

		TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
		return false;
	} else {
		// TODO: pick based on load if no location info?
		return false;
	}
}

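// Redirect policy in short (mirrors findBetterEndpoint() above): a peer is only
// redirected when geolocation succeeds, another member is both alive and
// strictly closer in the x/y/z coordinate space, and that member advertises an
// endpoint in the same address family as the peer's physical address. With
// 'offload' set, our own distance is ignored (bestDistance starts effectively
// at infinity), so any qualifying member wins and an overloaded member can shed
// peers even when it is geographically closest.
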
void Cluster::status(ZT_ClusterStatus &status) const
{
	const uint64_t now = RR->node->now();
	memset(&status,0,sizeof(ZT_ClusterStatus));

	status.myId = _id;

	{
		ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
		s->id = _id;
		s->alive = 1;
		s->x = _x;
		s->y = _y;
		s->z = _z;
		s->load = 0; // TODO
		s->peers = RR->topology->countActive();
		for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
			if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
				break;
			memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
		}
	}

	{
		Mutex::Lock _l1(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
				break;

			_Member &m = _members[*mid];
			Mutex::Lock ml(m.lock);

			ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
			s->id = *mid;
			s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
			s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
			s->x = m.x;
			s->y = m.y;
			s->z = m.z;
			s->load = m.load;
			s->peers = m.peers;
			for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
				if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
					break;
				memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
			}
		}
	}
}

void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
{
	if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
		return;
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
		_flush(memberId);
	m.q.append((uint16_t)(len + 1));
	m.q.append((uint8_t)type);
	m.q.append(msg,len);
}

void Cluster::_flush(uint16_t memberId)
{
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
		// Create key from member's key and IV
		char keytmp[32];
		memcpy(keytmp,m.key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= m.q[i];
		Salsa20 s20(keytmp,256,m.q.field(8,8));
		Utils::burn(keytmp,sizeof(keytmp));

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Encrypt m.q in place
		s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

		// Add MAC for authentication (encrypt-then-MAC)
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
		memcpy(m.q.field(16,8),mac,8);

		// Send!
		_sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

		// Prepare for more
		m.q.clear();
		char iv[16];
		Utils::getSecureRandom(iv,16);
		m.q.append(iv,16);
		m.q.addSize(8); // room for MAC
		m.q.append((uint16_t)_id); // from member ID
		m.q.append((uint16_t)memberId); // to member ID
	}
}

void Cluster::_doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep)
{
	if (remotep.payloadLength() >= ZT_ADDRESS_LENGTH) {
		Identity queried(RR->topology->getIdentity(Address(remotep.payload(),ZT_ADDRESS_LENGTH)));
		if (queried) {
			Buffer<1024> routp;
			remotep.source().appendTo(routp);
			routp.append((uint8_t)Packet::VERB_OK);
			routp.addSize(2); // space for length
			routp.append((uint8_t)Packet::VERB_WHOIS);
			routp.append(remotep.packetId());
			queried.serialize(routp);
			routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

			TRACE("responding to remote WHOIS from %s @ %u with identity of %s",remotep.source().toString().c_str(),(unsigned int)fromMemberId,queried.address().toString().c_str());
			Mutex::Lock _l2(_members[fromMemberId].lock);
			_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
		}
	}
}

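// Both _doREMOTE_WHOIS() above and _doREMOTE_MULTICAST_GATHER() below answer a
// forwarded query by building a CLUSTER_MESSAGE_PROXY_SEND payload of the form
// <[5] recipient ZeroTier address><[1] packet verb><[2] payload length><payload>,
// which the originating member unpacks in its PROXY_SEND handler and sends to
// the recipient as a normal ZeroTier packet from the local node's identity.
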
void Cluster::_doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep)
{
	const uint64_t nwid = remotep.at<uint64_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_NETWORK_ID);
	const MulticastGroup mg(MAC(remotep.field(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_MAC,6),6),remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_ADI));
	unsigned int gatherLimit = remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_GATHER_LIMIT);
	const Address remotePeerAddress(remotep.source());

	if (gatherLimit) {
		Buffer<ZT_PROTO_MAX_PACKET_LENGTH> routp;
		remotePeerAddress.appendTo(routp);
		routp.append((uint8_t)Packet::VERB_OK);
		routp.addSize(2); // space for length
		routp.append((uint8_t)Packet::VERB_MULTICAST_GATHER);
		routp.append(remotep.packetId());
		routp.append(nwid);
		mg.mac().appendTo(routp);
		routp.append((uint32_t)mg.adi());

		if (gatherLimit > ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5))
			gatherLimit = ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5);
		if (RR->mc->gather(remotePeerAddress,nwid,mg,routp,gatherLimit)) {
			routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));

			TRACE("responding to remote MULTICAST_GATHER from %s @ %u with %u bytes",remotePeerAddress.toString().c_str(),(unsigned int)fromMemberId,routp.size());
			Mutex::Lock _l2(_members[fromMemberId].lock);
			_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
		}
	}
}

} // namespace ZeroTier

#endif // ZT_ENABLE_CLUSTER