Switch.cpp

/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014 ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <utility>
#include <stdexcept>

#include "Constants.hpp"

#ifdef __WINDOWS__
#include <WinSock2.h>
#include <Windows.h>
#endif

#include "Switch.hpp"
#include "Node.hpp"
#include "EthernetTap.hpp"
#include "InetAddress.hpp"
#include "Topology.hpp"
#include "RuntimeEnvironment.hpp"
#include "Peer.hpp"
#include "NodeConfig.hpp"
#include "CMWC4096.hpp"
#include "AntiRecursion.hpp"

#include "../version.h"

namespace ZeroTier {

Switch::Switch(const RuntimeEnvironment *renv) :
    RR(renv),
    _lastBeacon(0)
{
}

Switch::~Switch()
{
}
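
// Entry point for datagrams received from the physical wire. Dispatches to the
// beacon, fragment, or packet-head handlers based on size and the fragment
// indicator byte; any exception simply causes the datagram to be dropped.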
void Switch::onRemotePacket(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,Buffer<ZT_SOCKET_MAX_MESSAGE_LEN> &data)
{
    try {
        if (data.size() == ZT_PROTO_BEACON_LENGTH) {
            _handleBeacon(fromSock,fromAddr,data);
        } else if (data.size() > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
            if (data[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR)
                _handleRemotePacketFragment(fromSock,fromAddr,data);
            else if (data.size() >= ZT_PROTO_MIN_PACKET_LENGTH)
                _handleRemotePacketHead(fromSock,fromAddr,data);
        }
    } catch (std::exception &ex) {
        TRACE("dropped packet from %s: unexpected exception: %s",fromAddr.toString().c_str(),ex.what());
    } catch ( ... ) {
        TRACE("dropped packet from %s: unexpected exception: (unknown)",fromAddr.toString().c_str());
    }
}
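
// Entry point for Ethernet frames read from a local tap device. Applies the
// network's ethertype, bridging, and broadcast rules, then routes multicasts
// through the multicaster and unicasts to the destination peer or bridge(s).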
void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,const Buffer<4096> &data)
{
    SharedPtr<NetworkConfig> nconf(network->config2());
    if (!nconf)
        return;

    // Sanity check -- bridge loop? OS problem?
    if (to == network->mac())
        return;

    /* Check anti-recursion module to ensure that this is not ZeroTier talking over its own links.
     * Note: even when we introduce a more purposeful binding of the main UDP port, this can
     * still happen because Windows likes to send broadcasts over interfaces that have little
     * to do with their intended target audience. :P */
    if (!RR->antiRec->checkEthernetFrame(data.data(),data.size())) {
        TRACE("%s: rejected recursively addressed ZeroTier packet by tail match (type %s, length: %u)",network->tapDeviceName().c_str(),etherTypeName(etherType),data.size());
        return;
    }

    // Check to make sure this protocol is allowed on this network
    if (!nconf->permitsEtherType(etherType)) {
        TRACE("%s: ignored tap: %s -> %s: ethertype %s not allowed on network %.16llx",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),(unsigned long long)network->id());
        return;
    }

    // Check if this packet is from someone other than the tap -- i.e. bridged in
    bool fromBridged = false;
    if (from != network->mac()) {
        if (!network->permitsBridging(RR->identity.address())) {
            LOG("%s: %s -> %s %s not forwarded, bridging disabled on %.16llx or this peer not a bridge",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),network->id());
            return;
        }
        fromBridged = true;
    }

    if (to.isMulticast()) {
        // Destination is a multicast address (including broadcast)
        uint64_t now = Utils::now();
        MulticastGroup mg(to,0);

        if (to.isBroadcast()) {
            if ((etherType == ZT_ETHERTYPE_ARP)&&(data.size() >= 28)&&(data[2] == 0x08)&&(data[3] == 0x00)&&(data[4] == 6)&&(data[5] == 4)&&(data[7] == 0x01)) {
                // Cram IPv4 IP into ADI field to make IPv4 ARP broadcast channel specific and scalable
                // Also: enableBroadcast() does not apply to ARP since it's required for IPv4
                mg = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(data.field(24,4),4,0));
            } else if (!nconf->enableBroadcast()) {
                // Don't transmit broadcasts if this network doesn't want them
                TRACE("%s: dropped broadcast since ff:ff:ff:ff:ff:ff is not enabled on network %.16llx",network->tapDeviceName().c_str(),network->id());
                return;
            }
        }

        /* Learn multicast groups for bridged-in hosts.
         * Note that some OSes, most notably Linux, do this for you by learning
         * multicast addresses on bridge interfaces and subscribing each slave.
         * But in that case this does no harm, as the sets are just merged. */
        if (fromBridged)
            network->learnBridgedMulticastGroup(mg,now);

        // Check multicast/broadcast bandwidth quotas and reject if quota exceeded
        if (!network->updateAndCheckMulticastBalance(mg,data.size())) {
            TRACE("%s: didn't multicast %d bytes, quota exceeded for multicast group %s",network->tapDeviceName().c_str(),(int)data.size(),mg.toString().c_str());
            return;
        }

        TRACE("%s: MULTICAST %s -> %s %s %d",network->tapDeviceName().c_str(),from.toString().c_str(),mg.toString().c_str(),etherTypeName(etherType),(int)data.size());

        RR->mc->send(
            ((!nconf->isPublic())&&(nconf->com())) ? &(nconf->com()) : (const CertificateOfMembership *)0,
            nconf->multicastLimit(),
            now,
            network->id(),
            mg,
            from,
            etherType,
            data.data(),
            data.size());

        return;
    }

    if (to[0] == MAC::firstOctetForNetwork(network->id())) {
        // Destination is another ZeroTier peer
        Address toZT(to.toAddress(network->id()));
        if (network->isAllowed(toZT)) {
            network->pushMembershipCertificate(toZT,false,Utils::now());
            if (fromBridged) {
                // Must use EXT_FRAME if source is not myself
                Packet outp(toZT,RR->identity.address(),Packet::VERB_EXT_FRAME);
                outp.append(network->id());
                outp.append((unsigned char)0);
                to.appendTo(outp);
                from.appendTo(outp);
                outp.append((uint16_t)etherType);
                outp.append(data);
                outp.compress();
                send(outp,true);
            } else {
                // VERB_FRAME is really just a lighter-weight EXT_FRAME, usable for direct-to-direct (before bridging this was the only unicast method)
                Packet outp(toZT,RR->identity.address(),Packet::VERB_FRAME);
                outp.append(network->id());
                outp.append((uint16_t)etherType);
                outp.append(data);
                outp.compress();
                send(outp,true);
            }
        } else {
            TRACE("%s: UNICAST: %s -> %s %s dropped, destination not a member of closed network %.16llx",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),network->id());
        }

        return;
    }

    {
        // Destination is bridged behind a remote peer
        Address bridges[ZT_MAX_BRIDGE_SPAM];
        unsigned int numBridges = 0;

        bridges[0] = network->findBridgeTo(to);
        if ((bridges[0])&&(bridges[0] != RR->identity.address())&&(network->isAllowed(bridges[0]))&&(network->permitsBridging(bridges[0]))) {
            // We have a known bridge route for this MAC.
            ++numBridges;
        } else if (!nconf->activeBridges().empty()) {
            /* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
             * bridges. This is similar to what many switches do -- if they do not
             * know which port corresponds to a MAC, they send it to all ports. If
             * there aren't any active bridges, numBridges will stay 0 and the
             * packet is dropped. */
            std::set<Address>::const_iterator ab(nconf->activeBridges().begin());
            if (nconf->activeBridges().size() <= ZT_MAX_BRIDGE_SPAM) {
                // If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
                while (ab != nconf->activeBridges().end()) {
                    if (network->isAllowed(*ab)) // config sanity check
                        bridges[numBridges++] = *ab;
                    ++ab;
                }
            } else {
                // Otherwise pick a random set of them
                while (numBridges < ZT_MAX_BRIDGE_SPAM) {
                    if (ab == nconf->activeBridges().end())
                        ab = nconf->activeBridges().begin();
                    if (((unsigned long)RR->prng->next32() % (unsigned long)nconf->activeBridges().size()) == 0) {
                        if (network->isAllowed(*ab)) // config sanity check
                            bridges[numBridges++] = *ab;
                        ++ab;
                    } else ++ab;
                }
            }
        }

        for(unsigned int b=0;b<numBridges;++b) {
            Packet outp(bridges[b],RR->identity.address(),Packet::VERB_EXT_FRAME);
            outp.append(network->id());
            outp.append((unsigned char)0);
            to.appendTo(outp);
            from.appendTo(outp);
            outp.append((uint16_t)etherType);
            outp.append(data);
            outp.compress();
            send(outp,true);
        }
    }
}
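
// Send a ZeroTier packet, queuing it in _txQueue if the destination's identity
// is not yet known (in which case _trySend() will have issued a WHOIS).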
void Switch::send(const Packet &packet,bool encrypt)
{
    if (packet.destination() == RR->identity.address()) {
        TRACE("BUG: caught attempt to send() to self, ignored");
        return;
    }

    if (!_trySend(packet,encrypt)) {
        Mutex::Lock _l(_txQueue_m);
        _txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(Utils::now(),packet,encrypt)));
    }
}
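
// Build and send a HELLO announcing our protocol and software versions and our
// full identity, routed through the normal send() path with encrypt == false.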
void Switch::sendHELLO(const Address &dest)
{
    Packet outp(dest,RR->identity.address(),Packet::VERB_HELLO);
    outp.append((unsigned char)ZT_PROTO_VERSION);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
    outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
    outp.append(Utils::now());
    RR->identity.serialize(outp,false);
    send(outp,false);
}
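
// Build a HELLO for a known peer and push it out over one specific path,
// bypassing send()/_trySend(); returns whatever the socket manager reports.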
bool Switch::sendHELLO(const SharedPtr<Peer> &dest,const Path &path)
{
    uint64_t now = Utils::now();
    Packet outp(dest->address(),RR->identity.address(),Packet::VERB_HELLO);
    outp.append((unsigned char)ZT_PROTO_VERSION);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
    outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
    outp.append(now);
    RR->identity.serialize(outp,false);
    outp.armor(dest->key(),false);
    RR->antiRec->logOutgoingZT(outp.data(),outp.size());
    return RR->sm->send(path.address(),path.tcp(),path.type() == Path::PATH_TYPE_TCP_OUT,outp.data(),outp.size());
}
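
// Same as above but addressed to an explicit UDP endpoint; used for NAT
// traversal attempts from contact() and doTimerTasks().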
bool Switch::sendHELLO(const SharedPtr<Peer> &dest,const InetAddress &destUdp)
{
    uint64_t now = Utils::now();
    Packet outp(dest->address(),RR->identity.address(),Packet::VERB_HELLO);
    outp.append((unsigned char)ZT_PROTO_VERSION);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
    outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
    outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
    outp.append(now);
    RR->identity.serialize(outp,false);
    outp.armor(dest->key(),false);
    RR->antiRec->logOutgoingZT(outp.data(),outp.size());
    return RR->sm->send(destUdp,false,false,outp.data(),outp.size());
}
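
// Attempt to introduce two peers by sending each a RENDEZVOUS containing the
// other's apparent external address, as found by Peer::findCommonGround().
// Attempts for a given pair are rate limited by ZT_MIN_UNITE_INTERVAL unless
// force is true. Returns true if RENDEZVOUS packets were sent.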
bool Switch::unite(const Address &p1,const Address &p2,bool force)
{
    if ((p1 == RR->identity.address())||(p2 == RR->identity.address()))
        return false;

    SharedPtr<Peer> p1p = RR->topology->getPeer(p1);
    if (!p1p)
        return false;
    SharedPtr<Peer> p2p = RR->topology->getPeer(p2);
    if (!p2p)
        return false;

    uint64_t now = Utils::now();
    std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
    if (!(cg.first))
        return false;

    // Addresses are sorted in the key for the last-unite-attempt map so that
    // lookups are order invariant: (p1,p2) == (p2,p1)
    Array<Address,2> uniteKey;
    if (p1 >= p2) {
        uniteKey[0] = p2;
        uniteKey[1] = p1;
    } else {
        uniteKey[0] = p1;
        uniteKey[1] = p2;
    }
    {
        Mutex::Lock _l(_lastUniteAttempt_m);
        std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
        if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
            return false;
        else _lastUniteAttempt[uniteKey] = now;
    }

    TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());

    /* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
     * P2 in randomized order in terms of which gets sent first. This is done
     * since in a few cases NAT-t can be sensitive to slight timing differences
     * in terms of when the two peers initiate. Normally this is accounted for
     * by the nearly-simultaneous RENDEZVOUS kickoff from the supernode, but
     * given that supernodes are hosted on cloud providers this can in some
     * cases have a few ms of latency between packet departures. By randomizing
     * the order we make each attempted NAT-t favor one or the other going
     * first, meaning if it doesn't succeed the first time it might the second
     * and so forth. */
    unsigned int alt = RR->prng->next32() & 1;
    unsigned int completed = alt + 2;
    while (alt != completed) {
        if ((alt & 1) == 0) {
            // Tell p1 where to find p2.
            Packet outp(p1,RR->identity.address(),Packet::VERB_RENDEZVOUS);
            outp.append((unsigned char)0);
            p2.appendTo(outp);
            outp.append((uint16_t)cg.first.port());
            if (cg.first.isV6()) {
                outp.append((unsigned char)16);
                outp.append(cg.first.rawIpData(),16);
            } else {
                outp.append((unsigned char)4);
                outp.append(cg.first.rawIpData(),4);
            }
            outp.armor(p1p->key(),true);
            p1p->send(RR,outp.data(),outp.size(),now);
        } else {
            // Tell p2 where to find p1.
            Packet outp(p2,RR->identity.address(),Packet::VERB_RENDEZVOUS);
            outp.append((unsigned char)0);
            p1.appendTo(outp);
            outp.append((uint16_t)cg.second.port());
            if (cg.second.isV6()) {
                outp.append((unsigned char)16);
                outp.append(cg.second.rawIpData(),16);
            } else {
                outp.append((unsigned char)4);
                outp.append(cg.second.rawIpData(),4);
            }
            outp.armor(p2p->key(),true);
            p2p->send(RR,outp.data(),outp.size(),now);
        }
        ++alt; // counts up and also flips LSB
    }

    return true;
}
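
// Start NAT traversal toward a peer believed to be reachable at atAddr: send
// an immediate HELLO, then queue an escalation that doTimerTasks() will fire
// if no direct path has appeared after ZT_NAT_T_TACTICAL_ESCALATION_DELAY.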
void Switch::contact(const SharedPtr<Peer> &peer,const InetAddress &atAddr)
{
    // Send simple packet directly to indicated address -- works for most NATs
    sendHELLO(peer,atAddr);
    TRACE("sending NAT-t HELLO to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());

    // If we have not punched through after this timeout, open refreshing can of whupass
    {
        Mutex::Lock _l(_contactQueue_m);
        _contactQueue.push_back(ContactQueueEntry(peer,Utils::now() + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,atAddr));
    }

    // Kick main loop out of wait so that it can pick up this
    // change to our scheduled timer tasks.
    RR->sm->whack();
}
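
// Request the identity of an unknown address from a supernode. Only the first
// request for a given address sends immediately; doTimerTasks() handles
// retries and expiration.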
void Switch::requestWhois(const Address &addr)
{
    //TRACE("requesting WHOIS for %s",addr.toString().c_str());
    bool inserted = false;
    {
        Mutex::Lock _l(_outstandingWhoisRequests_m);
        std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair<Address,WhoisRequest>(addr,WhoisRequest())));
        if ((inserted = entry.second))
            entry.first->second.lastSent = Utils::now();
        entry.first->second.retries = 0; // reset retry count if entry already existed
    }
    if (inserted)
        _sendWhoisRequest(addr,(const Address *)0,0);
}

void Switch::cancelWhoisRequest(const Address &addr)
{
    Mutex::Lock _l(_outstandingWhoisRequests_m);
    _outstandingWhoisRequests.erase(addr);
}
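
// Called when a peer's identity becomes known: cancels any outstanding WHOIS
// for it and retries queued inbound (RX) and outbound (TX) packets that were
// waiting on that identity.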
void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{
    { // cancel pending WHOIS since we now know this peer
        Mutex::Lock _l(_outstandingWhoisRequests_m);
        _outstandingWhoisRequests.erase(peer->address());
    }

    { // finish processing any packets waiting on peer's public key / identity
        Mutex::Lock _l(_rxQueue_m);
        for(std::list< SharedPtr<IncomingPacket> >::iterator rxi(_rxQueue.begin());rxi!=_rxQueue.end();) {
            if ((*rxi)->tryDecode(RR))
                _rxQueue.erase(rxi++);
            else ++rxi;
        }
    }

    { // finish sending any packets waiting on peer's public key / identity
        Mutex::Lock _l(_txQueue_m);
        std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address()));
        for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) {
            if (_trySend(txi->second.packet,txi->second.encrypt))
                _txQueue.erase(txi++);
            else ++txi;
        }
    }
}
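
// Periodic housekeeping: fire scheduled aggressive NAT-t attempts, retry or
// expire WHOIS requests, and time out stale TX, RX, and defragmentation queue
// entries. Returns the suggested delay before the next call; the caller caps
// this value.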
unsigned long Switch::doTimerTasks()
{
    unsigned long nextDelay = ~((unsigned long)0); // big number, caller will cap return value
    uint64_t now = Utils::now();

    {
        Mutex::Lock _l(_contactQueue_m);
        for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
            if (now >= qi->fireAtTime) {
                if (!qi->peer->hasActiveDirectPath(now)) {
                    TRACE("deploying aggressive NAT-t against %s(%s)",qi->peer->address().toString().c_str(),qi->inaddr.toString().c_str());

                    /* Shotgun approach -- literally -- against symmetric NATs. Most of these
                     * either increment or decrement ports so this gets a good number. Also try
                     * the original port one more time for good measure, since sometimes it
                     * fails first time around. */
                    int p = (int)qi->inaddr.port() - 2;
                    for(int k=0;k<5;++k) {
                        if ((p > 0)&&(p <= 0xffff)) {
                            qi->inaddr.setPort((unsigned int)p);
                            sendHELLO(qi->peer,qi->inaddr);
                        }
                        ++p;
                    }
                }
                _contactQueue.erase(qi++);
            } else {
                nextDelay = std::min(nextDelay,(unsigned long)(qi->fireAtTime - now));
                ++qi;
            }
        }
    }

    {
        Mutex::Lock _l(_outstandingWhoisRequests_m);
        for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) {
            unsigned long since = (unsigned long)(now - i->second.lastSent);
            if (since >= ZT_WHOIS_RETRY_DELAY) {
                if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) {
                    TRACE("WHOIS %s timed out",i->first.toString().c_str());
                    _outstandingWhoisRequests.erase(i++);
                    continue;
                } else {
                    i->second.lastSent = now;
                    i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries);
                    ++i->second.retries;
                    TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries);
                    nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
                }
            } else nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
            ++i;
        }
    }

    {
        Mutex::Lock _l(_txQueue_m);
        for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) {
            if (_trySend(i->second.packet,i->second.encrypt))
                _txQueue.erase(i++);
            else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
                TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str());
                _txQueue.erase(i++);
            } else ++i;
        }
    }

    {
        Mutex::Lock _l(_rxQueue_m);
        for(std::list< SharedPtr<IncomingPacket> >::iterator i(_rxQueue.begin());i!=_rxQueue.end();) {
            if ((now - (*i)->receiveTime()) > ZT_RECEIVE_QUEUE_TIMEOUT) {
                TRACE("RX %s -> %s timed out",(*i)->source().toString().c_str(),(*i)->destination().toString().c_str());
                _rxQueue.erase(i++);
            } else ++i;
        }
    }

    {
        Mutex::Lock _l(_defragQueue_m);
        for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
            if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
                TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
                _defragQueue.erase(i++);
            } else ++i;
        }
    }

    return std::max(nextDelay,(unsigned long)10); // minimum delay
}
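
// Human-readable names for a few common ethertypes, used in TRACE/LOG output.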
const char *Switch::etherTypeName(const unsigned int etherType)
    throw()
{
    switch(etherType) {
        case ZT_ETHERTYPE_IPV4: return "IPV4";
        case ZT_ETHERTYPE_ARP: return "ARP";
        case ZT_ETHERTYPE_RARP: return "RARP";
        case ZT_ETHERTYPE_ATALK: return "ATALK";
        case ZT_ETHERTYPE_AARP: return "AARP";
        case ZT_ETHERTYPE_IPX_A: return "IPX_A";
        case ZT_ETHERTYPE_IPX_B: return "IPX_B";
        case ZT_ETHERTYPE_IPV6: return "IPV6";
    }
    return "UNKNOWN";
}
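
// Handle a fragment received from the wire: relay it (directly or via a
// supernode) if it is not addressed to us, otherwise file it in _defragQueue
// and assemble and decode the packet once every fragment has arrived.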
void Switch::_handleRemotePacketFragment(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
    Packet::Fragment fragment(data);
    Address destination(fragment.destination());

    if (destination != RR->identity.address()) {
        // Fragment is not for us, so try to relay it
        if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
            fragment.incrementHops();

            // Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
            // It wouldn't hurt anything, just redundant and unnecessary.
            SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
            if ((!relayTo)||(relayTo->send(RR,fragment.data(),fragment.size(),Utils::now()) == Path::PATH_TYPE_NULL)) {
                // Don't know peer or no direct path -- so relay via supernode
                relayTo = RR->topology->getBestSupernode();
                if (relayTo)
                    relayTo->send(RR,fragment.data(),fragment.size(),Utils::now());
            }
        } else {
            TRACE("dropped relay [fragment](%s) -> %s, max hops exceeded",fromAddr.toString().c_str(),destination.toString().c_str());
        }
    } else {
        // Fragment looks like ours
        uint64_t pid = fragment.packetId();
        unsigned int fno = fragment.fragmentNumber();
        unsigned int tf = fragment.totalFragments();

        if ((tf <= ZT_MAX_PACKET_FRAGMENTS)&&(fno < ZT_MAX_PACKET_FRAGMENTS)&&(fno > 0)&&(tf > 1)) {
            // Fragment appears basically sane. Its fragment number must be
            // 1 or more, since a Packet with fragmented bit set is fragment 0.
            // Total fragments must be more than 1, otherwise why are we
            // seeing a Packet::Fragment?

            Mutex::Lock _l(_defragQueue_m);
            std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

            if (dqe == _defragQueue.end()) {
                // We received a Packet::Fragment without its head, so queue it and wait
                DefragQueueEntry &dq = _defragQueue[pid];
                dq.creationTime = Utils::now();
                dq.frags[fno - 1] = fragment;
                dq.totalFragments = tf; // total fragment count is known
                dq.haveFragments = 1 << fno; // we have only this fragment
                //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
            } else if (!(dqe->second.haveFragments & (1 << fno))) {
                // We have other fragments and maybe the head, so add this one and check
                dqe->second.frags[fno - 1] = fragment;
                dqe->second.totalFragments = tf;
                //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());

                if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
                    // We have all fragments -- assemble and process full Packet
                    //TRACE("packet %.16llx is complete, assembling and processing...",pid);
                    SharedPtr<IncomingPacket> packet(dqe->second.frag0);
                    for(unsigned int f=1;f<tf;++f)
                        packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
                    _defragQueue.erase(dqe);

                    if (!packet->tryDecode(RR)) {
                        Mutex::Lock _l(_rxQueue_m);
                        _rxQueue.push_back(packet);
                    }
                }
            } // else this is a duplicate fragment, ignore
        }
    }
}
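
// Handle a packet head (fragment zero) or a complete unfragmented packet.
// Heads for other nodes are relayed, with an opportunistic unite() when both
// legs are UDP; heads for us either complete a pending reassembly or are
// decoded right away, parking in _rxQueue if the sender is not yet known.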
void Switch::_handleRemotePacketHead(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
    SharedPtr<IncomingPacket> packet(new IncomingPacket(data,fromSock,fromAddr));

    Address source(packet->source());
    Address destination(packet->destination());

    //TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());

    if (destination != RR->identity.address()) {
        // Packet is not for us, so try to relay it
        if (packet->hops() < ZT_RELAY_MAX_HOPS) {
            packet->incrementHops();

            SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
            Path::Type relayedVia;
            if ((relayTo)&&((relayedVia = relayTo->send(RR,packet->data(),packet->size(),Utils::now())) != Path::PATH_TYPE_NULL)) {
                /* If both paths are UDP, attempt to invoke UDP NAT-t between peers
                 * by sending VERB_RENDEZVOUS. Do not do this for TCP due to GitHub
                 * issue #63. */
                if ((fromSock->udp())&&(relayedVia == Path::PATH_TYPE_UDP))
                    unite(source,destination,false);
            } else {
                // Don't know peer or no direct path -- so relay via supernode
                relayTo = RR->topology->getBestSupernode(&source,1,true);
                if (relayTo)
                    relayTo->send(RR,packet->data(),packet->size(),Utils::now());
            }
        } else {
            TRACE("dropped relay %s(%s) -> %s, max hops exceeded",packet->source().toString().c_str(),fromAddr.toString().c_str(),destination.toString().c_str());
        }
    } else if (packet->fragmented()) {
        // Packet is the head of a fragmented packet series
        uint64_t pid = packet->packetId();
        Mutex::Lock _l(_defragQueue_m);
        std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

        if (dqe == _defragQueue.end()) {
            // If we have no other fragments yet, create an entry and save the head
            DefragQueueEntry &dq = _defragQueue[pid];
            dq.creationTime = Utils::now();
            dq.frag0 = packet;
            dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
            dq.haveFragments = 1; // head is first bit (left to right)
            //TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
        } else if (!(dqe->second.haveFragments & 1)) {
            // If we have other fragments but no head, see if we are complete with the head
            if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
                // We have all fragments -- assemble and process full Packet
                //TRACE("packet %.16llx is complete, assembling and processing...",pid);

                // packet already contains head, so append fragments
                for(unsigned int f=1;f<dqe->second.totalFragments;++f)
                    packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
                _defragQueue.erase(dqe);

                if (!packet->tryDecode(RR)) {
                    Mutex::Lock _l(_rxQueue_m);
                    _rxQueue.push_back(packet);
                }
            } else {
                // Still waiting on more fragments, so queue the head
                dqe->second.frag0 = packet;
            }
        } // else this is a duplicate head, ignore
    } else {
        // Packet is unfragmented, so just process it
        if (!packet->tryDecode(RR)) {
            Mutex::Lock _l(_rxQueue_m);
            _rxQueue.push_back(packet);
        }
    }
}
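
// Respond to a LAN beacon: if it names a peer we know, ping an existing UDP
// path or send a HELLO to open one, rate limited by
// ZT_MIN_BEACON_RESPONSE_INTERVAL.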
void Switch::_handleBeacon(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
    Address beaconAddr(data.field(ZT_PROTO_BEACON_IDX_ADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
    if (beaconAddr == RR->identity.address())
        return;
    SharedPtr<Peer> peer(RR->topology->getPeer(beaconAddr));
    if (peer) {
        uint64_t now = Utils::now();
        if (peer->haveUdpPath(fromAddr)) {
            if ((now - peer->lastDirectReceive()) >= ZT_PEER_DIRECT_PING_DELAY)
                peer->sendPing(RR,now);
        } else {
            if ((now - _lastBeacon) < ZT_MIN_BEACON_RESPONSE_INTERVAL)
                return;
            _lastBeacon = now;
            sendHELLO(peer,fromAddr);
        }
    }
}
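
// Send a WHOIS for addr to the best supernode not already consulted, returning
// that supernode's address on success or a null Address if none was reachable.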
Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
{
    SharedPtr<Peer> supernode(RR->topology->getBestSupernode(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
    if (supernode) {
        Packet outp(supernode->address(),RR->identity.address(),Packet::VERB_WHOIS);
        addr.appendTo(outp);
        outp.armor(supernode->key(),true);
        uint64_t now = Utils::now();
        if (supernode->send(RR,outp.data(),outp.size(),now) != Path::PATH_TYPE_NULL)
            return supernode->address();
    }
    return Address();
}
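
// Attempt to actually transmit a packet: use a direct path if the peer has an
// active one, otherwise relay via the best supernode, fragmenting anything
// larger than ZT_UDP_DEFAULT_PAYLOAD_MTU. If the destination's identity is
// unknown a WHOIS is requested and false is returned so the caller can queue.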
bool Switch::_trySend(const Packet &packet,bool encrypt)
{
    SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));
    if (peer) {
        uint64_t now = Utils::now();

        SharedPtr<Peer> via;
        if (peer->hasActiveDirectPath(now)) {
            via = peer;
        } else {
            via = RR->topology->getBestSupernode();
            if (!via)
                return false;
        }

        Packet tmp(packet);
        unsigned int chunkSize = std::min(tmp.size(),(unsigned int)ZT_UDP_DEFAULT_PAYLOAD_MTU);
        tmp.setFragmented(chunkSize < tmp.size());
        tmp.armor(peer->key(),encrypt);

        if (via->send(RR,tmp.data(),chunkSize,now) != Path::PATH_TYPE_NULL) {
            if (chunkSize < tmp.size()) {
                // Too big for one bite, fragment the rest
                unsigned int fragStart = chunkSize;
                unsigned int remaining = tmp.size() - chunkSize;
                unsigned int fragsRemaining = (remaining / (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
                if ((fragsRemaining * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
                    ++fragsRemaining;
                unsigned int totalFragments = fragsRemaining + 1;

                for(unsigned int f=0;f<fragsRemaining;++f) {
                    chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
                    Packet::Fragment frag(tmp,fragStart,chunkSize,f + 1,totalFragments);
                    via->send(RR,frag.data(),frag.size(),now);
                    fragStart += chunkSize;
                    remaining -= chunkSize;
                }
            }

            return true;
        }
    } else requestWhois(packet.destination());
    return false;
}

} // namespace ZeroTier