/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014 ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <utility>
#include <stdexcept>

#include "Constants.hpp"

#ifdef __WINDOWS__
#include <WinSock2.h>
#include <Windows.h>
#endif

#include "Switch.hpp"
#include "Node.hpp"
#include "EthernetTap.hpp"
#include "InetAddress.hpp"
#include "Topology.hpp"
#include "RuntimeEnvironment.hpp"
#include "Peer.hpp"
#include "NodeConfig.hpp"
#include "CMWC4096.hpp"
#include "AntiRecursion.hpp"

#include "../version.h"

namespace ZeroTier {

Switch::Switch(const RuntimeEnvironment *renv) :
	RR(renv),
	_lastBeacon(0)
{
}

Switch::~Switch()
{
}
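
/* Entry point for raw datagrams arriving from the physical network. Dispatch
 * is based purely on size and the fragment indicator byte: fixed-size beacons,
 * packet fragments, and full packet heads each go to their own handler;
 * anything else is silently dropped. */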

void Switch::onRemotePacket(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,Buffer<ZT_SOCKET_MAX_MESSAGE_LEN> &data)
{
	try {
		if (data.size() == ZT_PROTO_BEACON_LENGTH) {
			_handleBeacon(fromSock,fromAddr,data);
		} else if (data.size() > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
			if (data[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR)
				_handleRemotePacketFragment(fromSock,fromAddr,data);
			else if (data.size() >= ZT_PROTO_MIN_PACKET_LENGTH)
				_handleRemotePacketHead(fromSock,fromAddr,data);
		}
	} catch (std::exception &ex) {
		TRACE("dropped packet from %s: unexpected exception: %s",fromAddr.toString().c_str(),ex.what());
	} catch ( ... ) {
		TRACE("dropped packet from %s: unexpected exception: (unknown)",fromAddr.toString().c_str());
	}
}
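
/* Entry point for Ethernet frames read from the local tap device. After the
 * sanity, anti-recursion, ethertype, and bridging permission checks, a frame
 * takes one of three paths: multicast/broadcast distribution, direct unicast
 * to another ZeroTier peer, or EXT_FRAME forwarding to one or more bridges
 * when the destination MAC is bridged behind a remote peer. */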

void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,const Buffer<4096> &data)
{
	SharedPtr<NetworkConfig> nconf(network->config2());
	if (!nconf)
		return;

	// Sanity check -- bridge loop? OS problem?
	if (to == network->mac())
		return;

	/* Check anti-recursion module to ensure that this is not ZeroTier talking over its own links.
	 * Note: even when we introduce a more purposeful binding of the main UDP port, this can
	 * still happen because Windows likes to send broadcasts over interfaces that have little
	 * to do with their intended target audience. :P */
	if (!RR->antiRec->checkEthernetFrame(data.data(),data.size())) {
		TRACE("%s: rejected recursively addressed ZeroTier packet by tail match (type %s, length: %u)",network->tapDeviceName().c_str(),etherTypeName(etherType),data.size());
		return;
	}

	// Check to make sure this protocol is allowed on this network
	if (!nconf->permitsEtherType(etherType)) {
		TRACE("%s: ignored tap: %s -> %s: ethertype %s not allowed on network %.16llx",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),(unsigned long long)network->id());
		return;
	}

	// Check if this packet is from someone other than the tap -- i.e. bridged in
	bool fromBridged = false;
	if (from != network->mac()) {
		if (!network->permitsBridging(RR->identity.address())) {
			LOG("%s: %s -> %s %s not forwarded, bridging disabled on %.16llx or this peer not a bridge",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),network->id());
			return;
		}
		fromBridged = true;
	}

	if (to.isMulticast()) {
		// Destination is a multicast address (including broadcast)
		uint64_t now = Utils::now();
		MulticastGroup mg(to,0);

		if (to.isBroadcast()) {
			if ((etherType == ZT_ETHERTYPE_ARP)&&(data.size() >= 28)&&(data[2] == 0x08)&&(data[3] == 0x00)&&(data[4] == 6)&&(data[5] == 4)&&(data[7] == 0x01)) {
				// Cram IPv4 IP into ADI field to make IPv4 ARP broadcast channel specific and scalable
				// Also: enableBroadcast() does not apply to ARP since it's required for IPv4
				mg = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(data.field(24,4),4,0));
			} else if (!nconf->enableBroadcast()) {
				// Don't transmit broadcasts if this network doesn't want them
				TRACE("%s: dropped broadcast since ff:ff:ff:ff:ff:ff is not enabled on network %.16llx",network->tapDeviceName().c_str(),network->id());
				return;
			}
		}

		/* Learn multicast groups for bridged-in hosts.
		 * Note that some OSes, most notably Linux, do this for you by learning
		 * multicast addresses on bridge interfaces and subscribing each slave.
		 * But in that case this does no harm, as the sets are just merged. */
		if (fromBridged)
			network->learnBridgedMulticastGroup(mg,now);

		// Check multicast/broadcast bandwidth quotas and reject if quota exceeded
		if (!network->updateAndCheckMulticastBalance(mg,data.size())) {
			TRACE("%s: didn't multicast %d bytes, quota exceeded for multicast group %s",network->tapDeviceName().c_str(),(int)data.size(),mg.toString().c_str());
			return;
		}

		TRACE("%s: MULTICAST %s -> %s %s %d",network->tapDeviceName().c_str(),from.toString().c_str(),mg.toString().c_str(),etherTypeName(etherType),(int)data.size());

		RR->mc->send(
			((!nconf->isPublic())&&(nconf->com())) ? &(nconf->com()) : (const CertificateOfMembership *)0,
			nconf->multicastLimit(),
			now,
			network->id(),
			nconf->activeBridges(),
			mg,
			(fromBridged) ? from : MAC(),
			etherType,
			data.data(),
			data.size());

		return;
	}

	if (to[0] == MAC::firstOctetForNetwork(network->id())) {
		// Destination is another ZeroTier peer
		Address toZT(to.toAddress(network->id()));
		if (network->isAllowed(toZT)) {
			if (network->peerNeedsOurMembershipCertificate(toZT,Utils::now())) {
				// TODO: once there are no more <1.0.0 nodes around, we can
				// bundle this with EXT_FRAME instead of sending two packets.
				Packet outp(toZT,RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
				nconf->com().serialize(outp);
				send(outp,true);
			}

			if (fromBridged) {
				// EXT_FRAME is used for bridging or if we want to include a COM
				Packet outp(toZT,RR->identity.address(),Packet::VERB_EXT_FRAME);
				outp.append(network->id());
				outp.append((unsigned char)0);
				to.appendTo(outp);
				from.appendTo(outp);
				outp.append((uint16_t)etherType);
				outp.append(data);
				outp.compress();
				send(outp,true);
			} else {
				// FRAME is a shorter version that can be used when there's no bridging and no COM
				Packet outp(toZT,RR->identity.address(),Packet::VERB_FRAME);
				outp.append(network->id());
				outp.append((uint16_t)etherType);
				outp.append(data);
				outp.compress();
				send(outp,true);
			}
		} else {
			TRACE("%s: UNICAST: %s -> %s %s dropped, destination not a member of closed network %.16llx",network->tapDeviceName().c_str(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),network->id());
		}

		return;
	}

	{
		// Destination is bridged behind a remote peer
		Address bridges[ZT_MAX_BRIDGE_SPAM];
		unsigned int numBridges = 0;

		bridges[0] = network->findBridgeTo(to);
		if ((bridges[0])&&(bridges[0] != RR->identity.address())&&(network->isAllowed(bridges[0]))&&(network->permitsBridging(bridges[0]))) {
			// We have a known bridge route for this MAC.
			++numBridges;
		} else if (!nconf->activeBridges().empty()) {
			/* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
			 * bridges. This is similar to what many switches do -- if they do not
			 * know which port corresponds to a MAC, they send it to all ports. If
			 * there aren't any active bridges, numBridges will stay 0 and packet
			 * is dropped. */
			std::vector<Address>::const_iterator ab(nconf->activeBridges().begin());
			if (nconf->activeBridges().size() <= ZT_MAX_BRIDGE_SPAM) {
				// If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
				while (ab != nconf->activeBridges().end()) {
					if (network->isAllowed(*ab)) // config sanity check
						bridges[numBridges++] = *ab;
					++ab;
				}
			} else {
				// Otherwise pick a random set of them
				while (numBridges < ZT_MAX_BRIDGE_SPAM) {
					if (ab == nconf->activeBridges().end())
						ab = nconf->activeBridges().begin();
					if (((unsigned long)RR->prng->next32() % (unsigned long)nconf->activeBridges().size()) == 0) {
						if (network->isAllowed(*ab)) // config sanity check
							bridges[numBridges++] = *ab;
						++ab;
					} else ++ab;
				}
			}
		}

		for(unsigned int b=0;b<numBridges;++b) {
			Packet outp(bridges[b],RR->identity.address(),Packet::VERB_EXT_FRAME);
			outp.append(network->id());
			outp.append((unsigned char)0);
			to.appendTo(outp);
			from.appendTo(outp);
			outp.append((uint16_t)etherType);
			outp.append(data);
			outp.compress();
			send(outp,true);
		}
	}
}
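
/* Public send: packets that cannot be sent immediately (usually because the
 * destination's identity is not yet known) are parked in _txQueue and retried
 * from doAnythingWaitingForPeer() or doTimerTasks(). */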

void Switch::send(const Packet &packet,bool encrypt)
{
	if (packet.destination() == RR->identity.address()) {
		TRACE("BUG: caught attempt to send() to self, ignored");
		return;
	}

	if (!_trySend(packet,encrypt)) {
		Mutex::Lock _l(_txQueue_m);
		_txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(Utils::now(),packet,encrypt)));
	}
}
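
/* Three HELLO variants: the address-only form goes through the normal
 * send()/_trySend() path, while the two peer-specific forms armor the packet
 * themselves and push it out a specific Path or raw UDP address, bypassing
 * normal path selection (used e.g. for NAT traversal attempts). */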

void Switch::sendHELLO(const Address &dest)
{
	Packet outp(dest,RR->identity.address(),Packet::VERB_HELLO);
	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(Utils::now());
	RR->identity.serialize(outp,false);
	send(outp,false);
}

bool Switch::sendHELLO(const SharedPtr<Peer> &dest,const Path &path)
{
	uint64_t now = Utils::now();
	Packet outp(dest->address(),RR->identity.address(),Packet::VERB_HELLO);
	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(now);
	RR->identity.serialize(outp,false);
	outp.armor(dest->key(),false);
	RR->antiRec->logOutgoingZT(outp.data(),outp.size());
	return RR->sm->send(path.address(),path.tcp(),path.type() == Path::PATH_TYPE_TCP_OUT,outp.data(),outp.size());
}

bool Switch::sendHELLO(const SharedPtr<Peer> &dest,const InetAddress &destUdp)
{
	uint64_t now = Utils::now();
	Packet outp(dest->address(),RR->identity.address(),Packet::VERB_HELLO);
	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(now);
	RR->identity.serialize(outp,false);
	outp.armor(dest->key(),false);
	RR->antiRec->logOutgoingZT(outp.data(),outp.size());
	return RR->sm->send(destUdp,false,false,outp.data(),outp.size());
}
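
/* unite() is invoked when this node finds itself relaying between two peers
 * that share a common address family: each peer is sent a VERB_RENDEZVOUS
 * naming the other's external address (a flags byte, the ZeroTier address,
 * the port, then a 4- or 16-byte raw IP). Attempts are rate limited per
 * peer pair via _lastUniteAttempt unless force is true. */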

bool Switch::unite(const Address &p1,const Address &p2,bool force)
{
	if ((p1 == RR->identity.address())||(p2 == RR->identity.address()))
		return false;

	SharedPtr<Peer> p1p = RR->topology->getPeer(p1);
	if (!p1p)
		return false;
	SharedPtr<Peer> p2p = RR->topology->getPeer(p2);
	if (!p2p)
		return false;

	uint64_t now = Utils::now();
	std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
	if (!(cg.first))
		return false;

	// Addresses are sorted in key for last unite attempt map for order
	// invariant lookup: (p1,p2) == (p2,p1)
	Array<Address,2> uniteKey;
	if (p1 >= p2) {
		uniteKey[0] = p2;
		uniteKey[1] = p1;
	} else {
		uniteKey[0] = p1;
		uniteKey[1] = p2;
	}
	{
		Mutex::Lock _l(_lastUniteAttempt_m);
		std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
		if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
			return false;
		else _lastUniteAttempt[uniteKey] = now;
	}

	TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());

	/* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
	 * P2 in randomized order in terms of which gets sent first. This is done
	 * since in a few cases NAT-t can be sensitive to slight timing differences
	 * in terms of when the two peers initiate. Normally this is accounted for
	 * by the nearly-simultaneous RENDEZVOUS kickoff from the supernode, but
	 * given that supernodes are hosted on cloud providers this can in some
	 * cases have a few ms of latency between packet departures. By randomizing
	 * the order we make each attempted NAT-t favor one or the other going
	 * first, meaning if it doesn't succeed the first time it might the second
	 * and so forth. */
	unsigned int alt = RR->prng->next32() & 1;
	unsigned int completed = alt + 2;
	while (alt != completed) {
		if ((alt & 1) == 0) {
			// Tell p1 where to find p2.
			Packet outp(p1,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p2.appendTo(outp);
			outp.append((uint16_t)cg.first.port());
			if (cg.first.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.first.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.first.rawIpData(),4);
			}
			outp.armor(p1p->key(),true);
			p1p->send(RR,outp.data(),outp.size(),now);
		} else {
			// Tell p2 where to find p1.
			Packet outp(p2,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p1.appendTo(outp);
			outp.append((uint16_t)cg.second.port());
			if (cg.second.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.second.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.second.rawIpData(),4);
			}
			outp.armor(p2p->key(),true);
			p2p->send(RR,outp.data(),outp.size(),now);
		}
		++alt; // counts up and also flips LSB
	}

	return true;
}

void Switch::contact(const SharedPtr<Peer> &peer,const InetAddress &atAddr)
{
	// Send simple packet directly to indicated address -- works for most NATs
	sendHELLO(peer,atAddr);
	TRACE("sending NAT-t HELLO to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());

	// If we have not punched through after this timeout, open refreshing can of whupass
	{
		Mutex::Lock _l(_contactQueue_m);
		_contactQueue.push_back(ContactQueueEntry(peer,Utils::now() + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,atAddr));
	}

	// Kick main loop out of wait so that it can pick up this
	// change to our scheduled timer tasks.
	RR->sm->whack();
}

void Switch::requestWhois(const Address &addr)
{
	//TRACE("requesting WHOIS for %s",addr.toString().c_str());

	bool inserted = false;
	{
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair<Address,WhoisRequest>(addr,WhoisRequest())));
		if ((inserted = entry.second))
			entry.first->second.lastSent = Utils::now();
		entry.first->second.retries = 0; // reset retry count if entry already existed
	}
	if (inserted)
		_sendWhoisRequest(addr,(const Address *)0,0);
}

void Switch::cancelWhoisRequest(const Address &addr)
{
	Mutex::Lock _l(_outstandingWhoisRequests_m);
	_outstandingWhoisRequests.erase(addr);
}
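
/* Called when a peer's identity becomes known (e.g. after a WHOIS response):
 * drops any outstanding WHOIS for that address, then retries queued inbound
 * (_rxQueue) and outbound (_txQueue) packets that were waiting on the peer's
 * public key / identity. */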

void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{
	{	// cancel pending WHOIS since we now know this peer
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		_outstandingWhoisRequests.erase(peer->address());
	}

	{	// finish processing any packets waiting on peer's public key / identity
		Mutex::Lock _l(_rxQueue_m);
		for(std::list< SharedPtr<IncomingPacket> >::iterator rxi(_rxQueue.begin());rxi!=_rxQueue.end();) {
			if ((*rxi)->tryDecode(RR))
				_rxQueue.erase(rxi++);
			else ++rxi;
		}
	}

	{	// finish sending any packets waiting on peer's public key / identity
		Mutex::Lock _l(_txQueue_m);
		std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address()));
		for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) {
			if (_trySend(txi->second.packet,txi->second.encrypt))
				_txQueue.erase(txi++);
			else ++txi;
		}
	}
}
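
/* Periodic housekeeping: escalate pending NAT-t contact attempts, retry or
 * expire outstanding WHOIS requests, flush or expire the TX queue, and expire
 * stale RX and defragmentation queue entries. Returns the suggested delay
 * before the next call, clamped to a minimum of 10. */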

unsigned long Switch::doTimerTasks()
{
	unsigned long nextDelay = ~((unsigned long)0); // big number, caller will cap return value
	uint64_t now = Utils::now();

	{
		Mutex::Lock _l(_contactQueue_m);
		for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
			if (now >= qi->fireAtTime) {
				if (!qi->peer->hasActiveDirectPath(now)) {
					TRACE("deploying aggressive NAT-t against %s(%s)",qi->peer->address().toString().c_str(),qi->inaddr.toString().c_str());

					/* Shotgun approach -- literally -- against symmetric NATs. Most of these
					 * either increment or decrement ports so this gets a good number. Also try
					 * the original port one more time for good measure, since sometimes it
					 * fails first time around. */
					int p = (int)qi->inaddr.port() - 2;
					for(int k=0;k<5;++k) {
						if ((p > 0)&&(p <= 0xffff)) {
							qi->inaddr.setPort((unsigned int)p);
							sendHELLO(qi->peer,qi->inaddr);
						}
						++p;
					}
				}
				_contactQueue.erase(qi++);
			} else {
				nextDelay = std::min(nextDelay,(unsigned long)(qi->fireAtTime - now));
				++qi;
			}
		}
	}

	{
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) {
			unsigned long since = (unsigned long)(now - i->second.lastSent);
			if (since >= ZT_WHOIS_RETRY_DELAY) {
				if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) {
					TRACE("WHOIS %s timed out",i->first.toString().c_str());
					_outstandingWhoisRequests.erase(i++);
					continue;
				} else {
					i->second.lastSent = now;
					i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries);
					++i->second.retries;
					TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries);
					nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
				}
			} else nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
			++i;
		}
	}

	{
		Mutex::Lock _l(_txQueue_m);
		for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) {
			if (_trySend(i->second.packet,i->second.encrypt))
				_txQueue.erase(i++);
			else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
				TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str());
				_txQueue.erase(i++);
			} else ++i;
		}
	}

	{
		Mutex::Lock _l(_rxQueue_m);
		for(std::list< SharedPtr<IncomingPacket> >::iterator i(_rxQueue.begin());i!=_rxQueue.end();) {
			if ((now - (*i)->receiveTime()) > ZT_RECEIVE_QUEUE_TIMEOUT) {
				TRACE("RX %s -> %s timed out",(*i)->source().toString().c_str(),(*i)->destination().toString().c_str());
				_rxQueue.erase(i++);
			} else ++i;
		}
	}

	{
		Mutex::Lock _l(_defragQueue_m);
		for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
			if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
				TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
				_defragQueue.erase(i++);
			} else ++i;
		}
	}

	return std::max(nextDelay,(unsigned long)10); // minimum delay
}

const char *Switch::etherTypeName(const unsigned int etherType)
	throw()
{
	switch(etherType) {
		case ZT_ETHERTYPE_IPV4: return "IPV4";
		case ZT_ETHERTYPE_ARP: return "ARP";
		case ZT_ETHERTYPE_RARP: return "RARP";
		case ZT_ETHERTYPE_ATALK: return "ATALK";
		case ZT_ETHERTYPE_AARP: return "AARP";
		case ZT_ETHERTYPE_IPX_A: return "IPX_A";
		case ZT_ETHERTYPE_IPX_B: return "IPX_B";
		case ZT_ETHERTYPE_IPV6: return "IPV6";
	}
	return "UNKNOWN";
}
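
/* Handles a Packet::Fragment (any fragment other than the head). Fragments
 * addressed to other nodes are relayed; fragments for us are collected in
 * _defragQueue keyed by packet ID. Receipt is tracked in the haveFragments
 * bit field (bit 0 = head, bit N = fragment N), so the packet is reassembled
 * as soon as countBits(haveFragments) equals the advertised total. */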

void Switch::_handleRemotePacketFragment(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
	Packet::Fragment fragment(data);
	Address destination(fragment.destination());

	if (destination != RR->identity.address()) {
		// Fragment is not for us, so try to relay it
		if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
			fragment.incrementHops();

			// Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
			// It wouldn't hurt anything, just redundant and unnecessary.
			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
			if ((!relayTo)||(relayTo->send(RR,fragment.data(),fragment.size(),Utils::now()) == Path::PATH_TYPE_NULL)) {
				// Don't know peer or no direct path -- so relay via supernode
				relayTo = RR->topology->getBestSupernode();
				if (relayTo)
					relayTo->send(RR,fragment.data(),fragment.size(),Utils::now());
			}
		} else {
			TRACE("dropped relay [fragment](%s) -> %s, max hops exceeded",fromAddr.toString().c_str(),destination.toString().c_str());
		}
	} else {
		// Fragment looks like ours
		uint64_t pid = fragment.packetId();
		unsigned int fno = fragment.fragmentNumber();
		unsigned int tf = fragment.totalFragments();

		if ((tf <= ZT_MAX_PACKET_FRAGMENTS)&&(fno < ZT_MAX_PACKET_FRAGMENTS)&&(fno > 0)&&(tf > 1)) {
			// Fragment appears basically sane. Its fragment number must be
			// 1 or more, since a Packet with fragmented bit set is fragment 0.
			// Total fragments must be more than 1, otherwise why are we
			// seeing a Packet::Fragment?
			Mutex::Lock _l(_defragQueue_m);
			std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

			if (dqe == _defragQueue.end()) {
				// We received a Packet::Fragment without its head, so queue it and wait
				DefragQueueEntry &dq = _defragQueue[pid];
				dq.creationTime = Utils::now();
				dq.frags[fno - 1] = fragment;
				dq.totalFragments = tf; // total fragment count is known
				dq.haveFragments = 1 << fno; // we have only this fragment
				//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
			} else if (!(dqe->second.haveFragments & (1 << fno))) {
				// We have other fragments and maybe the head, so add this one and check
				dqe->second.frags[fno - 1] = fragment;
				dqe->second.totalFragments = tf;
				//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());

				if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
					// We have all fragments -- assemble and process full Packet
					//TRACE("packet %.16llx is complete, assembling and processing...",pid);
					SharedPtr<IncomingPacket> packet(dqe->second.frag0);
					for(unsigned int f=1;f<tf;++f)
						packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
					_defragQueue.erase(dqe);

					if (!packet->tryDecode(RR)) {
						Mutex::Lock _l(_rxQueue_m);
						_rxQueue.push_back(packet);
					}
				}
			} // else this is a duplicate fragment, ignore
		}
	}
}
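
/* Handles a full packet or the head (fragment 0) of a fragmented packet.
 * Packets for other nodes are relayed directly when a path is known (with an
 * opportunistic unite() to introduce the two peers when both legs are UDP),
 * otherwise via a supernode. Heads of fragmented packets join _defragQueue;
 * unfragmented packets are decoded immediately or parked in _rxQueue if the
 * sender's identity is not yet known. */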

void Switch::_handleRemotePacketHead(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
	SharedPtr<IncomingPacket> packet(new IncomingPacket(data,fromSock,fromAddr));

	Address source(packet->source());
	Address destination(packet->destination());

	//TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());

	if (destination != RR->identity.address()) {
		// Packet is not for us, so try to relay it
		if (packet->hops() < ZT_RELAY_MAX_HOPS) {
			packet->incrementHops();

			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
			Path::Type relayedVia;
			if ((relayTo)&&((relayedVia = relayTo->send(RR,packet->data(),packet->size(),Utils::now())) != Path::PATH_TYPE_NULL)) {
				/* If both paths are UDP, attempt to invoke UDP NAT-t between peers
				 * by sending VERB_RENDEZVOUS. Do not do this for TCP due to GitHub
				 * issue #63. */
				if ((fromSock->udp())&&(relayedVia == Path::PATH_TYPE_UDP))
					unite(source,destination,false);
			} else {
				// Don't know peer or no direct path -- so relay via supernode
				relayTo = RR->topology->getBestSupernode(&source,1,true);
				if (relayTo)
					relayTo->send(RR,packet->data(),packet->size(),Utils::now());
			}
		} else {
			TRACE("dropped relay %s(%s) -> %s, max hops exceeded",packet->source().toString().c_str(),fromAddr.toString().c_str(),destination.toString().c_str());
		}
	} else if (packet->fragmented()) {
		// Packet is the head of a fragmented packet series
		uint64_t pid = packet->packetId();
		Mutex::Lock _l(_defragQueue_m);
		std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

		if (dqe == _defragQueue.end()) {
			// If we have no other fragments yet, create an entry and save the head
			DefragQueueEntry &dq = _defragQueue[pid];
			dq.creationTime = Utils::now();
			dq.frag0 = packet;
			dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
			dq.haveFragments = 1; // head is first bit (left to right)
			//TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
		} else if (!(dqe->second.haveFragments & 1)) {
			// If we have other fragments but no head, see if we are complete with the head
			if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
				// We have all fragments -- assemble and process full Packet
				//TRACE("packet %.16llx is complete, assembling and processing...",pid);

				// packet already contains head, so append fragments
				for(unsigned int f=1;f<dqe->second.totalFragments;++f)
					packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
				_defragQueue.erase(dqe);

				if (!packet->tryDecode(RR)) {
					Mutex::Lock _l(_rxQueue_m);
					_rxQueue.push_back(packet);
				}
			} else {
				// Still waiting on more fragments, so queue the head
				dqe->second.frag0 = packet;
			}
		} // else this is a duplicate head, ignore
	} else {
		// Packet is unfragmented, so just process it
		if (!packet->tryDecode(RR)) {
			Mutex::Lock _l(_rxQueue_m);
			_rxQueue.push_back(packet);
		}
	}
}
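
/* Handles a minimal beacon datagram containing only a ZeroTier address. If
 * the sender is a known peer we either ping an already-confirmed UDP path or
 * answer with a HELLO to the beacon's source address, rate limited by
 * _lastBeacon / ZT_MIN_BEACON_RESPONSE_INTERVAL. */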

void Switch::_handleBeacon(const SharedPtr<Socket> &fromSock,const InetAddress &fromAddr,const Buffer<4096> &data)
{
	Address beaconAddr(data.field(ZT_PROTO_BEACON_IDX_ADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
	if (beaconAddr == RR->identity.address())
		return;
	SharedPtr<Peer> peer(RR->topology->getPeer(beaconAddr));
	if (peer) {
		uint64_t now = Utils::now();
		if (peer->haveUdpPath(fromAddr)) {
			if ((now - peer->lastDirectReceive()) >= ZT_PEER_DIRECT_PING_DELAY)
				peer->sendPing(RR,now);
		} else {
			if ((now - _lastBeacon) < ZT_MIN_BEACON_RESPONSE_INTERVAL)
				return;
			_lastBeacon = now;
			sendHELLO(peer,fromAddr);
		}
	}
}

Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
{
	SharedPtr<Peer> supernode(RR->topology->getBestSupernode(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
	if (supernode) {
		Packet outp(supernode->address(),RR->identity.address(),Packet::VERB_WHOIS);
		addr.appendTo(outp);
		outp.armor(supernode->key(),true);
		uint64_t now = Utils::now();
		if (supernode->send(RR,outp.data(),outp.size(),now) != Path::PATH_TYPE_NULL)
			return supernode->address();
	}
	return Address();
}
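
/* Attempts to send a packet now. The packet is armored with the destination
 * peer's key and sent either over a direct path or via the best supernode.
 * The first ZT_UDP_DEFAULT_PAYLOAD_MTU bytes go out as the head; any
 * remainder is split into Packet::Fragment chunks carrying at most
 * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH) payload bytes
 * each, so fragsRemaining = ceil(remaining / chunk) and totalFragments =
 * fragsRemaining + 1 including the head. Returns false (after issuing a
 * WHOIS) if the destination's identity is not yet known. */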

bool Switch::_trySend(const Packet &packet,bool encrypt)
{
	SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));
	if (peer) {
		uint64_t now = Utils::now();

		SharedPtr<Peer> via;
		if (peer->hasActiveDirectPath(now)) {
			via = peer;
		} else {
			via = RR->topology->getBestSupernode();
			if (!via)
				return false;
		}

		Packet tmp(packet);
		unsigned int chunkSize = std::min(tmp.size(),(unsigned int)ZT_UDP_DEFAULT_PAYLOAD_MTU);
		tmp.setFragmented(chunkSize < tmp.size());
		tmp.armor(peer->key(),encrypt);

		if (via->send(RR,tmp.data(),chunkSize,now) != Path::PATH_TYPE_NULL) {
			if (chunkSize < tmp.size()) {
				// Too big for one bite, fragment the rest
				unsigned int fragStart = chunkSize;
				unsigned int remaining = tmp.size() - chunkSize;
				unsigned int fragsRemaining = (remaining / (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
				if ((fragsRemaining * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
					++fragsRemaining;
				unsigned int totalFragments = fragsRemaining + 1;

				for(unsigned int fno=1;fno<totalFragments;++fno) {
					chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
					Packet::Fragment frag(tmp,fragStart,chunkSize,fno,totalFragments);
					via->send(RR,frag.data(),frag.size(),now);
					fragStart += chunkSize;
					remaining -= chunkSize;
				}
			}

			return true;
		}
	} else {
		requestWhois(packet.destination());
	}
	return false;
}

} // namespace ZeroTier