Switch.cpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2026-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#include "Switch.hpp"
#include "../include/ZeroTierOne.h"
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "Metrics.hpp"
#include "Node.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
#include "RuntimeEnvironment.hpp"
#include "SelfAwareness.hpp"
#include "Topology.hpp"
#include "Trace.hpp"

#include <algorithm>
#include <math.h> // sqrt() used by control_law()
#include <stdio.h>
#include <stdlib.h>
#include <thread> // std::thread used to defer putFrame() calls off the tx path
namespace ZeroTier {

Switch::Switch(const RuntimeEnvironment* renv) : RR(renv), _lastBeaconResponse(0), _lastCheckedQueues(0), _lastUniteAttempt(8)
{
}

// Returns true if packet appears valid; pos and proto will be set
static bool _ipv6GetPayload(const uint8_t* frameData, unsigned int frameLen, unsigned int& pos, unsigned int& proto)
{
    if (frameLen < 40) {
        return false;
    }
    pos = 40;
    proto = frameData[6];
    while (pos <= frameLen) {
        switch (proto) {
            case 0:   // hop-by-hop options
            case 43:  // routing
            case 60:  // destination options
            case 135: // mobility options
                if ((pos + 8) > frameLen) {
                    return false; // invalid!
                }
                proto = frameData[pos];
                pos += ((unsigned int)frameData[pos + 1] * 8) + 8;
                break;
            // case 44: // fragment -- we currently can't parse these and they are deprecated in IPv6 anyway
            // case 50:
            // case 51: // IPSec ESP and AH -- we have to stop here since this is encrypted stuff
            default:
                return true;
        }
    }
    return false; // overflow == invalid
}
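
// Illustrative walk-through (hypothetical values): for a frame whose Next
// Header byte (frameData[6]) is 0 (hop-by-hop options) and whose extension
// header at offset 40 has a Hdr Ext Len byte of 1, the loop advances pos by
// (1 * 8) + 8 = 16 to offset 56 and continues with the protocol number read
// from frameData[40]. Any protocol not in the case list (e.g. 6 for TCP)
// terminates the walk with pos pointing at that protocol's payload.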
void Switch::onRemotePacket(void* tPtr, const int64_t localSocket, const InetAddress& fromAddr, const void* data, unsigned int len)
{
    int32_t flowId = ZT_QOS_NO_FLOW;
    try {
        const int64_t now = RR->node->now();

        const SharedPtr<Path> path(RR->topology->getPath(localSocket, fromAddr));
        path->received(now);

        if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
            if (reinterpret_cast<const uint8_t*>(data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
                // Handle fragment ----------------------------------------------------

                Packet::Fragment fragment(data, len);
                const Address destination(fragment.destination());

                if (destination != RR->identity.address()) {
                    // RELAY: fragment is for a different node, so maybe send it there if we should relay.
                    /*
                    if ((! RR->topology->amUpstream()) && (! path->trustEstablished(now))) {
                        return;
                    }
                    */
                    if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
                        fragment.incrementHops();

                        // Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
                        // It wouldn't hurt anything, just redundant and unnecessary.
                        SharedPtr<Peer> relayTo = RR->topology->getPeer(tPtr, destination);
                        if ((! relayTo) || (! relayTo->sendDirect(tPtr, fragment.data(), fragment.size(), now, false))) {
                            // Don't know peer or no direct path -- so relay via someone upstream
                            relayTo = RR->topology->getUpstreamPeer(0);
                            if (relayTo) {
                                relayTo->sendDirect(tPtr, fragment.data(), fragment.size(), now, true);
                            }
                        }
                    }
                }
                else {
                    // RECEIVE: fragment appears to be ours (this is validated in cryptographic auth after assembly)
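                    // Reassembly bookkeeping: fragment 0 is the packet head, so a
                    // Packet::Fragment always carries fragmentNumber >= 1. haveFragments
                    // is a bitmask (bit 0 = head, bit n = fragment n); the packet is
                    // complete once its popcount equals totalFragments. For example,
                    // with totalFragments == 3, assembly proceeds once haveFragments
                    // reaches 0b111.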
                    const uint64_t fragmentPacketId = fragment.packetId();
                    const unsigned int fragmentNumber = fragment.fragmentNumber();
                    const unsigned int totalFragments = fragment.totalFragments();

                    if ((totalFragments <= ZT_MAX_PACKET_FRAGMENTS) && (fragmentNumber < ZT_MAX_PACKET_FRAGMENTS) && (fragmentNumber > 0) && (totalFragments > 1)) {
                        // Fragment appears basically sane. Its fragment number must be
                        // 1 or more, since a Packet with fragmented bit set is fragment 0.
                        // Total fragments must be more than 1, otherwise why are we
                        // seeing a Packet::Fragment?

                        RXQueueEntry* const rq = _findRXQueueEntry(fragmentPacketId);
                        Mutex::Lock rql(rq->lock);
                        if (rq->packetId != fragmentPacketId) {
                            // No packet found, so we received a fragment without its head.
                            rq->flowId = flowId;
                            rq->timestamp = now;
                            rq->packetId = fragmentPacketId;
                            rq->frags[fragmentNumber - 1] = fragment;
                            rq->totalFragments = totalFragments;     // total fragment count is known
                            rq->haveFragments = 1 << fragmentNumber; // we have only this fragment
                            rq->complete = false;
                        }
                        else if (! (rq->haveFragments & (1 << fragmentNumber))) {
                            // We have other fragments and maybe the head, so add this one and check
                            rq->frags[fragmentNumber - 1] = fragment;
                            rq->totalFragments = totalFragments;

                            if (Utils::countBits(rq->haveFragments |= (1 << fragmentNumber)) == totalFragments) {
                                // We have all fragments -- assemble and process full Packet
                                for (unsigned int f = 1; f < totalFragments; ++f) {
                                    rq->frag0.append(rq->frags[f - 1].payload(), rq->frags[f - 1].payloadLength());
                                }

                                if (rq->frag0.tryDecode(RR, tPtr, flowId)) {
                                    rq->timestamp = 0; // packet decoded, free entry
                                }
                                else {
                                    rq->complete = true; // set complete flag but leave entry since it probably needs WHOIS or something
                                }
                            }
                        } // else this is a duplicate fragment, ignore
                    }
                }

                // --------------------------------------------------------------------
            }
            else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) { // min length check is important!
                // Handle packet head -------------------------------------------------

                const Address destination(reinterpret_cast<const uint8_t*>(data) + 8, ZT_ADDRESS_LENGTH);
                const Address source(reinterpret_cast<const uint8_t*>(data) + 13, ZT_ADDRESS_LENGTH);

                if (source == RR->identity.address()) {
                    return;
                }

                if (destination != RR->identity.address()) {
                    // RELAY: packet head is for a different node, so maybe send it there if we should relay.
                    if (/* (! RR->topology->amUpstream()) && (! path->trustEstablished(now)) && */ (source != RR->identity.address())) {
                        return;
                    }
                    Packet packet(data, len);

                    if (packet.hops() < ZT_RELAY_MAX_HOPS) {
                        packet.incrementHops();
                        SharedPtr<Peer> relayTo = RR->topology->getPeer(tPtr, destination);
                        if ((relayTo) && (relayTo->sendDirect(tPtr, packet.data(), packet.size(), now, false))) {
                            if ((source != RR->identity.address()) && (_shouldUnite(now, source, destination))) {
                                const SharedPtr<Peer> sourcePeer(RR->topology->getPeer(tPtr, source));
                                if (sourcePeer) {
                                    relayTo->introduce(tPtr, now, sourcePeer);
                                }
                            }
                        }
                        else {
                            relayTo = RR->topology->getUpstreamPeer(0);
                            if ((relayTo) && (relayTo->address() != source)) {
                                if (relayTo->sendDirect(tPtr, packet.data(), packet.size(), now, true)) {
                                    const SharedPtr<Peer> sourcePeer(RR->topology->getPeer(tPtr, source));
                                    if (sourcePeer) {
                                        relayTo->introduce(tPtr, now, sourcePeer);
                                    }
                                }
                            }
                        }
                    }
                }
                else if ((reinterpret_cast<const uint8_t*>(data)[ZT_PACKET_IDX_FLAGS] & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
                    // RECEIVE: packet head appears to be ours (this is validated in cryptographic auth after assembly)
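                    // The head doubles as fragment 0 (bit 0 of rq->haveFragments).
                    // The packet ID below is simply the first 8 bytes of the packet
                    // read as a big-endian 64-bit integer.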
                    const uint64_t packetId =
                        ((((uint64_t)reinterpret_cast<const uint8_t*>(data)[0]) << 56) | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[1]) << 48) | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[2]) << 40)
                         | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[3]) << 32) | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[4]) << 24) | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[5]) << 16)
                         | (((uint64_t)reinterpret_cast<const uint8_t*>(data)[6]) << 8) | ((uint64_t)reinterpret_cast<const uint8_t*>(data)[7]));

                    RXQueueEntry* const rq = _findRXQueueEntry(packetId);
                    Mutex::Lock rql(rq->lock);
                    if (rq->packetId != packetId) {
                        // If we have no other fragments yet, create an entry and save the head
                        rq->flowId = flowId;
                        rq->timestamp = now;
                        rq->packetId = packetId;
                        rq->frag0.init(data, len, path, now);
                        rq->totalFragments = 0;
                        rq->haveFragments = 1;
                        rq->complete = false;
                    }
                    else if (! (rq->haveFragments & 1)) {
                        // If we have other fragments but no head, see if we are complete with the head
                        if ((rq->totalFragments > 1) && (Utils::countBits(rq->haveFragments |= 1) == rq->totalFragments)) {
                            // We have all fragments -- assemble and process full Packet
                            rq->frag0.init(data, len, path, now);
                            for (unsigned int f = 1; f < rq->totalFragments; ++f) {
                                rq->frag0.append(rq->frags[f - 1].payload(), rq->frags[f - 1].payloadLength());
                            }

                            if (rq->frag0.tryDecode(RR, tPtr, flowId)) {
                                rq->timestamp = 0; // packet decoded, free entry
                            }
                            else {
                                rq->complete = true; // set complete flag but leave entry since it probably needs WHOIS or something
                            }
                        }
                        else {
                            // Still waiting on more fragments, but keep the head
                            rq->frag0.init(data, len, path, now);
                        }
                    } // else this is a duplicate head, ignore
                }
                else {
                    // RECEIVE: unfragmented packet appears to be ours (this is validated in cryptographic auth after assembly)
                    IncomingPacket packet(data, len, path, now);
                    if (! packet.tryDecode(RR, tPtr, flowId)) {
                        RXQueueEntry* const rq = _nextRXQueueEntry();
                        Mutex::Lock rql(rq->lock);
                        rq->flowId = flowId;
                        rq->timestamp = now;
                        rq->packetId = packet.packetId();
                        rq->frag0 = packet;
                        rq->totalFragments = 1;
                        rq->haveFragments = 1;
                        rq->complete = true;
                    }
                }

                // --------------------------------------------------------------------
            }
        }
    }
    catch (...) {
    } // sanity check, should be caught elsewhere
}
void Switch::onLocalEthernet(void* tPtr, const SharedPtr<Network>& network, const MAC& from, const MAC& to, unsigned int etherType, unsigned int vlanId, const void* data, unsigned int len)
{
    if (! network->hasConfig()) {
        return;
    }

    // Check if this packet is from someone other than the tap -- i.e. bridged in
    bool fromBridged;
    if ((fromBridged = (from != network->mac()))) {
        if (! network->config().permitsBridging(RR->identity.address())) {
            RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "not a bridge");
            return;
        }
    }

    uint8_t qosBucket = ZT_AQM_DEFAULT_BUCKET;

    /**
     * A pseudo-unique identifier used by balancing and bonding policies to
     * categorize individual flows/conversations for assignment to a specific
     * physical path. This identifier consists of the source port and
     * destination port of the encapsulated frame.
     *
     * A flowId of -1 will indicate that there is no preference for how this
     * packet shall be sent. An example of this would be an ICMP packet.
     */
    int32_t flowId = ZT_QOS_NO_FLOW;

    if (etherType == ZT_ETHERTYPE_IPV4 && (len >= 20)) {
        uint16_t srcPort = 0;
        uint16_t dstPort = 0;
        uint8_t proto = (reinterpret_cast<const uint8_t*>(data)[9]);
        const unsigned int headerLen = 4 * (reinterpret_cast<const uint8_t*>(data)[0] & 0xf);
        switch (proto) {
            case 0x01: // ICMP
                // flowId = 0x01;
                break;
            // All these start with 16-bit source and destination port in that order
            case 0x06: // TCP
            case 0x11: // UDP
            case 0x84: // SCTP
            case 0x88: // UDPLite
                if (len > (headerLen + 4)) {
                    unsigned int pos = headerLen + 0;
                    srcPort = (reinterpret_cast<const uint8_t*>(data)[pos++]) << 8;
                    srcPort |= (reinterpret_cast<const uint8_t*>(data)[pos]);
                    pos++;
                    dstPort = (reinterpret_cast<const uint8_t*>(data)[pos++]) << 8;
                    dstPort |= (reinterpret_cast<const uint8_t*>(data)[pos]);
                    flowId = dstPort ^ srcPort ^ proto;
                }
                break;
        }
    }
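
    // Illustrative example (hypothetical values): a TCP frame (proto 0x06)
    // with srcPort 51234 (0xC822) and dstPort 443 (0x01BB) yields
    // flowId = 0x01BB ^ 0xC822 ^ 0x06 = 0xC99F. Because XOR is commutative,
    // both directions of a conversation map to the same flow ID.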
    if (etherType == ZT_ETHERTYPE_IPV6 && (len >= 40)) {
        uint16_t srcPort = 0;
        uint16_t dstPort = 0;
        unsigned int pos;
        unsigned int proto;
        _ipv6GetPayload((const uint8_t*)data, len, pos, proto);
        switch (proto) {
            case 0x3A: // ICMPv6
                // flowId = 0x3A;
                break;
            // All these start with 16-bit source and destination port in that order
            case 0x06: // TCP
            case 0x11: // UDP
            case 0x84: // SCTP
            case 0x88: // UDPLite
                if (len > (pos + 4)) {
                    srcPort = (reinterpret_cast<const uint8_t*>(data)[pos++]) << 8;
                    srcPort |= (reinterpret_cast<const uint8_t*>(data)[pos]);
                    pos++;
                    dstPort = (reinterpret_cast<const uint8_t*>(data)[pos++]) << 8;
                    dstPort |= (reinterpret_cast<const uint8_t*>(data)[pos]);
                    flowId = dstPort ^ srcPort ^ proto;
                }
                break;
            default:
                break;
        }
    }

    if (to.isMulticast()) {
        MulticastGroup multicastGroup(to, 0);

        if (to.isBroadcast()) {
            if ((etherType == ZT_ETHERTYPE_ARP) && (len >= 28)
                && ((((const uint8_t*)data)[2] == 0x08) && (((const uint8_t*)data)[3] == 0x00) && (((const uint8_t*)data)[4] == 6) && (((const uint8_t*)data)[5] == 4) && (((const uint8_t*)data)[7] == 0x01))) {
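                // The bytes checked above are ARP header fields: protocol type
                // 0x0800 (IPv4), hardware address length 6, protocol address
                // length 4, and opcode 0x01 (request). The queried IPv4 address
                // sits at byte offset 24, after the sender MAC/IP and target MAC.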
                /* IPv4 ARP is one of the few special cases that we impose upon what is
                 * otherwise a straightforward Ethernet switch emulation. Vanilla ARP
                 * is dumb old broadcast and simply doesn't scale. ZeroTier multicast
                 * groups have an additional field called ADI (additional distinguishing
                 * information) which was added specifically for ARP though it could
                 * be used for other things too. We then take ARP broadcasts and turn
                 * them into multicasts by stuffing the IP address being queried into
                 * the 32-bit ADI field. In practice this uses our multicast pub/sub
                 * system to implement a kind of extended/distributed ARP table. */
                multicastGroup = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(((const unsigned char*)data) + 24, 4, 0));
            }
            else if (! network->config().enableBroadcast()) {
                // Don't transmit broadcasts if this network doesn't want them
                RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "broadcast disabled");
                return;
            }
        }
        else if ((etherType == ZT_ETHERTYPE_IPV6) && (len >= (40 + 8 + 16))) {
            // IPv6 NDP emulation for certain very special patterns of private IPv6 addresses -- if enabled
            if ((network->config().ndpEmulation()) && (reinterpret_cast<const uint8_t*>(data)[6] == 0x3a) && (reinterpret_cast<const uint8_t*>(data)[40] == 0x87)) { // ICMPv6 neighbor solicitation
                Address v6EmbeddedAddress;
                const uint8_t* const pkt6 = reinterpret_cast<const uint8_t*>(data) + 40 + 8;
                const uint8_t* my6 = (const uint8_t*)0;

                // ZT-RFC4193 address: fdNN:NNNN:NNNN:NNNN:NN99:93DD:DDDD:DDDD / 88 (one /128 per actual host)
                // ZT-6PLANE address: fcXX:XXXX:XXDD:DDDD:DDDD:####:####:#### / 40 (one /80 per actual host)
                // (XX - lower 32 bits of network ID XORed with higher 32 bits)
                // For these to work, we must have a ZT-managed address assigned in one of the
                // above formats, and the query must match its prefix.
                for (unsigned int sipk = 0; sipk < network->config().staticIpCount; ++sipk) {
                    const InetAddress* const sip = &(network->config().staticIps[sipk]);
                    if (sip->ss_family == AF_INET6) {
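                        // InetAddress stores the prefix length in the sockaddr_in6
                        // port field for managed addresses, so sin6_port below is
                        // the netmask bits, not a transport port.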
                        my6 = reinterpret_cast<const uint8_t*>(reinterpret_cast<const struct sockaddr_in6*>(&(*sip))->sin6_addr.s6_addr);
                        const unsigned int sipNetmaskBits = Utils::ntoh((uint16_t)reinterpret_cast<const struct sockaddr_in6*>(&(*sip))->sin6_port);
                        if ((sipNetmaskBits == 88) && (my6[0] == 0xfd) && (my6[9] == 0x99) && (my6[10] == 0x93)) { // ZT-RFC4193 /88 ???
                            unsigned int ptr = 0;
                            while (ptr != 11) {
                                if (pkt6[ptr] != my6[ptr]) {
                                    break;
                                }
                                ++ptr;
                            }
                            if (ptr == 11) { // prefix match!
                                v6EmbeddedAddress.setTo(pkt6 + ptr, 5);
                                break;
                            }
                        }
                        else if (sipNetmaskBits == 40) { // ZT-6PLANE /40 ???
                            const uint32_t nwid32 = (uint32_t)((network->id() ^ (network->id() >> 32)) & 0xffffffff);
                            if ((my6[0] == 0xfc) && (my6[1] == (uint8_t)((nwid32 >> 24) & 0xff)) && (my6[2] == (uint8_t)((nwid32 >> 16) & 0xff)) && (my6[3] == (uint8_t)((nwid32 >> 8) & 0xff)) && (my6[4] == (uint8_t)(nwid32 & 0xff))) {
                                unsigned int ptr = 0;
                                while (ptr != 5) {
                                    if (pkt6[ptr] != my6[ptr]) {
                                        break;
                                    }
                                    ++ptr;
                                }
                                if (ptr == 5) { // prefix match!
                                    v6EmbeddedAddress.setTo(pkt6 + ptr, 5);
                                    break;
                                }
                            }
                        }
                    }
                }

                if ((v6EmbeddedAddress) && (v6EmbeddedAddress != RR->identity.address())) {
                    const MAC peerMac(v6EmbeddedAddress, network->id());

                    uint8_t adv[72];
                    adv[0] = 0x60;
                    adv[1] = 0x00;
                    adv[2] = 0x00;
                    adv[3] = 0x00;
                    adv[4] = 0x00;
                    adv[5] = 0x20;
                    adv[6] = 0x3a;
                    adv[7] = 0xff;
                    for (int i = 0; i < 16; ++i) {
                        adv[8 + i] = pkt6[i];
                    }
                    for (int i = 0; i < 16; ++i) {
                        adv[24 + i] = my6[i];
                    }
                    adv[40] = 0x88;
                    adv[41] = 0x00;
                    adv[42] = 0x00;
                    adv[43] = 0x00; // future home of checksum
                    adv[44] = 0x60;
                    adv[45] = 0x00;
                    adv[46] = 0x00;
                    adv[47] = 0x00;
                    for (int i = 0; i < 16; ++i) {
                        adv[48 + i] = pkt6[i];
                    }
                    adv[64] = 0x02;
                    adv[65] = 0x01;
                    adv[66] = peerMac[0];
                    adv[67] = peerMac[1];
                    adv[68] = peerMac[2];
                    adv[69] = peerMac[3];
                    adv[70] = peerMac[4];
                    adv[71] = peerMac[5];

                    uint16_t pseudo_[36];
                    uint8_t* const pseudo = reinterpret_cast<uint8_t*>(pseudo_);
                    for (int i = 0; i < 32; ++i) {
                        pseudo[i] = adv[8 + i];
                    }
                    pseudo[32] = 0x00;
                    pseudo[33] = 0x00;
                    pseudo[34] = 0x00;
                    pseudo[35] = 0x20;
                    pseudo[36] = 0x00;
                    pseudo[37] = 0x00;
                    pseudo[38] = 0x00;
                    pseudo[39] = 0x3a;
                    for (int i = 0; i < 32; ++i) {
                        pseudo[40 + i] = adv[40 + i];
                    }
                    uint32_t checksum = 0;
                    for (int i = 0; i < 36; ++i) {
                        checksum += Utils::hton(pseudo_[i]);
                    }
                    while ((checksum >> 16)) {
                        checksum = (checksum & 0xffff) + (checksum >> 16);
                    }
                    checksum = ~checksum;
                    adv[42] = (checksum >> 8) & 0xff;
                    adv[43] = checksum & 0xff;
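
                    // This is the standard Internet one's-complement checksum (RFC
                    // 1071) over the IPv6 pseudo-header plus the 32-byte ICMPv6
                    // message: sum the 16-bit words, fold any carries back into the
                    // low 16 bits (e.g. an intermediate sum of 0x186A0 folds to
                    // 0x86A1), then take the one's complement.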
                    //
                    // call on separate background thread
                    // this prevents problems related to trying to do rx while inside of doing tx, such as acquiring same lock recursively
                    //
                    std::thread([=]() { RR->node->putFrame(tPtr, network->id(), network->userPtr(), peerMac, from, ZT_ETHERTYPE_IPV6, 0, adv, 72); }).detach();

                    return; // NDP emulation done. We have forged a "fake" reply, so no need to send actual NDP query.
                } // else no NDP emulation
            } // else no NDP emulation
        }

        // Check this after NDP emulation, since that has to be allowed in exactly this case
        if (network->config().multicastLimit == 0) {
            RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "multicast disabled");
            return;
        }

        /* Learn multicast groups for bridged-in hosts.
         * Note that some OSes, most notably Linux, do this for you by learning
         * multicast addresses on bridge interfaces and subscribing each slave.
         * But in that case this does no harm, as the sets are just merged. */
        if (fromBridged) {
            network->learnBridgedMulticastGroup(tPtr, multicastGroup, RR->node->now());
        }

        // First pass sets noTee to false, but noTee is set to true in OutboundMulticast to prevent duplicates.
        if (! network->filterOutgoingPacket(tPtr, false, RR->identity.address(), Address(), from, to, (const uint8_t*)data, len, etherType, vlanId, qosBucket)) {
            RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "filter blocked");
            return;
        }

        RR->mc->send(tPtr, RR->node->now(), network, Address(), multicastGroup, (fromBridged) ? from : MAC(), etherType, data, len);
    }
    else if (to == network->mac()) {
        // Destination is this node, so just reinject it
        //
        // same pattern as putFrame call above
        //
        std::thread([=]() { RR->node->putFrame(tPtr, network->id(), network->userPtr(), from, to, etherType, vlanId, data, len); }).detach();
    }
    else if (to[0] == MAC::firstOctetForNetwork(network->id())) {
        // Destination is another ZeroTier peer on the same network

        Address toZT(to.toAddress(network->id())); // since in-network MACs are derived from addresses and network IDs, we can reverse this
        SharedPtr<Peer> toPeer(RR->topology->getPeer(tPtr, toZT));

        if (! network->filterOutgoingPacket(tPtr, false, RR->identity.address(), toZT, from, to, (const uint8_t*)data, len, etherType, vlanId, qosBucket)) {
            RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "filter blocked");
            return;
        }

        network->pushCredentialsIfNeeded(tPtr, toZT, RR->node->now());
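
        // VERB_FRAME carries only the network ID and ethertype; the MACs are
        // implied by the ZeroTier addresses. VERB_EXT_FRAME adds explicit
        // to/from MACs and is needed when the source MAC is bridged, since it
        // cannot be derived from our ZT address.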
        if (! fromBridged) {
            Packet outp(toZT, RR->identity.address(), Packet::VERB_FRAME);
            outp.append(network->id());
            outp.append((uint16_t)etherType);
            outp.append(data, len);
            // 1.4.8: disable compression for unicast as it almost never helps
            // if (!network->config().disableCompression())
            //     outp.compress();
            aqm_enqueue(tPtr, network, outp, true, qosBucket, network->id(), flowId);
        }
        else {
            Packet outp(toZT, RR->identity.address(), Packet::VERB_EXT_FRAME);
            outp.append(network->id());
            outp.append((unsigned char)0x00);
            to.appendTo(outp);
            from.appendTo(outp);
            outp.append((uint16_t)etherType);
            outp.append(data, len);
            // 1.4.8: disable compression for unicast as it almost never helps
            // if (!network->config().disableCompression())
            //     outp.compress();
            aqm_enqueue(tPtr, network, outp, true, qosBucket, network->id(), flowId);
        }
    }
    else {
        // Destination is bridged behind a remote peer

        // We filter with a NULL destination ZeroTier address first. Filtrations
        // for each ZT destination are also done below. This is the same rationale
        // and design as for multicast.
        if (! network->filterOutgoingPacket(tPtr, false, RR->identity.address(), Address(), from, to, (const uint8_t*)data, len, etherType, vlanId, qosBucket)) {
            RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "filter blocked");
            return;
        }

        Address bridges[ZT_MAX_BRIDGE_SPAM];
        unsigned int numBridges = 0;

        /* Create an array of up to ZT_MAX_BRIDGE_SPAM recipients for this bridged frame. */
        bridges[0] = network->findBridgeTo(to);
        std::vector<Address> activeBridges(network->config().activeBridges());
        if ((bridges[0]) && (bridges[0] != RR->identity.address()) && (network->config().permitsBridging(bridges[0]))) {
            /* We have a known bridge route for this MAC, send it there. */
            ++numBridges;
        }
        else if (! activeBridges.empty()) {
            /* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
             * bridges. If someone responds, we'll learn the route. */
            std::vector<Address>::const_iterator ab(activeBridges.begin());
            if (activeBridges.size() <= ZT_MAX_BRIDGE_SPAM) {
                // If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
                while (ab != activeBridges.end()) {
                    bridges[numBridges++] = *ab;
                    ++ab;
                }
            }
            else {
                // Otherwise pick a random set of them
                while (numBridges < ZT_MAX_BRIDGE_SPAM) {
                    if (ab == activeBridges.end()) {
                        ab = activeBridges.begin();
                    }
                    if (((unsigned long)RR->node->prng() % (unsigned long)activeBridges.size()) == 0) {
                        bridges[numBridges++] = *ab;
                        ++ab;
                    }
                    else {
                        ++ab;
                    }
                }
            }
        }

        for (unsigned int b = 0; b < numBridges; ++b) {
            if (network->filterOutgoingPacket(tPtr, true, RR->identity.address(), bridges[b], from, to, (const uint8_t*)data, len, etherType, vlanId, qosBucket)) {
                Packet outp(bridges[b], RR->identity.address(), Packet::VERB_EXT_FRAME);
                outp.append(network->id());
                outp.append((uint8_t)0x00);
                to.appendTo(outp);
                from.appendTo(outp);
                outp.append((uint16_t)etherType);
                outp.append(data, len);
                // 1.4.8: disable compression for unicast as it almost never helps
                // if (!network->config().disableCompression())
                //     outp.compress();
                aqm_enqueue(tPtr, network, outp, true, qosBucket, network->id(), flowId);
            }
            else {
                RR->t->outgoingNetworkFrameDropped(tPtr, network, from, to, etherType, vlanId, len, "filter blocked (bridge replication)");
            }
        }
    }
}
void Switch::aqm_enqueue(void* tPtr, const SharedPtr<Network>& network, Packet& packet, const bool encrypt, const int qosBucket, const uint64_t nwid, const int32_t flowId)
{
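    // Queue discipline overview: this appears to follow the FQ-CoDel pattern.
    // Each network gets ZT_AQM_NUM_BUCKETS flow queues that rotate between a
    // NEW list (recently active), an OLD list (draining), and an INACTIVE
    // list. Dequeueing is deficit round-robin (byteCredit replenished by
    // ZT_AQM_QUANTUM), and each queue is drained through the CoDel dropping
    // law implemented in dodequeue()/CoDelDequeue() below.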
    if (! network->qosEnabled()) {
        send(tPtr, packet, encrypt, nwid, flowId);
        return;
    }
    NetworkQoSControlBlock* nqcb = _netQueueControlBlock[network->id()];
    if (! nqcb) {
        nqcb = new NetworkQoSControlBlock();
        _netQueueControlBlock[network->id()] = nqcb;
        // Initialize ZT_AQM_NUM_BUCKETS queues and place them in the INACTIVE list
        // These queues will be shuffled between the new/old/inactive lists by the enqueue/dequeue algorithm
        for (int i = 0; i < ZT_AQM_NUM_BUCKETS; i++) {
            nqcb->inactiveQueues.push_back(new ManagedQueue(i));
        }
    }
    // Don't apply QoS scheduling to ZT protocol traffic
    if (packet.verb() != Packet::VERB_FRAME && packet.verb() != Packet::VERB_EXT_FRAME) {
        send(tPtr, packet, encrypt, nwid, flowId);
        return; // protocol traffic bypasses the AQM; without this it would also be enqueued below and sent twice
    }
    _aqm_m.lock();

    // Enqueue packet and move queue to appropriate list
    const Address dest(packet.destination());
    TXQueueEntry* txEntry = new TXQueueEntry(dest, nwid, RR->node->now(), packet, encrypt, flowId);

    ManagedQueue* selectedQueue = nullptr;
    for (size_t i = 0; i < ZT_AQM_NUM_BUCKETS; i++) {
        if (i < nqcb->oldQueues.size()) { // search old queues first (I think this is best since old would imply most recent usage of the queue)
            if (nqcb->oldQueues[i]->id == qosBucket) {
                selectedQueue = nqcb->oldQueues[i];
            }
        }
        if (i < nqcb->newQueues.size()) { // search new queues (this would imply not often-used queues)
            if (nqcb->newQueues[i]->id == qosBucket) {
                selectedQueue = nqcb->newQueues[i];
            }
        }
        if (i < nqcb->inactiveQueues.size()) { // search inactive queues
            if (nqcb->inactiveQueues[i]->id == qosBucket) {
                selectedQueue = nqcb->inactiveQueues[i];
                // move queue to end of NEW queue list
                selectedQueue->byteCredit = ZT_AQM_QUANTUM;
                // DEBUG_INFO("moving q=%p from INACTIVE to NEW list", selectedQueue);
                nqcb->newQueues.push_back(selectedQueue);
                nqcb->inactiveQueues.erase(nqcb->inactiveQueues.begin() + i);
            }
        }
    }
    if (! selectedQueue) {
        _aqm_m.unlock();
        delete txEntry; // no matching bucket; free the entry rather than leaking it
        return;
    }
    selectedQueue->q.push_back(txEntry);
    selectedQueue->byteLength += txEntry->packet.payloadLength();
    nqcb->_currEnqueuedPackets++;

    // DEBUG_INFO("nq=%2lu, oq=%2lu, iq=%2lu, nqcb.size()=%3d, bucket=%2d, q=%p", nqcb->newQueues.size(), nqcb->oldQueues.size(), nqcb->inactiveQueues.size(), nqcb->_currEnqueuedPackets, qosBucket, selectedQueue);

    // Drop a packet if necessary
    ManagedQueue* selectedQueueToDropFrom = nullptr;
    if (nqcb->_currEnqueuedPackets > ZT_AQM_MAX_ENQUEUED_PACKETS) {
        // DEBUG_INFO("too many enqueued packets (%d), finding packet to drop", nqcb->_currEnqueuedPackets);
        int maxQueueLength = 0;
        for (size_t i = 0; i < ZT_AQM_NUM_BUCKETS; i++) {
            if (i < nqcb->oldQueues.size()) {
                if (nqcb->oldQueues[i]->byteLength > maxQueueLength) {
                    maxQueueLength = nqcb->oldQueues[i]->byteLength;
                    selectedQueueToDropFrom = nqcb->oldQueues[i];
                }
            }
            if (i < nqcb->newQueues.size()) {
                if (nqcb->newQueues[i]->byteLength > maxQueueLength) {
                    maxQueueLength = nqcb->newQueues[i]->byteLength;
                    selectedQueueToDropFrom = nqcb->newQueues[i];
                }
            }
            if (i < nqcb->inactiveQueues.size()) {
                if (nqcb->inactiveQueues[i]->byteLength > maxQueueLength) {
                    maxQueueLength = nqcb->inactiveQueues[i]->byteLength;
                    selectedQueueToDropFrom = nqcb->inactiveQueues[i];
                }
            }
        }
        if (selectedQueueToDropFrom) {
            // DEBUG_INFO("dropping packet from head of largest queue (%d payload bytes)", maxQueueLength);
            int sizeOfDroppedPacket = selectedQueueToDropFrom->q.front()->packet.payloadLength();
            delete selectedQueueToDropFrom->q.front();
            selectedQueueToDropFrom->q.pop_front();
            selectedQueueToDropFrom->byteLength -= sizeOfDroppedPacket;
            nqcb->_currEnqueuedPackets--;
        }
    }
    _aqm_m.unlock();
    aqm_dequeue(tPtr);
}
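
// CoDel's control law: after a drop, the next drop is scheduled at
// t + INTERVAL / sqrt(count), so the drop rate rises gradually while the
// queue stays above target. For instance, if ZT_AQM_INTERVAL were 100 ms
// and count were 4 (hypothetical values), the next drop would come 50 ms
// after t. (See Nichols & Jacobson, "Controlling Queue Delay", ACM Queue
// 2012.)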
uint64_t Switch::control_law(uint64_t t, int count)
{
    return (uint64_t)(t + ZT_AQM_INTERVAL / sqrt(count));
}
Switch::dqr Switch::dodequeue(ManagedQueue* q, uint64_t now)
{
    dqr r;
    r.ok_to_drop = false;
    r.p = (q->q.empty()) ? nullptr : q->q.front(); // peek only; front() on an empty std::list is undefined behavior

    if (! r.p) {
        q->first_above_time = 0;
        return r;
    }

    uint64_t sojourn_time = now - r.p->creationTime;
    if (sojourn_time < ZT_AQM_TARGET || q->byteLength <= ZT_DEFAULT_MTU) {
        // went below - stay below for at least interval
        q->first_above_time = 0;
    }
    else {
        if (q->first_above_time == 0) {
            // just went above from below. if still above at
            // first_above_time, will say it's ok to drop.
            q->first_above_time = now + ZT_AQM_INTERVAL;
        }
        else if (now >= q->first_above_time) {
            r.ok_to_drop = true;
        }
    }
    return r;
}
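
// Note that dodequeue() only peeks at the head entry; the caller is
// responsible for popping (and freeing) it. This mirrors the dodequeue()
// helper in the CoDel reference pseudocode, adapted here to a peek-then-pop
// structure.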
Switch::TXQueueEntry* Switch::CoDelDequeue(ManagedQueue* q, bool isNew, uint64_t now)
{
    dqr r = dodequeue(q, now);

    if (q->dropping) {
        if (! r.ok_to_drop) {
            q->dropping = false;
        }
        while (now >= q->drop_next && q->dropping) {
            delete q->q.front(); // drop: free the entry, it will never be sent
            q->q.pop_front();
            r = dodequeue(q, now);
            if (! r.ok_to_drop) {
                // leave dropping state
                q->dropping = false;
            }
            else {
                ++(q->count);
                // schedule the next drop.
                q->drop_next = control_law(q->drop_next, q->count);
            }
        }
    }
    else if (r.ok_to_drop) {
        delete q->q.front(); // drop: free the entry, it will never be sent
        q->q.pop_front();
        r = dodequeue(q, now);
        q->dropping = true;
        q->count = (q->count > 2 && now - q->drop_next < 8 * ZT_AQM_INTERVAL) ? q->count - 2 : 1;
        q->drop_next = control_law(now, q->count);
    }
    return r.p;
}
void Switch::aqm_dequeue(void* tPtr)
{
    // Cycle through network-specific QoS control blocks
    for (std::map<uint64_t, NetworkQoSControlBlock*>::iterator nqcb(_netQueueControlBlock.begin()); nqcb != _netQueueControlBlock.end();) {
        if (! (*nqcb).second->_currEnqueuedPackets) {
            return;
        }

        uint64_t now = RR->node->now();
        TXQueueEntry* entryToEmit = nullptr;
        std::vector<ManagedQueue*>* currQueues = &((*nqcb).second->newQueues);
        std::vector<ManagedQueue*>* oldQueues = &((*nqcb).second->oldQueues);
        std::vector<ManagedQueue*>* inactiveQueues = &((*nqcb).second->inactiveQueues);

        _aqm_m.lock();

        // Attempt dequeue from queues in NEW list
        bool examiningNewQueues = true;
        while (currQueues->size()) {
            ManagedQueue* queueAtFrontOfList = currQueues->front();
            if (queueAtFrontOfList->byteCredit < 0) {
                queueAtFrontOfList->byteCredit += ZT_AQM_QUANTUM;
                // Move to list of OLD queues
                // DEBUG_INFO("moving q=%p from NEW to OLD list", queueAtFrontOfList);
                oldQueues->push_back(queueAtFrontOfList);
                currQueues->erase(currQueues->begin());
            }
            else {
                entryToEmit = CoDelDequeue(queueAtFrontOfList, examiningNewQueues, now);
                if (! entryToEmit) {
                    // Move to end of list of OLD queues
                    // DEBUG_INFO("moving q=%p from NEW to OLD list", queueAtFrontOfList);
                    oldQueues->push_back(queueAtFrontOfList);
                    currQueues->erase(currQueues->begin());
                }
                else {
                    int len = entryToEmit->packet.payloadLength();
                    queueAtFrontOfList->byteLength -= len;
                    queueAtFrontOfList->byteCredit -= len;
                    // Send the packet!
                    queueAtFrontOfList->q.pop_front();
                    send(tPtr, entryToEmit->packet, entryToEmit->encrypt, entryToEmit->nwid, entryToEmit->flowId);
                    delete entryToEmit; // CoDelDequeue() only peeked; free the entry after sending
                    (*nqcb).second->_currEnqueuedPackets--;
                }
                // DEBUG_INFO("dequeuing from q=%p, len=%lu in NEW list (byteCredit=%d)", queueAtFrontOfList, queueAtFrontOfList->q.size(), queueAtFrontOfList->byteCredit);
                break;
            }
        }

        // Attempt dequeue from queues in OLD list
        examiningNewQueues = false;
        currQueues = &((*nqcb).second->oldQueues);
        while (currQueues->size()) {
            ManagedQueue* queueAtFrontOfList = currQueues->front();
            if (queueAtFrontOfList->byteCredit < 0) {
                queueAtFrontOfList->byteCredit += ZT_AQM_QUANTUM;
                oldQueues->push_back(queueAtFrontOfList);
                currQueues->erase(currQueues->begin());
            }
            else {
                entryToEmit = CoDelDequeue(queueAtFrontOfList, examiningNewQueues, now);
                if (! entryToEmit) {
                    // DEBUG_INFO("moving q=%p from OLD to INACTIVE list", queueAtFrontOfList);
                    // Move to inactive list of queues
                    inactiveQueues->push_back(queueAtFrontOfList);
                    currQueues->erase(currQueues->begin());
                }
                else {
                    int len = entryToEmit->packet.payloadLength();
                    queueAtFrontOfList->byteLength -= len;
                    queueAtFrontOfList->byteCredit -= len;
                    queueAtFrontOfList->q.pop_front();
                    send(tPtr, entryToEmit->packet, entryToEmit->encrypt, entryToEmit->nwid, entryToEmit->flowId);
                    delete entryToEmit; // CoDelDequeue() only peeked; free the entry after sending
                    (*nqcb).second->_currEnqueuedPackets--;
                }
                // DEBUG_INFO("dequeuing from q=%p, len=%lu in OLD list (byteCredit=%d)", queueAtFrontOfList, queueAtFrontOfList->q.size(), queueAtFrontOfList->byteCredit);
                break;
            }
        }
        nqcb++;
        _aqm_m.unlock();
    }
}
void Switch::removeNetworkQoSControlBlock(uint64_t nwid)
{
    // Use find() rather than operator[], which would default-insert a null
    // entry for networks that never had a control block.
    std::map<uint64_t, NetworkQoSControlBlock*>::iterator nq(_netQueueControlBlock.find(nwid));
    if (nq != _netQueueControlBlock.end()) {
        delete nq->second;
        _netQueueControlBlock.erase(nq);
    }
}
void Switch::send(void* tPtr, Packet& packet, const bool encrypt, const uint64_t nwid, const int32_t flowId)
{
    const Address dest(packet.destination());
    if (dest == RR->identity.address()) {
        return;
    }
    _recordOutgoingPacketMetrics(packet);
    if (! _trySend(tPtr, packet, encrypt, nwid, flowId)) {
        {
            Mutex::Lock _l(_txQueue_m);
            if (_txQueue.size() >= ZT_TX_QUEUE_SIZE) {
                _txQueue.pop_front();
            }
            _txQueue.push_back(TXQueueEntry(dest, nwid, RR->node->now(), packet, encrypt, flowId));
        }
        if (! RR->topology->getPeer(tPtr, dest)) {
            requestWhois(tPtr, RR->node->now(), dest);
        }
    }
}

void Switch::requestWhois(void* tPtr, const int64_t now, const Address& addr)
{
    if (addr == RR->identity.address()) {
        return;
    }
    {
        Mutex::Lock _l(_lastSentWhoisRequest_m);
        int64_t& last = _lastSentWhoisRequest[addr];
        if ((now - last) < ZT_WHOIS_RETRY_DELAY) {
            return;
        }
        else {
            last = now;
        }
    }
    const SharedPtr<Peer> upstream(RR->topology->getUpstreamPeer(0));
    if (upstream) {
        int32_t flowId = ZT_QOS_NO_FLOW;
        Packet outp(upstream->address(), RR->identity.address(), Packet::VERB_WHOIS);
        addr.appendTo(outp);
        send(tPtr, outp, true, 0, flowId);
    }
}

void Switch::doAnythingWaitingForPeer(void* tPtr, const SharedPtr<Peer>& peer)
{
    {
        Mutex::Lock _l(_lastSentWhoisRequest_m);
        _lastSentWhoisRequest.erase(peer->address());
    }

    const int64_t now = RR->node->now();
    for (unsigned int ptr = 0; ptr < ZT_RX_QUEUE_SIZE; ++ptr) {
        RXQueueEntry* const rq = &(_rxQueue[ptr]);
        Mutex::Lock rql(rq->lock);
        if ((rq->timestamp) && (rq->complete)) {
            if ((rq->frag0.tryDecode(RR, tPtr, rq->flowId)) || ((now - rq->timestamp) > ZT_RECEIVE_QUEUE_TIMEOUT)) {
                rq->timestamp = 0;
            }
        }
    }

    {
        Mutex::Lock _l(_txQueue_m);
        for (std::list<TXQueueEntry>::iterator txi(_txQueue.begin()); txi != _txQueue.end();) {
            if (txi->dest == peer->address()) {
                if (_trySend(tPtr, txi->packet, txi->encrypt, 0, txi->flowId)) {
                    _txQueue.erase(txi++);
                }
                else {
                    ++txi;
                }
            }
            else {
                ++txi;
            }
        }
    }
}
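
// Runs deferred queue maintenance: retries queued transmissions, expires
// stale rx/tx entries, and issues WHOIS requests for unknown peers. Returns
// the delay until it should next be invoked (ZeroTier timestamps are in
// milliseconds).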
unsigned long Switch::doTimerTasks(void* tPtr, int64_t now)
{
    const uint64_t timeSinceLastCheck = now - _lastCheckedQueues;
    if (timeSinceLastCheck < ZT_WHOIS_RETRY_DELAY) {
        return (unsigned long)(ZT_WHOIS_RETRY_DELAY - timeSinceLastCheck);
    }
    _lastCheckedQueues = now;

    std::vector<Address> needWhois;
    {
        Mutex::Lock _l(_txQueue_m);

        for (std::list<TXQueueEntry>::iterator txi(_txQueue.begin()); txi != _txQueue.end();) {
            if (_trySend(tPtr, txi->packet, txi->encrypt, 0, txi->flowId)) {
                _txQueue.erase(txi++);
            }
            else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
                _txQueue.erase(txi++);
            }
            else {
                if (! RR->topology->getPeer(tPtr, txi->dest)) {
                    needWhois.push_back(txi->dest);
                }
                ++txi;
            }
        }
    }
    for (std::vector<Address>::const_iterator i(needWhois.begin()); i != needWhois.end(); ++i) {
        requestWhois(tPtr, now, *i);
    }

    for (unsigned int ptr = 0; ptr < ZT_RX_QUEUE_SIZE; ++ptr) {
        RXQueueEntry* const rq = &(_rxQueue[ptr]);
        Mutex::Lock rql(rq->lock);
        if ((rq->timestamp) && (rq->complete)) {
            if ((rq->frag0.tryDecode(RR, tPtr, rq->flowId)) || ((now - rq->timestamp) > ZT_RECEIVE_QUEUE_TIMEOUT)) {
                rq->timestamp = 0;
            }
            else {
                const Address src(rq->frag0.source());
                if (! RR->topology->getPeer(tPtr, src)) {
                    requestWhois(tPtr, now, src);
                }
            }
        }
    }

    {
        Mutex::Lock _l(_lastUniteAttempt_m);
        Hashtable<_LastUniteKey, uint64_t>::Iterator i(_lastUniteAttempt);
        _LastUniteKey* k = (_LastUniteKey*)0;
        uint64_t* v = (uint64_t*)0;
        while (i.next(k, v)) {
            if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 8)) {
                _lastUniteAttempt.erase(*k);
            }
        }
    }

    {
        Mutex::Lock _l(_lastSentWhoisRequest_m);
        Hashtable<Address, int64_t>::Iterator i(_lastSentWhoisRequest);
        Address* a = (Address*)0;
        int64_t* ts = (int64_t*)0;
        while (i.next(a, ts)) {
            if ((now - *ts) > (ZT_WHOIS_RETRY_DELAY * 2)) {
                _lastSentWhoisRequest.erase(*a);
            }
        }
    }

    return ZT_WHOIS_RETRY_DELAY;
}
bool Switch::_shouldUnite(const int64_t now, const Address& source, const Address& destination)
{
    Mutex::Lock _l(_lastUniteAttempt_m);
    uint64_t& ts = _lastUniteAttempt[_LastUniteKey(source, destination)];
    if ((now - ts) >= ZT_MIN_UNITE_INTERVAL) {
        ts = now;
        return true;
    }
    return false;
}
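
// Path selection fallback chain in _trySend(): (1) an established direct
// path to the peer, (2) periodically retry any memorized or statically
// configured path, (3) an upstream relay's path, and finally (4) ask the
// peer again with its second argument set (presumably allowing expired
// paths) as a last resort. A broadcast bonding policy short-circuits all of
// this and replicates the frame over every live path.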
bool Switch::_trySend(void* tPtr, Packet& packet, bool encrypt, const uint64_t nwid, const int32_t flowId)
{
    SharedPtr<Path> viaPath;
    const int64_t now = RR->node->now();
    const Address destination(packet.destination());

    const SharedPtr<Peer> peer(RR->topology->getPeer(tPtr, destination));
    if (peer) {
        if ((peer->bondingPolicy() == ZT_BOND_POLICY_BROADCAST) && (packet.verb() == Packet::VERB_FRAME || packet.verb() == Packet::VERB_EXT_FRAME)) {
            const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer(nwid));
            Mutex::Lock _l(peer->_paths_m);
            for (int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (peer->_paths[i].p && peer->_paths[i].p->alive(now)) {
                    uint16_t userSpecifiedMtu = peer->_paths[i].p->mtu();
                    _sendViaSpecificPath(tPtr, peer, peer->_paths[i].p, userSpecifiedMtu, now, packet, encrypt, flowId);
                }
            }
            return true;
        }
        else {
            viaPath = peer->getAppropriatePath(now, false, flowId);
            if (! viaPath) {
                peer->tryMemorizedPath(tPtr, now); // periodically attempt memorized or statically defined paths, if any are known
                const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer(nwid));
                if ((! relay) || (! (viaPath = relay->getAppropriatePath(now, false, flowId)))) {
                    if (! (viaPath = peer->getAppropriatePath(now, true, flowId))) {
                        return false;
                    }
                }
            }
            if (viaPath) {
                uint16_t userSpecifiedMtu = viaPath->mtu();
                _sendViaSpecificPath(tPtr, peer, viaPath, userSpecifiedMtu, now, packet, encrypt, flowId);
                return true;
            }
        }
    }
    return false;
}
void Switch::_sendViaSpecificPath(void* tPtr, SharedPtr<Peer> peer, SharedPtr<Path> viaPath, uint16_t userSpecifiedMtu, int64_t now, Packet& packet, bool encrypt, int32_t flowId)
{
    unsigned int mtu = ZT_DEFAULT_PHYSMTU;
    uint64_t trustedPathId = 0;
    RR->topology->getOutboundPathInfo(viaPath->address(), mtu, trustedPathId);

    if (userSpecifiedMtu > 0) {
        mtu = userSpecifiedMtu;
    }

    unsigned int chunkSize = std::min(packet.size(), mtu);
    packet.setFragmented(chunkSize < packet.size());

    if (trustedPathId) {
        packet.setTrusted(trustedPathId);
    }
    else {
        if (! packet.isEncrypted()) {
            packet.armor(peer->key(), encrypt, false, peer->aesKeysIfSupported(), peer->identity());
        }
        RR->node->expectReplyTo(packet.packetId());
    }

    peer->recordOutgoingPacket(viaPath, packet.packetId(), packet.payloadLength(), packet.verb(), flowId, now);

    if (viaPath->send(RR, tPtr, packet.data(), chunkSize, now)) {
        if (chunkSize < packet.size()) {
            // Too big for one packet, fragment the rest
            unsigned int fragStart = chunkSize;
            unsigned int remaining = packet.size() - chunkSize;
            unsigned int fragsRemaining = (remaining / (mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH));
            if ((fragsRemaining * (mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining) {
                ++fragsRemaining;
            }
            const unsigned int totalFragments = fragsRemaining + 1;

            for (unsigned int fno = 1; fno < totalFragments; ++fno) {
                chunkSize = std::min(remaining, (unsigned int)(mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH));
                Packet::Fragment frag(packet, fragStart, chunkSize, fno, totalFragments);
                viaPath->send(RR, tPtr, frag.data(), frag.size(), now);
                fragStart += chunkSize;
                remaining -= chunkSize;
            }
        }
    }
}
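
// Worked fragmentation example (hypothetical numbers, and assuming
// ZT_PROTO_MIN_FRAGMENT_LENGTH is the 16-byte fragment header size): for a
// 4000-byte packet over a 1400-byte MTU path, the head carries the first
// 1400 bytes, leaving 2600. Each fragment carries up to 1400 - 16 = 1384
// payload bytes, so fragsRemaining = ceil(2600 / 1384) = 2 and
// totalFragments = 3 (the head plus two fragments).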
void Switch::_recordOutgoingPacketMetrics(const Packet& p)
{
    switch (p.verb()) {
        case Packet::VERB_NOP:
            Metrics::pkt_nop_out++;
            break;
        case Packet::VERB_HELLO:
            Metrics::pkt_hello_out++;
            break;
        case Packet::VERB_ERROR:
            Metrics::pkt_error_out++;
            break;
        case Packet::VERB_OK:
            Metrics::pkt_ok_out++;
            break;
        case Packet::VERB_WHOIS:
            Metrics::pkt_whois_out++;
            break;
        case Packet::VERB_RENDEZVOUS:
            Metrics::pkt_rendezvous_out++;
            break;
        case Packet::VERB_FRAME:
            Metrics::pkt_frame_out++;
            break;
        case Packet::VERB_EXT_FRAME:
            Metrics::pkt_ext_frame_out++;
            break;
        case Packet::VERB_ECHO:
            Metrics::pkt_echo_out++;
            break;
        case Packet::VERB_MULTICAST_LIKE:
            Metrics::pkt_multicast_like_out++;
            break;
        case Packet::VERB_NETWORK_CREDENTIALS:
            Metrics::pkt_network_credentials_out++;
            break;
        case Packet::VERB_NETWORK_CONFIG_REQUEST:
            Metrics::pkt_network_config_request_out++;
            break;
        case Packet::VERB_NETWORK_CONFIG:
            Metrics::pkt_network_config_out++;
            break;
        case Packet::VERB_MULTICAST_GATHER:
            Metrics::pkt_multicast_gather_out++;
            break;
        case Packet::VERB_MULTICAST_FRAME:
            Metrics::pkt_multicast_frame_out++;
            break;
        case Packet::VERB_PUSH_DIRECT_PATHS:
            Metrics::pkt_push_direct_paths_out++;
            break;
        case Packet::VERB_ACK:
            Metrics::pkt_ack_out++;
            break;
        case Packet::VERB_QOS_MEASUREMENT:
            Metrics::pkt_qos_out++;
            break;
        case Packet::VERB_USER_MESSAGE:
            Metrics::pkt_user_message_out++;
            break;
        case Packet::VERB_REMOTE_TRACE:
            Metrics::pkt_remote_trace_out++;
            break;
        case Packet::VERB_PATH_NEGOTIATION_REQUEST:
            Metrics::pkt_path_negotiation_request_out++;
            break;
    }
}

} // namespace ZeroTier