
Tweak some timings for better reliability.

Adam Ierymenko, 9 years ago
Parent
Current commit
60ce886605
8 changed files with 131 additions and 148 deletions
  1. node/Cluster.cpp (+4 −2)
  2. node/Cluster.hpp (+7 −1)
  3. node/Constants.hpp (+4 −20)
  4. node/Multicaster.cpp (+111 −109)
  5. node/Node.cpp (+1 −12)
  6. tests/http/big-test-kill.sh (+1 −1)
  7. tests/http/big-test-ready.sh (+1 −1)
  8. tests/http/big-test-start.sh (+2 −2)

node/Cluster.cpp (+4 −2)

@@ -85,7 +85,8 @@ Cluster::Cluster(
 	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
 	_peerAffinities(65536),
 	_lastCleanedPeerAffinities(0),
-	_lastCheckedPeersForAnnounce(0)
+	_lastCheckedPeersForAnnounce(0),
+	_lastFlushed(0)
 {
 	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
 
@@ -510,7 +511,8 @@ void Cluster::doPeriodicTasks()
 	}
 
 	// Flush outgoing packet send queue every doPeriodicTasks()
-	{
+	if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
+		_lastFlushed = now;
 		Mutex::Lock _l(_memberIds_m);
 		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
 			Mutex::Lock _l2(_members[*mid].lock);
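
The change above gates the queue flush on an elapsed-time check instead of running it on every doPeriodicTasks() pass. As a rough illustration (not code from the ZeroTier tree; the class and method names here are hypothetical), the pattern reduces to:

    #include <cstdint>

    // Minimal sketch of the "flush at most once per period" gate added above.
    class PeriodicFlusher {
    public:
        explicit PeriodicFlusher(uint64_t periodMs) : _period(periodMs), _lastFlushed(0) {}

        // Called from the periodic task loop; flushes only if the period has elapsed.
        void maybeFlush(uint64_t now) {
            if ((now - _lastFlushed) >= _period) {
                _lastFlushed = now;
                flushAll();
            }
        }

    private:
        void flushAll() { /* push queued cluster messages to each member */ }

        uint64_t _period;      // e.g. ZT_CLUSTER_FLUSH_PERIOD (500 ms)
        uint64_t _lastFlushed; // timestamp of the last flush
    };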

node/Cluster.hpp (+7 −1)

@@ -55,13 +55,18 @@
 /**
  * How often should we announce that we have a peer?
  */
-#define ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD ((ZT_PEER_ACTIVITY_TIMEOUT / 2) - 1000)
+#define ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD (ZT_PEER_DIRECT_PING_DELAY / 2)
 
 /**
  * Desired period between doPeriodicTasks() in milliseconds
  */
 #define ZT_CLUSTER_PERIODIC_TASK_PERIOD 250
 
+/**
+ * How often to flush outgoing message queues (maximum interval)
+ */
+#define ZT_CLUSTER_FLUSH_PERIOD 500
+
 namespace ZeroTier {
 
 class RuntimeEnvironment;
@@ -355,6 +360,7 @@ private:
 
 	uint64_t _lastCleanedPeerAffinities;
 	uint64_t _lastCheckedPeersForAnnounce;
+	uint64_t _lastFlushed;
 };
 
 } // namespace ZeroTier

node/Constants.hpp (+4 −20)

@@ -173,13 +173,8 @@
 
 /**
  * Timeout for receipt of fragmented packets in ms
- *
- * Since there's no retransmits, this is just a really bad case scenario for
- * transit time. It's short enough that a DOS attack from exhausing buffers is
- * very unlikely, as the transfer rate would have to be fast enough to fill
- * system memory in this time.
  */
-#define ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT 1000
+#define ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT 500
 
 /**
  * Length of secret key in bytes -- 256-bit -- do not change
@@ -194,7 +189,7 @@
 /**
  * Overriding granularity for timer tasks to prevent CPU-intensive thrashing on every packet
  */
-#define ZT_CORE_TIMER_TASK_GRANULARITY 1000
+#define ZT_CORE_TIMER_TASK_GRANULARITY 500
 
 /**
  * How long to remember peer records in RAM if they haven't been used
@@ -269,7 +264,7 @@
 /**
  * Delay between ordinary case pings of direct links
  */
-#define ZT_PEER_DIRECT_PING_DELAY 120000
+#define ZT_PEER_DIRECT_PING_DELAY 60000
 
 /**
  * Delay between requests for updated network autoconf information
@@ -279,18 +274,7 @@
 /**
  * Timeout for overall peer activity (measured from last receive)
  */
-#define ZT_PEER_ACTIVITY_TIMEOUT (ZT_PEER_DIRECT_PING_DELAY + (ZT_PING_CHECK_INVERVAL * 3))
-
-/**
- * Stop relaying via peers that have not responded to direct sends
- *
- * When we send something (including frames), we generally expect a response.
- * Switching relays if no response in a short period of time causes more
- * rapid failover if a root server goes down or becomes unreachable. In the
- * mistaken case, little harm is done as it'll pick the next-fastest
- * root server and will switch back eventually.
- */
-#define ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD 10000
+#define ZT_PEER_ACTIVITY_TIMEOUT ((ZT_PEER_DIRECT_PING_DELAY * 3) + (ZT_PING_CHECK_INVERVAL * 2))
 
 /**
  * Minimum interval between attempts by relays to unite peers
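
Taken together, these constants shorten failure detection: direct pings now go out every 60 seconds and the activity timeout is derived from that ping delay. A small standalone check of the arithmetic (the value of ZT_PING_CHECK_INVERVAL below is a placeholder assumption for this sketch; the real constant is defined elsewhere in Constants.hpp):

    #include <cstdio>

    // Placeholder values for illustration only; the real definitions live in Constants.hpp.
    #define ZT_PEER_DIRECT_PING_DELAY 60000
    #define ZT_PING_CHECK_INVERVAL 60000
    #define ZT_PEER_ACTIVITY_TIMEOUT ((ZT_PEER_DIRECT_PING_DELAY * 3) + (ZT_PING_CHECK_INVERVAL * 2))

    int main() {
        // With these assumed values: (60000 * 3) + (60000 * 2) = 300000 ms = 5 minutes.
        std::printf("peer activity timeout: %d ms\n", ZT_PEER_ACTIVITY_TIMEOUT);
        return 0;
    }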

node/Multicaster.cpp (+111 −109)

@@ -175,128 +175,130 @@ void Multicaster::send(
 	unsigned long idxbuf[8194];
 	unsigned long *indexes = idxbuf;
 
-	Mutex::Lock _l(_groups_m);
-	MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
-
-	if (!gs.members.empty()) {
-		// Allocate a memory buffer if group is monstrous
-		if (gs.members.size() > (sizeof(idxbuf) / sizeof(unsigned long)))
-			indexes = new unsigned long[gs.members.size()];
-
-		// Generate a random permutation of member indexes
-		for(unsigned long i=0;i<gs.members.size();++i)
-			indexes[i] = i;
-		for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
-			unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
-			unsigned long tmp = indexes[j];
-			indexes[j] = indexes[i];
-			indexes[i] = tmp;
+	try {
+		Mutex::Lock _l(_groups_m);
+		MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
+
+		if (!gs.members.empty()) {
+			// Allocate a memory buffer if group is monstrous
+			if (gs.members.size() > (sizeof(idxbuf) / sizeof(unsigned long)))
+				indexes = new unsigned long[gs.members.size()];
+
+			// Generate a random permutation of member indexes
+			for(unsigned long i=0;i<gs.members.size();++i)
+				indexes[i] = i;
+			for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
+				unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
+				unsigned long tmp = indexes[j];
+				indexes[j] = indexes[i];
+				indexes[i] = tmp;
+			}
 		}
-	}
 
-	if (gs.members.size() >= limit) {
-		// Skip queue if we already have enough members to complete the send operation
-		OutboundMulticast out;
-
-		out.init(
-			RR,
-			now,
-			nwid,
-			com,
-			limit,
-			1, // we'll still gather a little from peers to keep multicast list fresh
-			src,
-			mg,
-			etherType,
-			data,
-			len);
-
-		unsigned int count = 0;
-
-		for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
-			if (*ast != RR->identity.address()) {
-				out.sendOnly(RR,*ast);
-				if (++count >= limit)
-					break;
+		if (gs.members.size() >= limit) {
+			// Skip queue if we already have enough members to complete the send operation
+			OutboundMulticast out;
+
+			out.init(
+				RR,
+				now,
+				nwid,
+				com,
+				limit,
+				1, // we'll still gather a little from peers to keep multicast list fresh
+				src,
+				mg,
+				etherType,
+				data,
+				len);
+
+			unsigned int count = 0;
+
+			for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
+				if (*ast != RR->identity.address()) {
+					out.sendOnly(RR,*ast); // optimization: don't use dedup log if it's a one-pass send
+					if (++count >= limit)
+						break;
+				}
 			}
-		}
 
-		unsigned long idx = 0;
-		while ((count < limit)&&(idx < gs.members.size())) {
-			Address ma(gs.members[indexes[idx++]].address);
-			if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
-				out.sendOnly(RR,ma);
-				++count;
+			unsigned long idx = 0;
+			while ((count < limit)&&(idx < gs.members.size())) {
+				Address ma(gs.members[indexes[idx++]].address);
+				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+					out.sendOnly(RR,ma); // optimization: don't use dedup log if it's a one-pass send
+					++count;
+				}
 			}
-		}
-	} else {
-		unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;
-
-		if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
-			gs.lastExplicitGather = now;
-			SharedPtr<Peer> r(RR->topology->getBestRoot());
-			if (r) {
-				TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
-
-				const CertificateOfMembership *com = (CertificateOfMembership *)0;
-				{
-					SharedPtr<Network> nw(RR->node->network(nwid));
-					if (nw) {
-						SharedPtr<NetworkConfig> nconf(nw->config2());
-						if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(r->needsOurNetworkMembershipCertificate(nwid,now,true)))
-							com = &(nconf->com());
+		} else {
+			unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;
+
+			if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) {
+				gs.lastExplicitGather = now;
+				SharedPtr<Peer> r(RR->topology->getBestRoot());
+				if (r) {
+					TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
+
+					const CertificateOfMembership *com = (CertificateOfMembership *)0;
+					{
+						SharedPtr<Network> nw(RR->node->network(nwid));
+						if (nw) {
+							SharedPtr<NetworkConfig> nconf(nw->config2());
+							if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(r->needsOurNetworkMembershipCertificate(nwid,now,true)))
+								com = &(nconf->com());
+						}
 					}
-				}
 
-				Packet outp(r->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
-				outp.append(nwid);
-				outp.append((uint8_t)(com ? 0x01 : 0x00));
-				mg.mac().appendTo(outp);
-				outp.append((uint32_t)mg.adi());
-				outp.append((uint32_t)gatherLimit);
-				if (com)
-					com->serialize(outp);
-				outp.armor(r->key(),true);
-				r->send(RR,outp.data(),outp.size(),now);
+					Packet outp(r->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
+					outp.append(nwid);
+					outp.append((uint8_t)(com ? 0x01 : 0x00));
+					mg.mac().appendTo(outp);
+					outp.append((uint32_t)mg.adi());
+					outp.append((uint32_t)gatherLimit);
+					if (com)
+						com->serialize(outp);
+					outp.armor(r->key(),true);
+					r->send(RR,outp.data(),outp.size(),now);
+				}
+				gatherLimit = 0;
 			}
-			gatherLimit = 0;
-		}
 
-		gs.txQueue.push_back(OutboundMulticast());
-		OutboundMulticast &out = gs.txQueue.back();
-
-		out.init(
-			RR,
-			now,
-			nwid,
-			com,
-			limit,
-			gatherLimit,
-			src,
-			mg,
-			etherType,
-			data,
-			len);
-
-		unsigned int count = 0;
-
-		for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
-			if (*ast != RR->identity.address()) {
-				out.sendAndLog(RR,*ast);
-				if (++count >= limit)
-					break;
+			gs.txQueue.push_back(OutboundMulticast());
+			OutboundMulticast &out = gs.txQueue.back();
+
+			out.init(
+				RR,
+				now,
+				nwid,
+				com,
+				limit,
+				gatherLimit,
+				src,
+				mg,
+				etherType,
+				data,
+				len);
+
+			unsigned int count = 0;
+
+			for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
+				if (*ast != RR->identity.address()) {
+					out.sendAndLog(RR,*ast);
+					if (++count >= limit)
+						break;
+				}
 			}
-		}
 
-		unsigned long idx = 0;
-		while ((count < limit)&&(idx < gs.members.size())) {
-			Address ma(gs.members[indexes[idx++]].address);
-			if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
-				out.sendAndLog(RR,ma);
-				++count;
+			unsigned long idx = 0;
+			while ((count < limit)&&(idx < gs.members.size())) {
+				Address ma(gs.members[indexes[idx++]].address);
+				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+					out.sendAndLog(RR,ma);
+					++count;
+				}
 			}
 		}
-	}
+	} catch ( ... ) {} // this is a sanity check to catch any failures and make sure indexes[] still gets deleted
 
 	// Free allocated memory buffer if any
 	if (indexes != idxbuf)
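
The try/catch added above exists so that the heap-allocated indexes[] fallback is always released before send() returns, even if something in the send path throws. An RAII-based sketch of the same buffer-management idea (illustrative only; the actual code keeps the manual allocation and the delete after the catch shown in this hunk):

    #include <cstddef>
    #include <memory>

    // Fixed stack buffer for the common case, heap fallback for very large member
    // lists, and automatic cleanup on every exit path, including exceptions.
    void buildIndexes(std::size_t memberCount) {
        unsigned long idxbuf[8194];
        unsigned long *indexes = idxbuf;
        std::unique_ptr<unsigned long[]> heap; // owns the fallback allocation, if any

        if (memberCount > (sizeof(idxbuf) / sizeof(unsigned long))) {
            heap.reset(new unsigned long[memberCount]);
            indexes = heap.get();
        }

        for (std::size_t i = 0; i < memberCount; ++i)
            indexes[i] = (unsigned long)i;

        // ... shuffle and use indexes; unique_ptr frees the heap buffer even if this throws.
    }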

node/Node.cpp (+1 −12)

@@ -305,18 +305,7 @@ ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextB
 			for(std::vector< SharedPtr<Network> >::const_iterator n(needConfig.begin());n!=needConfig.end();++n)
 				(*n)->requestConfiguration();
 
-			// Attempt to contact network preferred relays that we don't have direct links to
-			std::sort(networkRelays.begin(),networkRelays.end());
-			networkRelays.erase(std::unique(networkRelays.begin(),networkRelays.end()),networkRelays.end());
-			for(std::vector< std::pair<Address,InetAddress> >::const_iterator nr(networkRelays.begin());nr!=networkRelays.end();++nr) {
-				if (nr->second) {
-					SharedPtr<Peer> rp(RR->topology->getPeer(nr->first));
-					if ((rp)&&(!rp->hasActiveDirectPath(now)))
-						rp->attemptToContactAt(RR,InetAddress(),nr->second,now);
-				}
-			}
-
-			// Ping living or root server/relay peers
+			// Do pings and keepalives
 			_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
 			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
 

tests/http/big-test-kill.sh (+1 −1)

@@ -13,6 +13,6 @@ CONTAINER_IMAGE=zerotier/http-test
 
 export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
 
-pssh -h big-test-hosts -i -t 128 -p 256 "docker ps -aq | xargs -r docker rm -f"
+pssh -h big-test-hosts -i -t 0 -p 256 "docker ps -aq | xargs -r docker rm -f"
 
 exit 0
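
In the pssh invocations in these test scripts, -t 0 disables pssh's per-host timeout (a nonzero value kills the remote command after that many seconds), so long-running docker pulls and container start loops are no longer cut off at 128 seconds.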

tests/http/big-test-ready.sh (+1 −1)

@@ -25,6 +25,6 @@ export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
 #	docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE
 #done
 
-pssh -h big-test-hosts -i -t 128 -p 256 "docker pull $CONTAINER_IMAGE"
+pssh -h big-test-hosts -i -t 0 -p 256 "docker pull $CONTAINER_IMAGE"
 
 exit 0

tests/http/big-test-start.sh (+2 −2)

@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Edit as needed -- note that >1000 per host is likely problematic due to Linux kernel limits
-NUM_CONTAINERS=100
+NUM_CONTAINERS=25
 CONTAINER_IMAGE=zerotier/http-test
 
 #
@@ -25,6 +25,6 @@ export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
 #	docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE
 #done
 
-pssh -h big-test-hosts -i -t 128 -p 256 "for ((n=0;n<$NUM_CONTAINERS;n++)); do docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE; sleep 0.25; done"
+pssh -h big-test-hosts -i -t 0 -p 256 "for ((n=0;n<$NUM_CONTAINERS;n++)); do docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE; sleep 0.25; done"
 
 exit 0