Adam Ierymenko, 9 years ago
parent commit 73e2c6e511
3 changed files with 31 additions and 29 deletions
  1. node/Cluster.cpp  +19 -23
  2. node/Cluster.hpp  +6 -3
  3. tests/http/agent.js  +6 -3

node/Cluster.cpp  +19 -23

@@ -223,10 +223,10 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 							std::list<_SQE> q;
 							{
 								Mutex::Lock _l(_sendViaClusterQueue_m);
-								std::map< Address,std::list<_SQE> >::iterator qe(_sendViaClusterQueue.find(id.address()));
-								if (qe != _sendViaClusterQueue.end()) {
-									q.swap(qe->second); // just swap ptr instead of copying
-									_sendViaClusterQueue.erase(qe);
+								for(std::list<_SQE>::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
+									if (qi->toPeerAddress == id.address())
+										q.splice(q.end(),_sendViaClusterQueue,qi++);
+									else ++qi;
 								}
 							}
 							for(std::list<_SQE>::iterator qi(q.begin());qi!=q.end();++qi)
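Note on the replacement loop above: std::list::splice relinks a node from one list into another in O(1) without copying the entry's payload, which matters here because each _SQE carries a full ZT_PROTO_MAX_PACKET_LENGTH buffer. The extraction happens under _sendViaClusterQueue_m, and the spliced entries are then processed outside the lock. A minimal self-contained sketch of the same idiom, with a hypothetical Entry type standing in for _SQE:

#include <cstdint>
#include <list>

struct Entry { uint64_t toPeerAddress; }; // hypothetical stand-in for _SQE

// Move every entry destined for `addr` from `queue` into `out`, preserving order.
// splice() relinks the node instead of copying it; qi is post-incremented first,
// so iteration continues safely after the element leaves `queue`.
static void extractQueuedFor(std::list<Entry> &queue,std::list<Entry> &out,uint64_t addr)
{
	for(std::list<Entry>::iterator qi(queue.begin());qi!=queue.end();) {
		if (qi->toPeerAddress == addr)
			out.splice(out.end(),queue,qi++);
		else ++qi;
	}
}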
@@ -368,16 +368,17 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
 	if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
 		return;
 
-	_sendViaClusterQueue_m.lock();
-	unsigned long queueCount;
+	unsigned int queueCount = 0;
 	{
-		std::map< Address,std::list<_SQE> >::const_iterator qe(_sendViaClusterQueue.find(fromPeerAddress));
-		queueCount = (qe == _sendViaClusterQueue.end()) ? 0 : (unsigned long)qe->second.size();
-	}
-	_sendViaClusterQueue_m.unlock();
-	if (queueCount > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
-		TRACE("dropping sendViaCluster for %s -> %s since queue for sender is full",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
-		return;
+		Mutex::Lock _l(_sendViaClusterQueue_m);
+		for(std::list<_SQE>::const_iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();++qi) {
+			if (qi->fromPeerAddress == fromPeerAddress) {
+				if (++queueCount > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
+					TRACE("dropping sendViaCluster for %s -> %s since queue for sender is full",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
+					return;
+				}
+			}
+		}
 	}
 
 	const uint64_t now = RR->node->now();
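The rewritten cap check walks the single flat list and returns as soon as one sender exceeds ZT_CLUSTER_MAX_QUEUE_PER_SENDER, instead of reading the size of a per-sender sub-list as before. The counting could also be expressed with std::count_if; a hedged sketch using the same hypothetical Entry type (note that count_if always scans the whole list, whereas the early return above stops at the threshold; the lambda requires C++11):

#include <algorithm>
#include <cstdint>
#include <list>

struct Entry { uint64_t fromPeerAddress; }; // hypothetical stand-in for _SQE

// Count how many queued entries were submitted by `from`.
static unsigned long queuedBySender(const std::list<Entry> &queue,uint64_t from)
{
	return (unsigned long)std::count_if(queue.begin(),queue.end(),
		[from](const Entry &e) { return e.fromPeerAddress == from; });
}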
@@ -386,9 +387,9 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
 	unsigned int mostRecentMemberId = 0xffffffff;
 	{
 		Mutex::Lock _l2(_remotePeers_m);
-		std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(fromPeerAddress,0)));
+		std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
 		for(;;) {
-			if ((rpe == _remotePeers.end())||(rpe->first.first != fromPeerAddress))
+			if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
 				break;
 			else if (rpe->second > mostRecentTs) {
 				mostRecentTs = rpe->second;
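The fix above keys the _remotePeers lookup on toPeerAddress (the peer the packet must reach) rather than fromPeerAddress. The surrounding lookup pattern relies on std::map ordering std::pair keys lexicographically: lower_bound on (address, 0) positions at the first (address, memberId) entry, and the loop walks every member that has recently reported that peer, keeping the freshest timestamp. A self-contained sketch of that pattern, with a plain uint64_t standing in for Address (names hypothetical):

#include <cstdint>
#include <map>
#include <utility>

// Hypothetical stand-in for Cluster's (address, member id) -> last-seen-timestamp map.
typedef std::map< std::pair<uint64_t,unsigned int>,uint64_t > RemotePeerMap;

// Return the cluster member that most recently reported `addr`, or 0xffffffff if
// none has. Pairs compare lexicographically, so lower_bound on (addr,0) lands on
// the first entry whose address component equals addr.
static unsigned int mostRecentMemberFor(const RemotePeerMap &remotePeers,uint64_t addr)
{
	uint64_t mostRecentTs = 0;
	unsigned int mostRecentMemberId = 0xffffffff;
	RemotePeerMap::const_iterator rpe(remotePeers.lower_bound(std::pair<uint64_t,unsigned int>(addr,0)));
	for(;(rpe != remotePeers.end())&&(rpe->first.first == addr);++rpe) {
		if (rpe->second > mostRecentTs) {
			mostRecentTs = rpe->second;
			mostRecentMemberId = rpe->first.second;
		}
	}
	return mostRecentMemberId;
}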
@@ -420,7 +421,7 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
 		if (enqueueAndWait) {
 			TRACE("sendViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
 			Mutex::Lock _l(_sendViaClusterQueue_m);
-			_sendViaClusterQueue[fromPeerAddress].push_back(_SQE(now,toPeerAddress,data,len,unite));
+			_sendViaClusterQueue.push_back(_SQE(now,fromPeerAddress,toPeerAddress,data,len,unite));
 			return;
 		}
 	}
@@ -484,13 +485,8 @@ void Cluster::doPeriodicTasks()
 
 		{
 			Mutex::Lock _l2(_sendViaClusterQueue_m);
-			for(std::map< Address,std::list<_SQE> >::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
-				for(std::list<_SQE>::iterator qii(qi->second.begin());qii!=qi->second.end();) {
-					if ((now - qii->timestamp) >= ZT_CLUSTER_QUEUE_EXPIRATION)
-						qi->second.erase(qii++);
-					else ++qii;
-				}
-				if (qi->second.empty())
+			for(std::list<_SQE>::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
+				if ((now - qi->timestamp) >= ZT_CLUSTER_QUEUE_EXPIRATION)
 					_sendViaClusterQueue.erase(qi++);
 				else ++qi;
 			}
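With the queue flattened to a single list, the periodic expiration pass above reduces to the classic erase-while-iterating idiom: erase(qi++) hands the current iterator to erase() only after qi has already advanced, so the loop never dereferences an invalidated iterator. A minimal sketch of the same idiom (hypothetical Entry type):

#include <cstdint>
#include <list>

struct Entry { uint64_t timestamp; }; // hypothetical stand-in for _SQE

// Remove every entry older than maxAge milliseconds.
static void expireOldEntries(std::list<Entry> &queue,uint64_t now,uint64_t maxAge)
{
	for(std::list<Entry>::iterator qi(queue.begin());qi!=queue.end();) {
		if ((now - qi->timestamp) >= maxAge)
			queue.erase(qi++);
		else ++qi;
	}
}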

node/Cluster.hpp  +6 -3

@@ -39,6 +39,7 @@
 #include "Constants.hpp"
 #include "../include/ZeroTierOne.h"
 #include "Address.hpp"
+#include "Array.hpp"
 #include "InetAddress.hpp"
 #include "SHA512.hpp"
 #include "Utils.hpp"
@@ -74,7 +75,7 @@
 /**
  * Expiration time for send queue entries
  */
-#define ZT_CLUSTER_QUEUE_EXPIRATION 1500
+#define ZT_CLUSTER_QUEUE_EXPIRATION 500
 
 namespace ZeroTier {
 
@@ -372,18 +373,20 @@ private:
 	struct _SQE
 	{
 		_SQE() : timestamp(0),len(0),unite(false) {}
-		_SQE(const uint64_t ts,const Address &t,const void *d,const unsigned int l,const bool u) :
+		_SQE(const uint64_t ts,const Address &f,const Address &t,const void *d,const unsigned int l,const bool u) :
 			timestamp(ts),
+			fromPeerAddress(f),
 			toPeerAddress(t),
 			len(l),
 			unite(u) { memcpy(data,d,l); }
 		uint64_t timestamp;
+		Address fromPeerAddress;
 		Address toPeerAddress;
 		unsigned int len;
 		bool unite;
 		unsigned char data[ZT_PROTO_MAX_PACKET_LENGTH];
 	};
-	std::map< Address,std::list<_SQE> > _sendViaClusterQueue;
+	std::list<_SQE> _sendViaClusterQueue;
 	Mutex _sendViaClusterQueue_m;
 
 	uint64_t _lastFlushed;

tests/http/agent.js  +6 -3

@@ -3,14 +3,17 @@
 // ---------------------------------------------------------------------------
 // Customizable parameters:
 
+// Time between startup and first test attempt
+var TEST_STARTUP_LAG = 10000;
+
 // Maximum interval between test attempts (actual timing is random % this)
 var TEST_INTERVAL_MAX = (60000 * 10);
 
 // Test timeout in ms
-var TEST_TIMEOUT = 60000;
+var TEST_TIMEOUT = 30000;
 
 // Where should I get other agents' IDs and POST results?
-var SERVER_HOST = '52.32.186.221';
+var SERVER_HOST = '52.26.196.147';
 var SERVER_PORT = 18080;
 
 // Which port do agents use to serve up test data to each other?
@@ -186,5 +189,5 @@ app.get('/',function(req,res) { return res.status(200).send(payload); });
 
 var expressServer = app.listen(AGENT_PORT,function () {
 	// Start timeout-based loop
-	doTest();
+	setTimeout(doTest,TEST_STARTUP_LAG);
 });