
Tweak some cluster parameters, and change the test code in tests/http to dramatically reduce the amount of data transferred to/from the test master. Also add the results of the first successful 50k test.

Adam Ierymenko 9 years ago
commit 2854f14966

+ 2 - 2
node/Cluster.hpp

@@ -56,7 +56,7 @@
 /**
  * Desired period between doPeriodicTasks() in milliseconds
  */
-#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 25
+#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 50
 
 /**
  * How often to flush outgoing message queues (maximum interval)
@@ -71,7 +71,7 @@
 /**
  * Expiration time for send queue entries
  */
-#define ZT_CLUSTER_QUEUE_EXPIRATION 1500
+#define ZT_CLUSTER_QUEUE_EXPIRATION 5000
 
 namespace ZeroTier {
 

BIN
tests/http/2015-11-10_01_50000.out.xz


+ 1 - 1
tests/http/agent.js

@@ -115,7 +115,7 @@ function doTest()
 				try {
 					var peers = JSON.parse(body);
 					if (Array.isArray(peers))
-						allOtherAgents = peers;
+						allOtherAgents = allOtherAgents.concat(peers);
 				} catch (e) {}
 			}
 

+ 1 - 1
tests/http/big-test-start.sh

@@ -4,7 +4,7 @@
 # 250 with a 16GB RAM VM like Amazon m4.xlarge seems good
 NUM_CONTAINERS=250
 CONTAINER_IMAGE=zerotier/http-test
-SCALE_UP_DELAY=4
+SCALE_UP_DELAY=10
 
 export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
 

+ 41 - 39
tests/http/crunch-results.js

@@ -3,56 +3,58 @@
 // suitable for graphing.
 //
 
-// Average over this interval of time
-var GRAPH_INTERVAL = 60000;
+// Number of requests per statistical bracket
+var BRACKET_SIZE = 1000;
 
 // Number of bytes expected from each test
 var EXPECTED_BYTES = 5000;
 
 var readline = require('readline');
 var rl = readline.createInterface({
-  input: process.stdin,
-  output: process.stdout,
-  terminal: false
+	input: process.stdin,
+	output: process.stdout,
+	terminal: false
 });
 
-var startTS = 0;
-
 var count = 0.0;
-var totalFailures = 0;
-var totalPartialFailures = 0;
+var overallCount = 0.0;
+var totalFailures = 0.0;
+var totalOverallFailures = 0.0;
 var totalMs = 0;
 var totalData = 0;
+var devices = {};
 
 rl.on('line',function(line) {
-  line = line.trim();
-  var ls = line.split(',');
-  if (ls.length == 7) {
-    var ts = parseInt(ls[0]);
-    var from = ls[1];
-    var to = ls[2];
-    var ms = parseFloat(ls[3]);
-    var bytes = parseInt(ls[4]);
-    var timedOut = (ls[5] == 'true') ? true : false;
-    var errMsg = ls[6];
-
-    count += 1.0;
-    if ((bytes <= 0)||(timedOut))
-      ++totalFailures;
-    if (bytes !== EXPECTED_BYTES)
-      ++totalPartialFailures;
-    totalMs += ms;
-    totalData += bytes;
-
-    if (startTS === 0) {
-      startTS = ts;
-    } else if (((ts - startTS) >= GRAPH_INTERVAL)&&(count > 0.0)) {
-      console.log(count.toString()+','+(totalMs / count)+','+totalFailures+','+totalPartialFailures+','+totalData);
-
-      count = 0.0;
-      totalFailures = 0;
-      totalPartialFailures = 0;
-      totalMs = 0;
-    }
-  } // else ignore junk
+	line = line.trim();
+	var ls = line.split(',');
+	if (ls.length == 7) {
+		var ts = parseInt(ls[0]);
+		var fromId = ls[1];
+		var toId = ls[2];
+		var ms = parseFloat(ls[3]);
+		var bytes = parseInt(ls[4]);
+		var timedOut = (ls[5] == 'true') ? true : false;
+		var errMsg = ls[6];
+
+		count += 1.0;
+		overallCount += 1.0;
+		if ((bytes !== EXPECTED_BYTES)||(timedOut)) {
+			totalFailures += 1.0;
+			totalOverallFailures += 1.0;
+		}
+		totalMs += ms;
+		totalData += bytes;
+
+		devices[fromId] = true;
+		devices[toId] = true;
+
+		if (count >= BRACKET_SIZE) {
+			console.log(count.toString()+','+overallCount.toString()+','+(totalMs / count)+','+(totalFailures / count)+','+(totalOverallFailures / overallCount)+','+totalData+','+Object.keys(devices).length);
+
+			count = 0.0;
+			totalFailures = 0.0;
+			totalMs = 0;
+			totalData = 0;
+		}
+	} // else ignore junk
 });
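
The rewritten crunch-results.js aggregates per bracket of BRACKET_SIZE requests instead of per fixed time window. Each comma-separated row it prints contains: requests in the bracket, cumulative request count, mean request time in ms, failure rate within the bracket, cumulative failure rate, bytes received in the bracket, and the number of unique device IDs seen so far. Feeding it the included 50k result set is presumably a matter of decompressing tests/http/2015-11-10_01_50000.out.xz and piping its lines into node crunch-results.js; the exact invocation is not part of this commit.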

+ 12 - 3
tests/http/server.js

@@ -30,12 +30,21 @@ app.post('/:agentId',function(req,res) {
 		var resultData = null;
 		try {
 			resultData = JSON.parse(req.rawBody);
-			console.log(Date.now()+','+resultData.source+','+resultData.target+','+resultData.time+','+resultData.bytes+','+resultData.timedOut+',"'+((resultData.error) ? resultData.error : '')+'"');
+			console.log(Date.now().toString()+','+resultData.source+','+resultData.target+','+resultData.time+','+resultData.bytes+','+resultData.timedOut+',"'+((resultData.error) ? resultData.error : '')+'"');
 		} catch (e) {}
 	}
 
-	knownAgents[agentId] = Date.now();
-	return res.status(200).send(JSON.stringify(Object.keys(knownAgents)));
+	var thisUpdate = null;
+	if (!(agentId in knownAgents)) {
+		thisUpdate = Object.keys(knownAgents);
+		for(var id in knownAgents)
+			knownAgents[id].push(agentId);
+		knownAgents[agentId] = [];
+	} else {
+		thisUpdate = knownAgents[agentId];
+		knownAgents[agentId] = [];
+	}
+	return res.status(200).send(JSON.stringify(thisUpdate));
 });
 
 var expressServer = app.listen(SERVER_PORT,function () {
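
Together, the agent.js and server.js changes replace the old behavior, where every agent POST returned the full list of known agents, with a delta scheme: the server keeps, for each agent, a queue of agent IDs that have appeared since that agent's last check-in, returns and clears that queue on each POST, and the agent concatenates the returned IDs onto its allOtherAgents list. A condensed sketch of the server-side bookkeeping, using a hypothetical checkIn() helper rather than the Express route above:

// Minimal sketch of the delta-update bookkeeping implemented above.
// knownAgents maps agentId -> array of agent IDs that joined since that
// agent's last check-in. checkIn() is a hypothetical name, not part of server.js.
var knownAgents = {};

function checkIn(agentId) {
	var thisUpdate;
	if (!(agentId in knownAgents)) {
		// First contact: send every agent seen so far, and queue this new
		// agent's ID for delivery to everyone else.
		thisUpdate = Object.keys(knownAgents);
		for (var id in knownAgents)
			knownAgents[id].push(agentId);
	} else {
		// Returning agent: send only the IDs queued since its last check-in.
		thisUpdate = knownAgents[agentId];
	}
	knownAgents[agentId] = []; // reset this agent's pending queue
	return thisUpdate; // the route JSON-encodes this; the agent concat()s it
}

Because each agent ID is delivered to each other agent only once, the agent must accumulate with allOtherAgents.concat(peers) rather than overwrite, and the per-request payload shrinks from the full agent list to just the newly seen IDs, which is where the reduction in transfer to/from the test master comes from.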