Browse Source

Results file now parses out any errors reported by wrk. The previous result files have been updated to include these reported errors.

Patrick Falls 12 years ago
Parent
Commit
310c763ce9

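For context, when wrk hits socket errors during a run it reports them in a summary line of its output; a representative line (the counts here are illustrative, not taken from this commit) looks like:

  Socket errors: connect 11, read 153, write 0, timeout 47

The parsing added below pulls these four counters out of the raw output files and carries them into results.json.
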
+ 2 - 1
benchmarker.py

@@ -175,7 +175,7 @@ class Benchmarker:
   ############################################################
   # report_results
   ############################################################
-  def report_results(self, framework, test, results, latency, requests, total_time):
+  def report_results(self, framework, test, results, latency, requests, total_time, errors):
     # Try to get the id in the result array if it exists.
     try:
       framework_id = str(self.results['frameworks'].index(framework.name))
@@ -188,6 +188,7 @@ class Benchmarker:
     self.results['weighttpData'][test][framework_id]['latency'] = latency
     self.results['weighttpData'][test][framework_id]['requests'] = requests
     self.results['weighttpData'][test][framework_id]['totalTime'] = total_time
+    self.results['weighttpData'][test][framework_id]['errors'] = errors
 
   ############################################################
   # End report_results

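With the extra errors parameter, each per-framework entry under weighttpData now carries an error block alongside the existing metrics. A minimal sketch of the resulting shape, using only the keys visible in the diff (the "json" test type, the framework id "0", and the concrete values are illustrative stand-ins):

# Illustrative structure of self.results['weighttpData'] after report_results;
# the field names come from the diff, the values are made up.
{
    "json": {
        "0": {
            "latency": {...},
            "requests": {...},
            "totalTime": 142.7,
            "errors": {"connect": 0, "read": 0, "write": 0, "timeout": 0}
        }
    }
}
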
+ 25 - 6
framework_test.py

@@ -159,7 +159,7 @@ class FrameworkTest:
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
         results = self.__parse_test('json')
         self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])
 
         print "Complete"
     except AttributeError:
@@ -173,7 +173,7 @@ class FrameworkTest:
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
         results = self.__parse_test('db')
         self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])
 
         print "Complete"
     except AttributeError:
@@ -187,7 +187,7 @@ class FrameworkTest:
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
         results = self.__parse_test('query')
         self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])
         print "Complete"
     except AttributeError:
       pass
@@ -204,19 +204,19 @@ class FrameworkTest:
     if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
       results = self.__parse_test('json')
       self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])
     
     # DB
     if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
       results = self.__parse_test('db')
       self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])
     
     # Query
     if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
       results = self.__parse_test('query')
       self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])
   ############################################################
   # End parse_all
   ############################################################
@@ -239,6 +239,11 @@ class FrameworkTest:
       results['requests']['stdev'] = 0
       results['requests']['max'] = 0
       results['requests']['stdevPercent'] = 0
+      results['errors'] = dict()
+      results['errors']['connect'] = 0
+      results['errors']['read'] = 0
+      results['errors']['write'] = 0
+      results['errors']['timeout'] = 0
       with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
         is_warmup = False
         for line in raw_data:
@@ -285,6 +290,20 @@ class FrameworkTest:
                   results['total_time'] += float(raw_time[:len(raw_time)-1]) * 60.0
                 elif "h" in raw_time:
                   results['total_time'] += float(raw_time[:len(raw_time)-1]) * 3600.0
+            
+            if "Socket errors" in line:
+              if "connect" in line:
+                m = re.search("connect ([0-9]+)", line)
+                results['errors']['connect'] += int(m.group(1))
+              if "read" in line:
+                m = re.search("read ([0-9]+)", line)
+                results['errors']['read'] += int(m.group(1))
+              if "write" in line:
+                m = re.search("write ([0-9]+)", line)
+                results['errors']['write'] += int(m.group(1))
+              if "timeout" in line:
+                m = re.search("timeout ([0-9]+)", line)
+                results['errors']['timeout'] += int(m.group(1))
 
       return results
     except IOError:

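The new "Socket errors" handling in __parse_test can be exercised in isolation. Here is a standalone sketch of the same logic, with the four per-kind branches from the diff collapsed into a loop (the sample line and its counts are made up):

import re

# Representative wrk summary line; wrk only prints it when errors occurred.
line = "  Socket errors: connect 11, read 153, write 0, timeout 47"

# The same counters __parse_test initializes before reading the raw output file.
errors = {'connect': 0, 'read': 0, 'write': 0, 'timeout': 0}

if "Socket errors" in line:
    for kind in ('connect', 'read', 'write', 'timeout'):
        m = re.search(kind + " ([0-9]+)", line)
        if m:  # the diff guards with `if kind in line` instead; same effect here
            errors[kind] += int(m.group(1))

print(errors)  # {'connect': 11, 'read': 153, 'write': 0, 'timeout': 47}
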
+ 0 - 0
phreeze/__init__.py


The file diff is suppressed because it is too large
+ 0 - 0
results/ec2/20130404200504/results.json


The file diff is suppressed because it is too large
+ 0 - 0
results/i7/20130403093331/results.json


+ 0 - 0
unfiltered/__init__.py


Some files were not shown because too many files changed in this diff