
The results parsing now extracts any errors reported by wrk. The previous result files have been updated to include these reported errors.

Patrick Falls 12 years ago
parent
commit
310c763ce9

+ 2 - 1
benchmarker.py

@@ -175,7 +175,7 @@ class Benchmarker:
  ############################################################
  # report_results
  ############################################################
-  def report_results(self, framework, test, results, latency, requests, total_time):
+  def report_results(self, framework, test, results, latency, requests, total_time, errors):
    # Try to get the id in the result array if it exists.
    try:
      framework_id = str(self.results['frameworks'].index(framework.name))
@@ -188,6 +188,7 @@ class Benchmarker:
    self.results['weighttpData'][test][framework_id]['latency'] = latency
    self.results['weighttpData'][test][framework_id]['requests'] = requests
    self.results['weighttpData'][test][framework_id]['totalTime'] = total_time
+    self.results['weighttpData'][test][framework_id]['errors'] = errors

  ############################################################
  # End report_results
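
For context, report_results now stores the error counters alongside the other per-test stats. A minimal sketch of the resulting entry under self.results['weighttpData'][test][framework_id] follows; the values are made up, and the latency/requests keys are assumed to mirror the stats parsed in __parse_test. Only the 'errors' field comes from this commit.

    # Illustrative sketch only; not real benchmark data.
    entry = {
        'latency':   {'avg': 1.2, 'stdev': 0.3, 'max': 5.0, 'stdevPercent': 25.0},
        'requests':  {'avg': 9000.0, 'stdev': 150.0, 'max': 9500.0, 'stdevPercent': 1.7},
        'totalTime': 60.0,
        'errors':    {'connect': 0, 'read': 12, 'write': 0, 'timeout': 3},   # new field
    }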

+ 25 - 6
framework_test.py

@@ -159,7 +159,7 @@ class FrameworkTest:
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
        results = self.__parse_test('json')
        self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])

        print "Complete"
    except AttributeError:
@@ -173,7 +173,7 @@ class FrameworkTest:
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
        results = self.__parse_test('db')
        self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])

        print "Complete"
    except AttributeError:
@@ -187,7 +187,7 @@ class FrameworkTest:
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
        results = self.__parse_test('query')
        self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'])
+          results=results['results'], total_time=results['total_time'], errors=results['errors'])
        print "Complete"
    except AttributeError:
      pass
@@ -204,19 +204,19 @@ class FrameworkTest:
    if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
      results = self.__parse_test('json')
      self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])

    # DB
    if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
      results = self.__parse_test('db')
      self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])

    # Query
    if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
      results = self.__parse_test('query')
      self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'])
+        results=results['results'], total_time=results['total_time'], errors=results['errors'])
  ############################################################
  # End parse_all
  ############################################################
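
The three blocks in parse_all repeat the same keyword arguments and differ only in the test type. A sketch of an equivalent loop is shown below; it is not the repo's code and assumes the surrounding FrameworkTest method context, where self, os, and __parse_test are available.

    # Equivalent rewrite of the three blocks above, sketched as a loop.
    for test_type in ('json', 'db', 'query'):
        if os.path.exists(self.benchmarker.output_file(self.name, test_type)):
            results = self.__parse_test(test_type)
            self.benchmarker.report_results(framework=self, test=test_type,
                requests=results['requests'], latency=results['latency'],
                results=results['results'], total_time=results['total_time'],
                errors=results['errors'])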
@@ -239,6 +239,11 @@ class FrameworkTest:
      results['requests']['stdev'] = 0
      results['requests']['max'] = 0
      results['requests']['stdevPercent'] = 0
+      results['errors'] = dict()
+      results['errors']['connect'] = 0
+      results['errors']['read'] = 0
+      results['errors']['write'] = 0
+      results['errors']['timeout'] = 0
      with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
        is_warmup = False
        for line in raw_data:
@@ -285,6 +290,20 @@ class FrameworkTest:
                  results['total_time'] += float(raw_time[:len(raw_time)-1]) * 60.0
                elif "h" in raw_time:
                  results['total_time'] += float(raw_time[:len(raw_time)-1]) * 3600.0
+
+            if "Socket errors" in line:
+              if "connect" in line:
+                m = re.search("connect ([0-9]+)", line)
+                results['errors']['connect'] += int(m.group(1))
+              if "read" in line:
+                m = re.search("read ([0-9]+)", line)
+                results['errors']['read'] += int(m.group(1))
+              if "write" in line:
+                m = re.search("write ([0-9]+)", line)
+                results['errors']['write'] += int(m.group(1))
+              if "timeout" in line:
+                m = re.search("timeout ([0-9]+)", line)
+                results['errors']['timeout'] += int(m.group(1))

      return results
    except IOError:
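
wrk reports socket-level failures on a single summary line, which the new branch above picks apart with re.search. Below is a standalone sketch of the same parsing against an illustrative line; the sample output is assumed, not captured from a run, and the += mirrors how the counters accumulate if the output file contains several such lines.

    import re

    # Illustrative wrk summary line; wrk only prints it when errors occurred.
    line = "  Socket errors: connect 0, read 12, write 0, timeout 3"

    errors = {'connect': 0, 'read': 0, 'write': 0, 'timeout': 0}
    if "Socket errors" in line:
        for kind in ('connect', 'read', 'write', 'timeout'):
            m = re.search(kind + " ([0-9]+)", line)
            if m:
                errors[kind] += int(m.group(1))

    print(errors)   # picks up read 12 and timeout 3 from the sample line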

+ 0 - 0
phreeze/__init__.py


The file diff was not shown because the file is too large
+ 0 - 0
results/ec2/20130404200504/results.json


The file diff was not shown because the file is too large
+ 0 - 0
results/i7/20130403093331/results.json


+ 0 - 0
unfiltered/__init__.py


Some files were not shown because too many files changed in this diff