Browse Source

Merge branch 'master' of gitlab.techempower.com:techempower/frameworkbenchmarks

Patrick Falls 12 years ago
parent
commit
db72217cdd

+ 1 - 14
benchmarker.py

@@ -175,18 +175,11 @@ class Benchmarker:
   ############################################################
   ############################################################
   # report_results
   # report_results
   ############################################################
   ############################################################
-  def report_results(self, framework, test, results, latency, requests, total_time, errors, total_requests):
+  def report_results(self, framework, test, results):
     if test not in self.results['rawData'].keys():
     if test not in self.results['rawData'].keys():
       self.results['rawData'][test] = dict()
       self.results['rawData'][test] = dict()
-      self.results['weighttpData'][test] = dict()
 
 
     self.results['rawData'][test][framework.sort] = results
     self.results['rawData'][test][framework.sort] = results
-    self.results['weighttpData'][test][framework.sort] = dict()
-    self.results['weighttpData'][test][framework.sort]['latency'] = latency
-    self.results['weighttpData'][test][framework.sort]['requests'] = requests
-    self.results['weighttpData'][test][framework.sort]['totalTime'] = total_time
-    self.results['weighttpData'][test][framework.sort]['errors'] = errors
-    self.results['weighttpData'][test][framework.sort]['totalRequests'] = total_requests
 
 
   ############################################################
   ############################################################
   # End report_results
   # End report_results
@@ -516,12 +509,6 @@ class Benchmarker:
       self.results['rawData']['query'] = dict()
       self.results['rawData']['query'] = dict()
       self.results['rawData']['fortune'] = dict()
       self.results['rawData']['fortune'] = dict()
       self.results['rawData']['update'] = dict()
       self.results['rawData']['update'] = dict()
-      self.results['weighttpData'] = dict()
-      self.results['weighttpData']['json'] = dict()
-      self.results['weighttpData']['db'] = dict()
-      self.results['weighttpData']['query'] = dict()
-      self.results['weighttpData']['fortune'] = dict()
-      self.results['weighttpData']['update'] = dict()
     else:
     else:
       #for x in self.__gather_tests():
       #for x in self.__gather_tests():
       #  if x.name not in self.results['frameworks']:
       #  if x.name not in self.results['frameworks']:

+ 17 - 0
bottle/app.py

@@ -68,5 +68,22 @@ def get_random_world_single_raw():
   connection.close()
   connection.close()
   return ujson.dumps(worlds)
   return ujson.dumps(worlds)
 
 
+@app.route("/update")
+def update_worlds(db):
+  num_queries = int(request.query.queries or '1')
+  if num_queries < 1:
+    num_queries = 1
+  elif num_queries > 500:
+    num_queries = 500
+
+  worlds = []
+  for i in range(num_queries):
+    wid = randint(1, 10000)
+    world = db.query(World).get(wid)
+    world.randomNumber = randint(1, 10000)
+    db.commit()
+    worlds.append(world.serialize)
+  return ujson.dumps(worlds)
+
 if __name__ == "__main__":
 if __name__ == "__main__":
     app.run()
     app.run()

+ 1 - 0
bottle/benchmark_config

@@ -6,6 +6,7 @@
       "json_url": "/json",
       "json_url": "/json",
       "db_url": "/dbs",
       "db_url": "/dbs",
       "query_url": "/db?queries=",
       "query_url": "/db?queries=",
+      "update_url": "/update?queries=",
       "port": 8080,
       "port": 8080,
       "sort": 88
       "sort": 88
     },
     },

+ 51 - 72
framework_test.py

@@ -203,8 +203,7 @@ class FrameworkTest:
         remote_script = self.__generate_concurrency_script(self.json_url, self.port)
         remote_script = self.__generate_concurrency_script(self.json_url, self.port)
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
         results = self.__parse_test('json')
         results = self.__parse_test('json')
-        self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        self.benchmarker.report_results(framework=self, test="json", results=results['results'])
 
 
         print "Complete"
         print "Complete"
     except AttributeError:
     except AttributeError:
@@ -217,8 +216,7 @@ class FrameworkTest:
         remote_script = self.__generate_concurrency_script(self.db_url, self.port)
         remote_script = self.__generate_concurrency_script(self.db_url, self.port)
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
         results = self.__parse_test('db')
         results = self.__parse_test('db')
-        self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        self.benchmarker.report_results(framework=self, test="db", results=results['results'])
 
 
         print "Complete"
         print "Complete"
     except AttributeError:
     except AttributeError:
@@ -231,8 +229,7 @@ class FrameworkTest:
         remote_script = self.__generate_query_script(self.query_url, self.port)
         remote_script = self.__generate_query_script(self.query_url, self.port)
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
         results = self.__parse_test('query')
         results = self.__parse_test('query')
-        self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        self.benchmarker.report_results(framework=self, test="query", results=results['results'])
         print "Complete"
         print "Complete"
     except AttributeError:
     except AttributeError:
       pass
       pass
@@ -244,8 +241,7 @@ class FrameworkTest:
         remote_script = self.__generate_concurrency_script(self.fortune_url, self.port)
         remote_script = self.__generate_concurrency_script(self.fortune_url, self.port)
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'fortune'))
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'fortune'))
         results = self.__parse_test('fortune')
         results = self.__parse_test('fortune')
-        self.benchmarker.report_results(framework=self, test="fortune", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
         print "Complete"
         print "Complete"
     except AttributeError:
     except AttributeError:
       pass
       pass
@@ -257,8 +253,7 @@ class FrameworkTest:
         remote_script = self.__generate_query_script(self.update_url, self.port)
         remote_script = self.__generate_query_script(self.update_url, self.port)
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'update'))
         self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'update'))
         results = self.__parse_test('update')
         results = self.__parse_test('update')
-        self.benchmarker.report_results(framework=self, test="update", requests=results['requests'], latency=results['latency'],
-          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        self.benchmarker.report_results(framework=self, test="update", results=results['results'])
         print "Complete"
         print "Complete"
     except AttributeError:
     except AttributeError:
       pass
       pass
@@ -274,32 +269,27 @@ class FrameworkTest:
     # JSON
     # JSON
     if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
     if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
       results = self.__parse_test('json')
       results = self.__parse_test('json')
-      self.benchmarker.report_results(framework=self, test="json", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+      self.benchmarker.report_results(framework=self, test="json", results=results['results'])
     
     
     # DB
     # DB
     if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
     if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
       results = self.__parse_test('db')
       results = self.__parse_test('db')
-      self.benchmarker.report_results(framework=self, test="db", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+      self.benchmarker.report_results(framework=self, test="db", results=results['results'])
     
     
     # Query
     # Query
     if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
     if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
       results = self.__parse_test('query')
       results = self.__parse_test('query')
-      self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+      self.benchmarker.report_results(framework=self, test="query", results=results['results'])
 
 
     # Fortune
     # Fortune
     if os.path.exists(self.benchmarker.output_file(self.name, 'fortune')):
     if os.path.exists(self.benchmarker.output_file(self.name, 'fortune')):
       results = self.__parse_test('fortune')
       results = self.__parse_test('fortune')
-      self.benchmarker.report_results(framework=self, test="fortune", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+      self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
 
 
     # Update
     # Update
     if os.path.exists(self.benchmarker.output_file(self.name, 'update')):
     if os.path.exists(self.benchmarker.output_file(self.name, 'update')):
       results = self.__parse_test('update')
       results = self.__parse_test('update')
-      self.benchmarker.report_results(framework=self, test="update", requests=results['requests'], latency=results['latency'],
-        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+      self.benchmarker.report_results(framework=self, test="update", results=results['results'])
   ############################################################
   ############################################################
   # End parse_all
   # End parse_all
   ############################################################
   ############################################################
@@ -311,94 +301,83 @@ class FrameworkTest:
     try:
     try:
       results = dict()
       results = dict()
       results['results'] = []
       results['results'] = []
-      results['total_time'] = 0
-      results['totalRequests'] = 0
-      results['latency'] = dict()
-      results['latency']['avg'] = 0
-      results['latency']['stdev'] = 0
-      results['latency']['max'] = 0
-      results['latency']['stdevPercent'] = 0
-      results['requests'] = dict()
-      results['requests']['avg'] = 0
-      results['requests']['stdev'] = 0
-      results['requests']['max'] = 0
-      results['requests']['stdevPercent'] = 0
-      results['errors'] = dict()
-      results['errors']['connect'] = 0
-      results['errors']['read'] = 0
-      results['errors']['write'] = 0
-      results['errors']['timeout'] = 0
-      results['errors']['5xx'] = 0
+      
       with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
       with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
-        is_warmup = False
+        is_warmup = True
+        rawData = None
         for line in raw_data:
         for line in raw_data:
 
 
           if "Queries:" in line or "Concurrency:" in line:
           if "Queries:" in line or "Concurrency:" in line:
             is_warmup = False
             is_warmup = False
+            rawData = None
             continue
             continue
           if "Warmup" in line or "Primer" in line:
           if "Warmup" in line or "Primer" in line:
             is_warmup = True
             is_warmup = True
             continue
             continue
 
 
           if not is_warmup:
           if not is_warmup:
-            if "Requests/sec:" in line:
-              m = re.search("Requests/sec:\s+([0-9]+)", line)
-              results['results'].append(m.group(1))
+            if rawData == None:
+              rawData = dict()
+              results['results'].append(rawData)
+
+            #if "Requests/sec:" in line:
+            #  m = re.search("Requests/sec:\s+([0-9]+)", line)
+            #  rawData['reportedResults'] = m.group(1)
               
               
             # search for weighttp data such as succeeded and failed.
             # search for weighttp data such as succeeded and failed.
             if "Latency" in line:
             if "Latency" in line:
               m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
               m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
               if len(m) == 4:
               if len(m) == 4:
-                results['latency']['avg'] = m[0]
-                results['latency']['stdev'] = m[1]
-                results['latency']['max'] = m[2]
-                results['latency']['stdevPercent'] = m[3]
+                rawData['latencyAvg'] = m[0]
+                rawData['latencyStdev'] = m[1]
+                rawData['latencyMax'] = m[2]
+            #    rawData['latencyStdevPercent'] = m[3]
             
             
-            if "Req/Sec" in line:
-              m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
-              if len(m) == 4:
-                results['requests']['avg'] = m[0]
-                results['requests']['stdev'] = m[1]
-                results['requests']['max'] = m[2]
-                results['requests']['stdevPercent'] = m[3]
+            #if "Req/Sec" in line:
+            #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
+            #  if len(m) == 4:
+            #    rawData['requestsAvg'] = m[0]
+            #    rawData['requestsStdev'] = m[1]
+            #    rawData['requestsMax'] = m[2]
+            #    rawData['requestsStdevPercent'] = m[3]
               
               
-            if "requests in" in line:
-              m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
-              if m != None: 
-                # parse out the raw time, which may be in minutes or seconds
-                raw_time = m.group(1)
-                if "ms" in raw_time:
-                  results['total_time'] += float(raw_time[:len(raw_time)-2]) / 1000.0
-                elif "s" in raw_time:
-                  results['total_time'] += float(raw_time[:len(raw_time)-1])
-                elif "m" in raw_time:
-                  results['total_time'] += float(raw_time[:len(raw_time)-1]) * 60.0
-                elif "h" in raw_time:
-                  results['total_time'] += float(raw_time[:len(raw_time)-1]) * 3600.0
+            #if "requests in" in line:
+            #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
+            #  if m != None: 
+            #    # parse out the raw time, which may be in minutes or seconds
+            #    raw_time = m.group(1)
+            #    if "ms" in raw_time:
+            #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
+            #    elif "s" in raw_time:
+            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
+            #    elif "m" in raw_time:
+            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
+            #    elif "h" in raw_time:
+            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
            
            
             if "requests in" in line:
             if "requests in" in line:
               m = re.search("([0-9]+) requests in", line)
               m = re.search("([0-9]+) requests in", line)
               if m != None: 
               if m != None: 
-                results['totalRequests'] += int(m.group(1))
+                rawData['totalRequests'] = int(m.group(1))
             
             
             if "Socket errors" in line:
             if "Socket errors" in line:
               if "connect" in line:
               if "connect" in line:
                 m = re.search("connect ([0-9]+)", line)
                 m = re.search("connect ([0-9]+)", line)
-                results['errors']['connect'] += int(m.group(1))
+                rawData['connect'] = int(m.group(1))
               if "read" in line:
               if "read" in line:
                 m = re.search("read ([0-9]+)", line)
                 m = re.search("read ([0-9]+)", line)
-                results['errors']['read'] += int(m.group(1))
+                rawData['read'] = int(m.group(1))
               if "write" in line:
               if "write" in line:
                 m = re.search("write ([0-9]+)", line)
                 m = re.search("write ([0-9]+)", line)
-                results['errors']['write'] += int(m.group(1))
+                rawData['write'] = int(m.group(1))
               if "timeout" in line:
               if "timeout" in line:
                 m = re.search("timeout ([0-9]+)", line)
                 m = re.search("timeout ([0-9]+)", line)
-                results['errors']['timeout'] += int(m.group(1))
+                rawData['timeout'] = int(m.group(1))
             
             
             if "Non-2xx" in line:
             if "Non-2xx" in line:
               m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
               m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
               if m != None: 
               if m != None: 
-                results['errors']['5xx'] += int(m.group(1))
+                rawData['5xx'] = int(m.group(1))
               
               
 
 
       return results
       return results

File diff suppressed because it is too large
+ 0 - 0
results/ec2/20130415193456/results.json


File diff suppressed because it is too large
+ 0 - 0
results/ec2/20130430223518/results.json


File diff suppressed because it is too large
+ 0 - 0
results/i7/20130415095717/results.json


File diff suppressed because it is too large
+ 0 - 0
results/i7/20130430144443/results.json


Some files were not shown because too many files changed in this diff