
The run script now understands the new update test; adds an initial update implementation for Gemini.

Patrick Falls · 12 years ago
commit d3194f9dcf

+ 2 - 0
benchmarker.py

@@ -515,11 +515,13 @@ class Benchmarker:
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
+      self.results['rawData']['update'] = dict()
      self.results['weighttpData'] = dict()
      self.results['weighttpData']['json'] = dict()
      self.results['weighttpData']['db'] = dict()
      self.results['weighttpData']['query'] = dict()
      self.results['weighttpData']['fortune'] = dict()
+      self.results['weighttpData']['update'] = dict()
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
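These rawData/weighttpData dictionaries are the buckets that report_results later files data into, keyed by test type. A minimal sketch of how the new 'update' bucket might be filled, assuming report_results simply indexes by test name (a guess at the shape, not the actual implementation):

  def report_results(self, framework, test, requests, latency, results,
                     total_time, errors, total_requests):
    # 'test' is one of: json, db, query, fortune, update
    self.results['rawData'][test][framework.name] = results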

+ 32 - 1
framework_test.py

@@ -151,6 +151,16 @@ class FrameworkTest:
      self.fortune_url_passed = True
    except (AttributeError, subprocess.CalledProcessError) as e:
      self.fortune_url_passed = False
+
+    # Update
+    try:
+      print "VERIFYING Update (" + self.update_url + ") ..."
+      url = self.benchmarker.generate_url(self.update_url, self.port)
+      subprocess.check_call(["curl", "-f", url])
+      print ""
+      self.update_url_passed = True
+    except (AttributeError, subprocess.CalledProcessError) as e:
+      self.update_url_passed = False
  ############################################################
  # End verify_urls
  ############################################################
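Verification relies on benchmarker.generate_url to turn the partial update_url into something curl can fetch. A plausible one-line sketch, assuming the Benchmarker holds the target host in a host attribute (that attribute name is a guess):

  def generate_url(self, url, port):
    # "localhost" + ":8080" + "/update?queries=" -> "localhost:8080/update?queries="
    return self.host + ":" + str(port) + url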
@@ -170,6 +180,8 @@ class FrameworkTest:
        return True
      if type == 'fortune' and self.fortune_url != None:
        return True
+      if type == 'update' and self.update_url != None:
+        return True
    except AttributeError:
      pass

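contains_type is how the harness can skip frameworks that do not expose a given test. Illustrative usage only, not code from this commit:

  if self.type != 'all' and not test.contains_type(self.type):
    continue  # e.g. no update_url defined, so skip this framework for --type update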
@@ -237,6 +249,19 @@ class FrameworkTest:
        print "Complete"
    except AttributeError:
      pass
+
+    # update
+    try:
+      if self.update_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "update"):
+        sys.stdout.write("BENCHMARKING Update ... ") 
+        remote_script = self.__generate_query_script(self.update_url, self.port)
+        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'update'))
+        results = self.__parse_test('update')
+        self.benchmarker.report_results(framework=self, test="update", requests=results['requests'], latency=results['latency'],
+          results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+        print "Complete"
+    except AttributeError:
+      pass
  ############################################################
  # End benchmark
  ############################################################
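The raw weighttp output for the update run lands wherever benchmarker.output_file points. A plausible sketch of that helper, assuming per-framework result directories (the path layout is a guess):

  import os

  def output_file(self, test_name, test_type):
    # e.g. results/gemini/update.txt
    return os.path.join(self.result_directory, test_name, test_type + ".txt")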
@@ -264,11 +289,17 @@ class FrameworkTest:
       self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
       self.benchmarker.report_results(framework=self, test="query", requests=results['requests'], latency=results['latency'],
         results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
         results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
 
 
-    # Query
+    # Fortune
    if os.path.exists(self.benchmarker.output_file(self.name, 'fortune')):
      results = self.__parse_test('fortune')
      self.benchmarker.report_results(framework=self, test="fortune", requests=results['requests'], latency=results['latency'],
        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
+
+    # Update
+    if os.path.exists(self.benchmarker.output_file(self.name, 'update')):
+      results = self.__parse_test('update')
+      self.benchmarker.report_results(framework=self, test="update", requests=results['requests'], latency=results['latency'],
+        results=results['results'], total_time=results['total_time'], errors=results['errors'], total_requests=results['totalRequests'])
  ############################################################
  # End parse_all
  ############################################################
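Judging from the report_results calls above, __parse_test('update') must return a dict with at least the following keys; the values here are placeholders, not real output:

  results = {
    'results': [],        # raw data points per concurrency level
    'requests': [],       # requests-per-second figures
    'latency': [],        # latency figures
    'total_time': 0,
    'errors': 0,
    'totalRequests': 0
  }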

+ 23 - 0
gemini/Source/hello/home/handler/HelloHandler.java

@@ -73,4 +73,27 @@ public class HelloHandler
    return mustache("fortunes", fortunes);
  }

+  /**
+   * Return a list of World objects as JSON, selected randomly from the World
+   * table.  Each row retrieved has its randomNumber field updated and then
+   * persisted.  For consistency, we assume the table has 10,000 rows.
+   */
+  @PathSegment
+  public boolean update()
+  {
+    final Random random = ThreadLocalRandom.current();
+    final int queries = context().getInt("queries", 1, 1, 500);
+    final World[] worlds = new World[queries];
+
+    for (int i = 0; i < queries; i++)
+    {
+      worlds[i] = store.get(World.class, random.nextInt(DB_ROWS) + 1);
+      worlds[i].setRandomNumber(random.nextInt(DB_ROWS) + 1);
+    }
+
+    store.putAll(Arrays.asList(worlds));
+    
+    return json(worlds);
+  }
+
 }
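The method assumes java.util.Arrays, java.util.Random, and java.util.concurrent.ThreadLocalRandom are imported elsewhere in HelloHandler, and that DB_ROWS is the same 10,000-row constant the other handlers use. A request such as /update?queries=2 should then return the two updated rows as JSON, along these lines (ids and numbers are random; the exact serialization depends on Gemini's json helper):

  [{"id":4174,"randomNumber":331},{"id":51,"randomNumber":6544}]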

+ 1 - 0
gemini/benchmark_config

@@ -7,6 +7,7 @@
       "db_url": "/db",
       "db_url": "/db",
       "query_url": "/db?queries=",
       "query_url": "/db?queries=",
       "fortune_url": "/fortunes",
       "fortune_url": "/fortunes",
+      "update_url": "/update?queries=",
       "port": 8080,
       "port": 8080,
       "sort": 0
       "sort": 0
     }
     }
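Like query_url, update_url is deliberately partial: the harness appends the desired row count when it builds the benchmark URL, for example:

  url = "/update?queries=" + str(20)   # -> "/update?queries=20"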

+ 1 - 1
run-tests.py

@@ -19,7 +19,7 @@ parser.add_argument('-p', dest='password_prompt', action='store_true')
 parser.add_argument('--install-software', action='store_true', help='runs the installation script before running the rest of the commands')
 parser.add_argument('--test', nargs='+', help='names of tests to run')
 parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
-parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune'], default='all', help='which type of test to run')
+parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update'], default='all', help='which type of test to run')
 parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
 parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
 parser.add_argument('--next-sort', action='store_true', default=False, help='displays the next value that can be used as a sort value')
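With 'update' added to the choices, the new test can be exercised on its own. Per the --mode help text, verify mode only starts each test, curls its URLs, and shuts down, so a quick sanity check looks like:

  python run-tests.py --type update --mode verify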