Browse Source

Replace concurrency and query arguments with single list argument

Hamilton Turner 10 years ago
parent
commit
b8ec3ef621
3 changed files with 25 additions and 39 deletions
  1. +1 −20
      toolset/benchmark/benchmarker.py
  2. +21 −14
      toolset/benchmark/framework_test.py
  3. +3 −5
      toolset/run-tests.py

+1 −20
toolset/benchmark/benchmarker.py

@@ -868,25 +868,6 @@ class Benchmarker:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
-
-    # Setup the concurrency levels array. This array goes from
-    # starting_concurrency to max concurrency, doubling each time
-    self.concurrency_levels = []
-    concurrency = self.starting_concurrency
-    while concurrency <= self.max_concurrency:
-      self.concurrency_levels.append(concurrency)
-      concurrency = concurrency * 2
-
-    # Setup query interval array
-    # starts at 1, and goes up to max_queries, using the query_interval
-    self.query_intervals = []
-    queries = 1
-    while queries <= self.max_queries:
-      self.query_intervals.append(queries)
-      if queries == 1:
-        queries = 0
-
-      queries = queries + self.query_interval
    
    # Load the latest data
    #self.latest = None
@@ -920,7 +901,7 @@ class Benchmarker:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
-      self.results['queryIntervals'] = self.query_intervals
+      self.results['queryIntervals'] = self.query_levels
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()

+21 −14
toolset/benchmark/framework_test.py

@@ -57,7 +57,7 @@ class FrameworkTest:
    echo ""
    ntpdate -s pool.ntp.org
 
-    for c in {interval}
+    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
@@ -100,7 +100,7 @@ class FrameworkTest:
    echo ""
    ntpdate -s pool.ntp.org
 
-    for c in {interval}
+    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
@@ -891,7 +891,7 @@ class FrameworkTest:
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.plaintext_url_passed:
-          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
+          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, levels=[256,1024,4096,16384], pipeline="16")
          self.__begin_logging(self.PLAINTEXT)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
@@ -1078,14 +1078,18 @@ class FrameworkTest:
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
-  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
-    if len(intervals) == 0:
-      intervals = self.benchmarker.concurrency_levels
+  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", levels=[], pipeline=""):
+    if len(levels) == 0:
+      levels = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
-    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency, 
-      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration, 
-      interval=" ".join("{}".format(item) for item in intervals), 
-      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
+    return self.concurrency_template.format(
+      max_concurrency=max(self.benchmarker.concurrency_levels), 
+      max_threads=self.benchmarker.threads, 
+      name=self.name, 
+      duration=self.benchmarker.duration, 
+      levels=" ".join(levels), 
+      server_host=self.benchmarker.server_host, 
+      port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
@@ -1099,10 +1103,13 @@ class FrameworkTest:
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
-    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency, 
-      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration, 
-      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals), 
-      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
+    return self.query_template.format(name=self.name, 
+      max_concurrency=max(self.benchmarker.concurrency_levels), 
+      max_threads=self.benchmarker.threads, 
+      duration=self.benchmarker.duration, 
+      levels=" ".join(self.benchmarker.query_levels), 
+      server_host=self.benchmarker.server_host, 
+      port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

+3 −5
toolset/run-tests.py

@@ -154,12 +154,10 @@ def main(argv=None):
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
 
    # Benchmark options
-    parser.add_argument('--max-concurrency', default=256, help='the maximum number of HTTP connections that wrk will keep open. The query tests will run at this maximum', type=int)
-    parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
-    parser.add_argument('--query-interval', default=5, type=int, help='Query tests will go from 1 query to max queries in increments of interval queries')
-    parser.add_argument('--max-threads', default=maxThreads, help='The max number of threads to run wrk at. This should be set to the number of cores for your client system.', type=int)
+    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
+    parser.add_argument('--query-levels', default=[5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction) 
+    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
-    parser.add_argument('--starting-concurrency', default=8, type=int)
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
 
    # Misc Options