- from benchmark import FortuneHTMLParser
- import importlib
- import os
- import subprocess
- import re
- import traceback
- import json
- class FrameworkTest:
- ##########################################################################################
- # Class variables
- ##########################################################################################
- headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
- headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
-
- accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
- accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
- accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
- concurrency_template = """
-
- echo ""
- echo "---------------------------------------------------------"
- echo " Running Primer {name}"
- echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
- echo "---------------------------------------------------------"
- echo ""
- {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
- sleep 5
-
- echo ""
- echo "---------------------------------------------------------"
- echo " Running Warmup {name}"
- echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
- echo "---------------------------------------------------------"
- echo ""
- {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
- sleep 5
- for c in {interval}
- do
- echo ""
- echo "---------------------------------------------------------"
- echo " Concurrency: $c for {name}"
- echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
- echo "---------------------------------------------------------"
- echo ""
- {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" "http://{server_host}:{port}{url}"
- sleep 2
- done
- """
- query_template = """
-
- echo ""
- echo "---------------------------------------------------------"
- echo " Running Primer {name}"
- echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
- echo "---------------------------------------------------------"
- echo ""
- wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
- sleep 5
-
- echo ""
- echo "---------------------------------------------------------"
- echo " Running Warmup {name}"
- echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
- echo "---------------------------------------------------------"
- echo ""
- wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
- sleep 5
- for c in {interval}
- do
- echo ""
- echo "---------------------------------------------------------"
- echo " Queries: $c for {name}"
- echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
- echo "---------------------------------------------------------"
- echo ""
- wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
- sleep 2
- done
- """
- language = None
- platform = None
- webserver = None
- classification = None
- database = None
- approach = None
- orm = None
- framework = None
- os = None
- database_os = None
- display_name = None
- notes = None
- versus = None
- ############################################################
- # Test Variables
- ############################################################
- JSON = "json"
- DB = "db"
- QUERY = "query"
- FORTUNE = "fortune"
- UPDATE = "update"
- PLAINTEXT = "plaintext"
- ##########################################################################################
- # Public Methods
- ##########################################################################################
- ############################################################
- # Validates the jsonString is a JSON object with a 'message'
- # key with the value "hello, world!" (case-insensitive).
- ############################################################
- def validateJson(self, jsonString):
- obj = json.loads(jsonString)
- if not obj:
- return False
- # Use .get() so a missing "message" key fails validation
- # instead of raising KeyError.
- if not obj.get("message"):
- return False
- return obj["message"].lower() == "hello, world!"
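- # Example (hypothetical payloads): the comparison is case-insensitive,
- # so both of these pass:
- #   self.validateJson('{"message": "Hello, World!"}')  # True
- #   self.validateJson('{"message": "hello, world!"}')  # True
- # while a missing or wrong message fails:
- #   self.validateJson('{"greeting": "hi"}')            # False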
- ############################################################
- # Validates the jsonString is a JSON object that has an "id"
- # and a "randomNumber" key, and that both keys map to
- # integers.
- ############################################################
- def validateDb(self, jsonString):
- obj = json.loads(jsonString)
- if not obj:
- return False
- # isinstance() checks presence and type in one step; .get()
- # avoids a KeyError when a key is missing, and 0 remains a
- # valid integer value.
- if not isinstance(obj.get("id"), int):
- return False
- if not isinstance(obj.get("randomNumber"), int):
- return False
- return True
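- # Example (hypothetical payload): a response such as
- #   '{"id": 4174, "randomNumber": 331}'
- # passes, while string-typed values such as
- #   '{"id": "4174", "randomNumber": "331"}'
- # fail the isinstance() checks above.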
- ############################################################
- # Validates the jsonString is an array with a length of
- # 2, that each entry in the array is a JSON object, that
- # each object has an "id" and a "randomNumber" key, and that
- # both keys map to integers.
- ############################################################
- def validateQuery(self, jsonString):
- arr = json.loads(jsonString)
- if not arr or len(arr) != 2:
- return False
- if not isinstance(arr[0], dict) or not isinstance(arr[1], dict):
- return False
- if not isinstance(arr[0].get("id"), int) or not isinstance(arr[0].get("randomNumber"), int):
- return False
- if not isinstance(arr[1].get("id"), int) or not isinstance(arr[1].get("randomNumber"), int):
- return False
- return True
- ############################################################
- # Validates that the HTML string contains a correctly
- # rendered fortunes table, using FortuneHTMLParser.
- ############################################################
- def validateFortune(self, htmlString):
- parser = FortuneHTMLParser()
- parser.feed(htmlString)
- return parser.isValidFortune()
- ############################################################
- # Validates the jsonString is an array with a length of
- # 2, that each entry in the array is a JSON object, that
- # each object has an "id" and a "randomNumber" key, and that
- # both keys map to integers.
- ############################################################
- def validateUpdate(self, jsonString):
- # The update response has the same shape as the query
- # response, so reuse that validator.
- return self.validateQuery(jsonString)
- ############################################################
- # Validates that the response body is the plaintext
- # "hello, world!" (case-insensitive, ignoring surrounding
- # whitespace).
- ############################################################
- def validatePlaintext(self, body):
- return body.lower().strip() == "hello, world!"
- ############################################################
- # start(benchmarker)
- # Starts the test using its setup file.
- ############################################################
- def start(self, out, err):
- return self.setup_module.start(self.benchmarker, out, err)
- ############################################################
- # End start
- ############################################################
- ############################################################
- # stop(benchmarker)
- # Stops the test using its setup file.
- ############################################################
- def stop(self, out, err):
- return self.setup_module.stop(out, err)
- ############################################################
- # End stop
- ############################################################
- ############################################################
- # verify_urls
- # Verifies each of the URLs for this test. This will simply
- # curl the URL and check its return status. For each URL, a
- # flag is set on this object recording whether it passed.
- ############################################################
- def verify_urls(self, out, err):
- # JSON
- if self.runTests[self.JSON]:
- try:
- out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.json_url, self.port)
- output = self.__curl_url(url, self.JSON, out, err)
- self.json_url_passed = self.validateJson(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.json_url_passed = False
- # DB
- if self.runTests[self.DB]:
- try:
- out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.db_url, self.port)
- output = self.__curl_url(url, self.DB, out, err)
- self.db_url_passed = self.validateDb(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.db_url_passed = False
- # Query
- if self.runTests[self.QUERY]:
- try:
- out.write( "VERIFYING Query (" + self.query_url + "2) ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.query_url + "2", self.port)
- output = self.__curl_url(url, self.QUERY, out, err)
- self.query_url_passed = self.validateQuery(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.query_url_passed = False
- # Fortune
- if self.runTests[self.FORTUNE]:
- try:
- out.write( "VERIFYING Fortune (" + self.fortune_url + ") ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.fortune_url, self.port)
- output = self.__curl_url(url, self.FORTUNE, out, err)
- self.fortune_url_passed = self.validateFortune(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.fortune_url_passed = False
- # Update
- if self.runTests[self.UPDATE]:
- try:
- out.write( "VERIFYING Update (" + self.update_url + "2) ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.update_url + "2", self.port)
- output = self.__curl_url(url, self.UPDATE, out, err)
- self.update_url_passed = self.validateUpdate(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.update_url_passed = False
- # plaintext
- if self.runTests[self.PLAINTEXT]:
- try:
- out.write( "VERIFYING Plaintext (" + self.plaintext_url + ") ...\n" )
- out.flush()
- url = self.benchmarker.generate_url(self.plaintext_url, self.port)
- output = self.__curl_url(url, self.PLAINTEXT, out, err)
- self.plaintext_url_passed = self.validatePlaintext(output)
- except (AttributeError, subprocess.CalledProcessError):
- self.plaintext_url_passed = False
- ############################################################
- # End verify_urls
- ############################################################
- ############################################################
- # contains_type(type)
- # true if this test contains an implementation of the given
- # test type (json, db, etc.)
- ############################################################
- def contains_type(self, type):
- try:
- if type == self.JSON and self.json_url is not None:
- return True
- if type == self.DB and self.db_url is not None:
- return True
- if type == self.QUERY and self.query_url is not None:
- return True
- if type == self.FORTUNE and self.fortune_url is not None:
- return True
- if type == self.UPDATE and self.update_url is not None:
- return True
- if type == self.PLAINTEXT and self.plaintext_url is not None:
- return True
- except AttributeError:
- pass
-
- return False
- ############################################################
- # End contains_type
- ############################################################
- ############################################################
- # benchmark
- # Runs the benchmark for each type of test that it implements
- # JSON/DB/Query.
- ############################################################
- def benchmark(self, out, err):
- # JSON
- if self.runTests[self.JSON]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
- out.write("BENCHMARKING JSON ... ")
- out.flush()
- results = None
- if self.json_url_passed:
- remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
- results = self.__parse_test(self.JSON)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'], passed=self.json_url_passed)
- out.write( "Complete\n" )
- out.flush()
- except AttributeError:
- pass
- # DB
- if self.runTests[self.DB]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.DB:
- out.write("BENCHMARKING DB ... ")
- out.flush()
- results = None
- if self.db_url_passed:
- remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
- results = self.__parse_test(self.DB)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'], passed=self.db_url_passed)
- out.write( "Complete\n" )
- except AttributeError:
- pass
- # Query
- if self.runTests[self.QUERY]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY:
- out.write("BENCHMARKING Query ... ")
- out.flush()
- results = None
- if self.query_url_passed:
- remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
- results = self.__parse_test(self.QUERY)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'], passed=self.query_url_passed)
- out.write( "Complete\n" )
- out.flush()
- except AttributeError:
- pass
- # fortune
- if self.runTests[self.FORTUNE]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE:
- out.write("BENCHMARKING Fortune ... ")
- out.flush()
- results = None
- if self.fortune_url_passed:
- remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
- results = self.__parse_test(self.FORTUNE)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'], passed=self.fortune_url_passed)
- out.write( "Complete\n" )
- out.flush()
- except AttributeError:
- pass
- # update
- if self.runTests[self.UPDATE]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE:
- out.write("BENCHMARKING Update ... ")
- out.flush()
- results = None
- if self.update_url_passed:
- remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
- results = self.__parse_test(self.UPDATE)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'], passed=self.update_url_passed)
- out.write( "Complete\n" )
- out.flush()
- except AttributeError:
- pass
- # plaintext
- if self.runTests[self.PLAINTEXT]:
- try:
- if self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT:
- out.write("BENCHMARKING Plaintext ... ")
- out.flush()
- results = None
- if self.plaintext_url_passed:
- remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
- self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
- results = self.__parse_test(self.PLAINTEXT)
- else:
- results = dict()
- results['results'] = []
- self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'], passed=self.plaintext_url_passed)
- out.write( "Complete\n" )
- out.flush()
- except AttributeError:
- traceback.print_exc()
- ############################################################
- # End benchmark
- ############################################################
-
- ############################################################
- # parse_all
- # Re-parses the raw output file for every test type that has
- # results on disk and reports those results.
- ############################################################
- def parse_all(self):
- # JSON
- if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
- results = self.__parse_test(self.JSON)
- self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
-
- # DB
- if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
- results = self.__parse_test(self.DB)
- self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
-
- # Query
- if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
- results = self.__parse_test(self.QUERY)
- self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
- # Fortune
- if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
- results = self.__parse_test(self.FORTUNE)
- self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
- # Update
- if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
- results = self.__parse_test(self.UPDATE)
- self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
- # Plaintext
- if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
- results = self.__parse_test(self.PLAINTEXT)
- self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
- ############################################################
- # End parse_all
- ############################################################
- ############################################################
- # __parse_test(test_type)
- ############################################################
- def __parse_test(self, test_type):
- try:
- results = dict()
- results['results'] = []
-
- with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
- is_warmup = True
- rawData = None
- for line in raw_data:
- if "Queries:" in line or "Concurrency:" in line:
- is_warmup = False
- rawData = None
- continue
- if "Warmup" in line or "Primer" in line:
- is_warmup = True
- continue
- if not is_warmup:
- if rawData is None:
- rawData = dict()
- results['results'].append(rawData)
- #if "Requests/sec:" in line:
- # m = re.search("Requests/sec:\s+([0-9]+)", line)
- # rawData['reportedResults'] = m.group(1)
-
- # Parse the wrk latency summary line, e.g.
- # "Latency 1.23ms 0.45ms 6.78ms 91.00%".
- if "Latency" in line:
- m = re.findall("([0-9]+\.?[0-9]*(?:us|ms|s|m|%))", line)
- if len(m) == 4:
- rawData['latencyAvg'] = m[0]
- rawData['latencyStdev'] = m[1]
- rawData['latencyMax'] = m[2]
- # rawData['latencyStdevPercent'] = m[3]
-
- #if "Req/Sec" in line:
- # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
- # if len(m) == 4:
- # rawData['requestsAvg'] = m[0]
- # rawData['requestsStdev'] = m[1]
- # rawData['requestsMax'] = m[2]
- # rawData['requestsStdevPercent'] = m[3]
-
- #if "requests in" in line:
- # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
- # if m != None:
- # # parse out the raw time, which may be in minutes or seconds
- # raw_time = m.group(1)
- # if "ms" in raw_time:
- # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
- # elif "s" in raw_time:
- # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
- # elif "m" in raw_time:
- # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
- # elif "h" in raw_time:
- # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
-
- if "requests in" in line:
- m = re.search("([0-9]+) requests in", line)
- if m is not None:
- rawData['totalRequests'] = int(m.group(1))
-
- if "Socket errors" in line:
- if "connect" in line:
- m = re.search("connect ([0-9]+)", line)
- rawData['connect'] = int(m.group(1))
- if "read" in line:
- m = re.search("read ([0-9]+)", line)
- rawData['read'] = int(m.group(1))
- if "write" in line:
- m = re.search("write ([0-9]+)", line)
- rawData['write'] = int(m.group(1))
- if "timeout" in line:
- m = re.search("timeout ([0-9]+)", line)
- rawData['timeout'] = int(m.group(1))
-
- if "Non-2xx" in line:
- m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
- if m != None:
- rawData['5xx'] = int(m.group(1))
-
- return results
- except IOError:
- return None
- ############################################################
- # End __parse_test
- ############################################################
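- # For reference, the wrk summary lines this parser consumes look
- # roughly like the following (hypothetical numbers):
- #   Concurrency: 256 for gemini
- #   Latency 701.32us 548.20us 10.21ms 90.05%
- #   1234567 requests in 60.00s, 189.62MB read
- #   Socket errors: connect 0, read 12, write 0, timeout 3
- # which would yield one entry in results['results'] such as:
- #   {'latencyAvg': '701.32us', 'latencyStdev': '548.20us',
- #    'latencyMax': '10.21ms', 'totalRequests': 1234567,
- #    'connect': 0, 'read': 12, 'write': 0, 'timeout': 3}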
- ##########################################################################################
- # Private Methods
- ##########################################################################################
- ############################################################
- # __run_benchmark(script, output_file)
- # Runs a single benchmark by piping the generated bash
- # script (which invokes wrk) to the client host over SSH.
- # All results are written to output_file.
- ############################################################
- def __run_benchmark(self, script, output_file, err):
- with open(output_file, 'w') as raw_file:
-
- p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
- p.communicate(script)
- err.flush()
- ############################################################
- # End __run_benchmark
- ############################################################
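- # client_ssh_string is expected to be a complete ssh invocation,
- # e.g. "ssh -T user@client-host" (hypothetical); the generated bash
- # script is fed to the remote shell on stdin via communicate().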
- ############################################################
- # __generate_concurrency_script(url, port)
- # Generates the string containing the bash script that will
- # be run on the client to benchmark a single test. This
- # specifically works for the variable concurrency tests (JSON
- # and DB)
- ############################################################
- def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=None, pipeline=""):
- # Use None instead of a mutable default argument; fall back
- # to the benchmarker's configured concurrency levels.
- if not intervals:
- intervals = self.benchmarker.concurrency_levels
- headers = self.__get_request_headers(accept_header)
- return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
- max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
- interval=" ".join("{}".format(item) for item in intervals),
- server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
- pipeline=pipeline)
- ############################################################
- # End __generate_concurrency_script
- ############################################################
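- # A usage sketch (attribute values are hypothetical): with
- # benchmarker.concurrency_levels = [8, 16, 32], a call such as
- #   self.__generate_concurrency_script("/json", 8080, self.accept_json)
- # renders the template with interval="8 16 32", so the generated
- # bash loop benchmarks each concurrency level in turn.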
- ############################################################
- # __generate_query_script(url, port)
- # Generates the string containing the bash script that will
- # be run on the client to benchmark a single test. This
- # specifically works for the variable query tests (Query)
- ############################################################
- def __generate_query_script(self, url, port, accept_header):
- headers = self.__get_request_headers(accept_header)
- return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
- max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
- interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
- server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
- ############################################################
- # End __generate_query_script
- ############################################################
- ############################################################
- # __get_request_headers(accept_header)
- # Generates the complete HTTP header string
- ############################################################
- def __get_request_headers(self, accept_header):
- return self.headers_template.format(accept=accept_header)
- ############################################################
- # End __get_request_headers
- ############################################################
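- # For example, __get_request_headers(self.accept_json) returns the
- # single-line string (wrapped here for readability):
- #   -H 'Host: localhost' -H 'Accept: application/json,text/html;q=0.9,
- #   application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
- #   -H 'Connection: keep-alive'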
- ############################################################
- # __curl_url
- # Dump HTTP response and headers. Throw exception if there
- # is an HTTP error.
- ############################################################
- def __curl_url(self, url, testType, out, err):
- # Use -i to include response headers in the output.
- # Don't use -f, so the body is printed even for an HTTP
- # error status.
- # Use -sS to hide the progress bar but still show errors.
- subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
- out.flush()
- err.flush()
- # HTTP output may not end in a newline, so add that here.
- out.write( "\n" )
- out.flush()
- # Fetch the response body with a second, silent curl
- # invocation so it can be returned to the caller.
- p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
- output = p.communicate()[0]
- # The first invocation could not use -f because then the
- # HTTP response would not be output, so run curl once more
- # with -f so that an HTTP error raises
- # subprocess.CalledProcessError. check_output() is used so
- # the response, already written above, is not echoed again.
- subprocess.check_output(["curl", "-fsS", url], stderr=err)
- err.flush()
- # Return the response body captured above.
- return output
- ##############################################################
- # End __curl_url
- ##############################################################
- ##########################################################################################
- # Constructor
- ##########################################################################################
- def __init__(self, name, directory, benchmarker, runTests, args):
- self.name = name
- self.directory = directory
- self.benchmarker = benchmarker
- self.runTests = runTests
- self.__dict__.update(args)
- # ensure directory has __init__.py file so that we can use it as a Python package
- if not os.path.exists(os.path.join(directory, "__init__.py")):
- open(os.path.join(directory, "__init__.py"), 'w').close()
- self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
- ############################################################
- # End __init__
- ############################################################
- ############################################################
- # End FrameworkTest
- ############################################################
- ##########################################################################################
- # Static methods
- ##########################################################################################
- ##############################################################
- # parse_config(config, directory, benchmarker)
- # parses a config file and returns a list of FrameworkTest
- # objects based on that config file.
- ##############################################################
- def parse_config(config, directory, benchmarker):
- tests = []
- # The config object can specify multiple tests; loop over
- # them and parse each one out.
- for test in config['tests']:
- for key, value in test.iteritems():
- test_name = config['framework']
-
- runTests = dict()
- runTests["json"] = True if value.get("json_url", False) else False
- runTests["db"] = True if value.get("db_url", False) else False
- runTests["query"] = True if value.get("query_url", False) else False
- runTests["fortune"] = True if value.get("fortune_url", False) else False
- runTests["update"] = True if value.get("update_url", False) else False
- runTests["plaintext"] = True if value.get("plaintext_url", False) else False
- # If the test uses the 'default' keyword, nothing is
- # appended to its name. Each config should have at most one
- # default.
- if key != 'default':
- # we need to use the key in the test_name
- test_name = test_name + "-" + key
- tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
- return tests
- ##############################################################
- # End parse_config
- ##############################################################
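- # A sketch of the config shape parse_config expects (keys and values
- # are illustrative, not taken from a real config file):
- #   config = {
- #       "framework": "gemini",
- #       "tests": [{
- #           "default": {"setup_file": "setup", "json_url": "/json", "port": 8080},
- #           "mysql-raw": {"setup_file": "setup_raw", "db_url": "/db", "port": 8080}
- #       }]
- #   }
- # parse_config(config, "frameworks/gemini", benchmarker) would then
- # return two FrameworkTest objects named "gemini" and
- # "gemini-mysql-raw".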