# framework_test.py
import importlib
import json
import os
import pprint
import re
import subprocess
import sys
import time
import traceback
  10. class FrameworkTest:
  11. ##########################################################################################
  12. # Class variables
  13. ##########################################################################################
  14. headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  15. headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
  16. accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  17. accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  18. accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  19. concurrency_template = """
  20. echo ""
  21. echo "---------------------------------------------------------"
  22. echo " Running Primer {name}"
  23. echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
  24. echo "---------------------------------------------------------"
  25. echo ""
  26. {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
  27. sleep 5
  28. echo ""
  29. echo "---------------------------------------------------------"
  30. echo " Running Warmup {name}"
  31. echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
  32. echo "---------------------------------------------------------"
  33. echo ""
  34. {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
  35. sleep 5
  36. for c in {interval}
  37. do
  38. echo ""
  39. echo "---------------------------------------------------------"
  40. echo " Concurrency: $c for {name}"
  41. echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
  42. echo "---------------------------------------------------------"
  43. echo ""
  44. {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
  45. sleep 2
  46. done
  47. """
  48. query_template = """
  49. echo ""
  50. echo "---------------------------------------------------------"
  51. echo " Running Primer {name}"
  52. echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
  53. echo "---------------------------------------------------------"
  54. echo ""
  55. wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
  56. sleep 5
  57. echo ""
  58. echo "---------------------------------------------------------"
  59. echo " Running Warmup {name}"
  60. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
  61. echo "---------------------------------------------------------"
  62. echo ""
  63. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
  64. sleep 5
  65. for c in {interval}
  66. do
  67. echo ""
  68. echo "---------------------------------------------------------"
  69. echo " Queries: $c for {name}"
  70. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
  71. echo "---------------------------------------------------------"
  72. echo ""
  73. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
  74. sleep 2
  75. done
  76. """
  77. language = None
  78. platform = None
  79. webserver = None
  80. classification = None
  81. database = None
  82. approach = None
  83. orm = None
  84. framework = None
  85. os = None
  86. database_os = None
  87. display_name = None
  88. notes = None
  89. versus = None
  90. ############################################################
  91. # Test Variables
  92. ############################################################
  93. JSON = "json"
  94. DB = "db"
  95. QUERY = "query"
  96. FORTUNE = "fortune"
  97. UPDATE = "update"
  98. PLAINTEXT = "plaintext"
  99. ##########################################################################################
  100. # Public Methods
  101. ##########################################################################################
  102. ############################################################
  103. # Validates the jsonString is a JSON object with a 'message'
  104. # key with the value "hello, world!" (case-insensitive).
  105. ############################################################
  106. def validateJson(self, jsonString):
  107. obj = json.loads(jsonString)
  108. if not obj:
  109. return False
  110. if not obj["message"]:
  111. return False
  112. if not obj["message"].lower() == "hello, world!":
  113. return False
  114. return True
  115. ############################################################
  116. # Validates the jsonString is an array with a length of
  117. # 2, that each entry in the array is a JSON object, that
  118. # each object has an "id" and a "randomNumber" key, and that
  119. # both keys map to integers.
  120. ############################################################
  121. def validateDb(self, jsonString):
  122. arr = json.loads(jsonString)
  123. if not arr:
  124. return False
  125. if not len(arr) == 2:
  126. return False
  127. if type(arr[0]) != dict:
  128. return False
  129. if type(arr[1]) != dict:
  130. return False
  131. if not arr[0]["id"]:
  132. return False
  133. if not arr[0]["randomNumber"]:
  134. return False
  135. if type(arr[0]["id"]) != int:
  136. return False
  137. if type(arr[0]["randomNumber"]) != int:
  138. return False
  139. if not arr[1]["id"]:
  140. return False
  141. if not arr[1]["randomNumber"]:
  142. return False
  143. if type(arr[1]["id"]) != int:
  144. return False
  145. if type(arr[1]["randomNumber"]) != int:
  146. return False
  147. return True
  148. ############################################################
  149. # start(benchmarker)
  150. # Start the test using it's setup file
  151. ############################################################
  152. def start(self, out, err):
  153. return self.setup_module.start(self.benchmarker, out, err)
  154. ############################################################
  155. # End start
  156. ############################################################
  157. ############################################################
  158. # stop(benchmarker)
  159. # Stops the test using it's setup file
  160. ############################################################
  161. def stop(self, out, err):
  162. return self.setup_module.stop(out, err)
  163. ############################################################
  164. # End stop
  165. ############################################################
  166. ############################################################
  167. # verify_urls
  168. # Verifys each of the URLs for this test. THis will sinply
  169. # curl the URL and check for it's return status.
  170. # For each url, a flag will be set on this object for whether
  171. # or not it passed
  172. ############################################################
  173. def verify_urls(self, out, err):
  174. # JSON
  175. try:
  176. out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
  177. out.flush()
  178. url = self.benchmarker.generate_url(self.json_url, self.port)
  179. output = self.__curl_url(url, self.JSON, out, err)
  180. if self.validateJson(output):
  181. self.json_url_passed = True
  182. else:
  183. self.json_url_passed = False
  184. except (AttributeError, subprocess.CalledProcessError) as e:
  185. self.json_url_passed = False
  186. # DB
  187. try:
  188. out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
  189. out.flush()
  190. url = self.benchmarker.generate_url(self.db_url, self.port)
  191. output = self.__curl_url(url, self.DB, out, err)
  192. if self.validateDb(output):
  193. self.db_url_passed = True
  194. else:
  195. self.db_url_passed = False
  196. except (AttributeError, subprocess.CalledProcessError) as e:
  197. self.db_url_passed = False
  198. # Query
  199. try:
  200. out.write( "VERIFYING Query (" + self.query_url + "2) ...\n" )
  201. out.flush()
  202. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  203. output = self.__curl_url(url, self.QUERY, out, err)
  204. self.query_url_passed = True
  205. except (AttributeError, subprocess.CalledProcessError) as e:
  206. self.query_url_passed = False
  207. # Fortune
  208. try:
  209. out.write( "VERIFYING Fortune (" + self.fortune_url + ") ...\n" )
  210. out.flush()
  211. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  212. output = self.__curl_url(url, self.FORTUNE, out, err)
  213. self.fortune_url_passed = True
  214. except (AttributeError, subprocess.CalledProcessError) as e:
  215. self.fortune_url_passed = False
  216. # Update
  217. try:
  218. out.write( "VERIFYING Update (" + self.update_url + "2) ...\n" )
  219. out.flush()
  220. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  221. output = self.__curl_url(url, self.UPDATE, out, err)
  222. self.update_url_passed = True
  223. except (AttributeError, subprocess.CalledProcessError) as e:
  224. self.update_url_passed = False
  225. # plaintext
  226. try:
  227. out.write( "VERIFYING Plaintext (" + self.plaintext_url + ") ...\n" )
  228. out.flush()
  229. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  230. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  231. self.plaintext_url_passed = True
  232. except (AttributeError, subprocess.CalledProcessError) as e:
  233. self.plaintext_url_passed = False
  234. ############################################################
  235. # End verify_urls
  236. ############################################################
  237. ############################################################
  238. # contains_type(type)
  239. # true if this test contains an implementation of the given
  240. # test type (json, db, etc.)
  241. ############################################################
  242. def contains_type(self, type):
  243. try:
  244. if type == self.JSON and self.json_url != None:
  245. return True
  246. if type == self.DB and self.db_url != None:
  247. return True
  248. if type == self.QUERY and self.query_url != None:
  249. return True
  250. if type == self.FORTUNE and self.fortune_url != None:
  251. return True
  252. if type == self.UPDATE and self.update_url != None:
  253. return True
  254. if type == self.PLAINTEXT and self.plaintext_url != None:
  255. return True
  256. except AttributeError:
  257. pass
  258. return False
  259. ############################################################
  260. # End stop
  261. ############################################################
  262. ############################################################
  263. # benchmark
  264. # Runs the benchmark for each type of test that it implements
  265. # JSON/DB/Query.
  266. ############################################################
  267. def benchmark(self, out, err):
  268. # JSON
  269. try:
  270. if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
  271. out.write("BENCHMARKING JSON ... ")
  272. out.flush()
  273. results = None
  274. if self.json_url_passed:
  275. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  276. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
  277. results = self.__parse_test(self.JSON)
  278. else:
  279. # This should report results with a mark of failed JSON.
  280. results = dict()
  281. results['results'] = []
  282. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'], passed=self.json_url_passed)
  283. out.write( "Complete\n" )
  284. out.flush()
  285. except AttributeError:
  286. pass
  287. # DB
  288. try:
  289. if self.db_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.DB):
  290. out.write("BENCHMARKING DB ... ")
  291. out.flush()
  292. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  293. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
  294. results = self.__parse_test(self.DB)
  295. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'], passed=self.db_url_passed)
  296. out.write( "Complete\n" )
  297. except AttributeError:
  298. traceback.print_exc()
  299. pass
  300. # Query
  301. try:
  302. if self.query_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY):
  303. out.write("BENCHMARKING Query ... ")
  304. out.flush()
  305. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  306. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
  307. results = self.__parse_test(self.QUERY)
  308. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'], passed=self.query_url_passed)
  309. out.write( "Complete\n" )
  310. out.flush()
  311. except AttributeError:
  312. traceback.print_exc()
  313. pass
  314. # fortune
  315. try:
  316. if self.fortune_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE):
  317. out.write("BENCHMARKING Fortune ... ")
  318. out.flush()
  319. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  320. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
  321. results = self.__parse_test(self.FORTUNE)
  322. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'], passed=self.fortune_url_passed)
  323. out.write( "Complete\n" )
  324. out.flush()
  325. except AttributeError:
  326. traceback.print_exc()
  327. pass
  328. # update
  329. try:
  330. if self.update_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE):
  331. out.write("BENCHMARKING Update ... ")
  332. out.flush()
  333. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  334. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
  335. results = self.__parse_test(self.UPDATE)
  336. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'], passed=self.update_url_passed)
  337. out.write( "Complete\n" )
  338. out.flush()
  339. except AttributeError:
  340. # TODO - this needs to report some logging
  341. traceback.print_exc()
  342. pass
  343. # plaintext
  344. try:
  345. if self.plaintext_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT):
  346. out.write("BENCHMARKING Plaintext ... ")
  347. out.flush()
  348. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
  349. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
  350. results = self.__parse_test(self.PLAINTEXT)
  351. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'], passed=self.plaintext_url_passed)
  352. out.write( "Complete\n" )
  353. out.flush()
  354. except AttributeError:
  355. traceback.print_exc()
  356. pass
  357. ############################################################
  358. # End benchmark
  359. ############################################################
  360. ############################################################
  361. # parse_all
  362. # Method meant to be run for a given timestamp
  363. ############################################################
  364. def parse_all(self):
  365. # JSON
  366. if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
  367. results = self.__parse_test(self.JSON)
  368. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  369. # DB
  370. if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
  371. results = self.__parse_test(self.DB)
  372. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  373. # Query
  374. if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
  375. results = self.__parse_test(self.QUERY)
  376. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  377. # Fortune
  378. if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
  379. results = self.__parse_test(self.FORTUNE)
  380. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  381. # Update
  382. if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
  383. results = self.__parse_test(self.UPDATE)
  384. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  385. # Plaintext
  386. if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
  387. results = self.__parse_test(self.PLAINTEXT)
  388. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  389. ############################################################
  390. # End parse_all
  391. ############################################################
  392. ############################################################
  393. # __parse_test(test_type)
  394. ############################################################
  395. def __parse_test(self, test_type):
  396. try:
  397. results = dict()
  398. results['results'] = []
  399. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  400. is_warmup = True
  401. rawData = None
  402. for line in raw_data:
  403. if "Queries:" in line or "Concurrency:" in line:
  404. is_warmup = False
  405. rawData = None
  406. continue
  407. if "Warmup" in line or "Primer" in line:
  408. is_warmup = True
  409. continue
  410. if not is_warmup:
  411. if rawData == None:
  412. rawData = dict()
  413. results['results'].append(rawData)
  414. #if "Requests/sec:" in line:
  415. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  416. # rawData['reportedResults'] = m.group(1)
  417. # search for weighttp data such as succeeded and failed.
  418. if "Latency" in line:
  419. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  420. if len(m) == 4:
  421. rawData['latencyAvg'] = m[0]
  422. rawData['latencyStdev'] = m[1]
  423. rawData['latencyMax'] = m[2]
  424. # rawData['latencyStdevPercent'] = m[3]
  425. #if "Req/Sec" in line:
  426. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  427. # if len(m) == 4:
  428. # rawData['requestsAvg'] = m[0]
  429. # rawData['requestsStdev'] = m[1]
  430. # rawData['requestsMax'] = m[2]
  431. # rawData['requestsStdevPercent'] = m[3]
  432. #if "requests in" in line:
  433. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  434. # if m != None:
  435. # # parse out the raw time, which may be in minutes or seconds
  436. # raw_time = m.group(1)
  437. # if "ms" in raw_time:
  438. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  439. # elif "s" in raw_time:
  440. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  441. # elif "m" in raw_time:
  442. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  443. # elif "h" in raw_time:
  444. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  445. if "requests in" in line:
  446. m = re.search("([0-9]+) requests in", line)
  447. if m != None:
  448. rawData['totalRequests'] = int(m.group(1))
  449. if "Socket errors" in line:
  450. if "connect" in line:
  451. m = re.search("connect ([0-9]+)", line)
  452. rawData['connect'] = int(m.group(1))
  453. if "read" in line:
  454. m = re.search("read ([0-9]+)", line)
  455. rawData['read'] = int(m.group(1))
  456. if "write" in line:
  457. m = re.search("write ([0-9]+)", line)
  458. rawData['write'] = int(m.group(1))
  459. if "timeout" in line:
  460. m = re.search("timeout ([0-9]+)", line)
  461. rawData['timeout'] = int(m.group(1))
  462. if "Non-2xx" in line:
  463. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  464. if m != None:
  465. rawData['5xx'] = int(m.group(1))
  466. return results
  467. except IOError:
  468. return None
  469. ############################################################
  470. # End benchmark
  471. ############################################################
  472. ##########################################################################################
  473. # Private Methods
  474. ##########################################################################################
  475. ############################################################
  476. # __run_benchmark(script, output_file)
  477. # Runs a single benchmark using the script which is a bash
  478. # template that uses weighttp to run the test. All the results
  479. # outputed to the output_file.
  480. ############################################################
  481. def __run_benchmark(self, script, output_file, err):
  482. with open(output_file, 'w') as raw_file:
  483. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  484. p.communicate(script)
  485. err.flush()
  486. ############################################################
  487. # End __run_benchmark
  488. ############################################################
  489. ############################################################
  490. # __generate_concurrency_script(url, port)
  491. # Generates the string containing the bash script that will
  492. # be run on the client to benchmark a single test. This
  493. # specifically works for the variable concurrency tests (JSON
  494. # and DB)
  495. ############################################################
  496. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  497. if len(intervals) == 0:
  498. intervals = self.benchmarker.concurrency_levels
  499. headers = self.__get_request_headers(accept_header)
  500. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  501. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  502. interval=" ".join("{}".format(item) for item in intervals),
  503. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  504. pipeline=pipeline)
  505. ############################################################
  506. # End __generate_concurrency_script
  507. ############################################################
  508. ############################################################
  509. # __generate_query_script(url, port)
  510. # Generates the string containing the bash script that will
  511. # be run on the client to benchmark a single test. This
  512. # specifically works for the variable query tests (Query)
  513. ############################################################
  514. def __generate_query_script(self, url, port, accept_header):
  515. headers = self.__get_request_headers(accept_header)
  516. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  517. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  518. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  519. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  520. ############################################################
  521. # End __generate_query_script
  522. ############################################################
  523. ############################################################
  524. # __get_request_headers(accept_header)
  525. # Generates the complete HTTP header string
  526. ############################################################
  527. def __get_request_headers(self, accept_header):
  528. return self.headers_template.format(accept=accept_header)
  529. ############################################################
  530. # End __format_request_headers
  531. ############################################################
  532. ############################################################
  533. # __curl_url
  534. # Dump HTTP response and headers. Throw exception if there
  535. # is an HTTP error.
  536. ############################################################
  537. def __curl_url(self, url, testType, out, err):
  538. # Use -i to output response with headers.
  539. # Don't use -f so that the HTTP response code is ignored.
  540. # Use --stderr - to redirect stderr to stdout so we get
  541. # error output for sure in stdout.
  542. # Use -sS to hide progress bar, but show errors.
  543. subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
  544. out.flush()
  545. err.flush()
  546. # HTTP output may not end in a newline, so add that here.
  547. out.write( "\n" )
  548. out.flush()
  549. # We need to get the respond body from the curl and return it.
  550. p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
  551. output = p.communicate()
  552. # In the curl invocation above we could not use -f because
  553. # then the HTTP response would not be output, so use -f in
  554. # an additional invocation so that if there is an HTTP error,
  555. # subprocess.CalledProcessError will be thrown. Note that this
  556. # uses check_output() instead of check_call() so that we can
  557. # ignore the HTTP response because we already output that in
  558. # the first curl invocation.
  559. subprocess.check_output(["curl", "-fsS", url], stderr=err)
  560. out.flush()
  561. err.flush()
  562. # HTTP output may not end in a newline, so add that here.
  563. out.write( "\n" )
  564. out.flush()
  565. if output:
  566. # We have the response body - return it
  567. return output[0]
  568. ##############################################################
  569. # End __curl_url
  570. ##############################################################
  571. ##########################################################################################
  572. # Constructor
  573. ##########################################################################################
  574. def __init__(self, name, directory, benchmarker, args):
  575. self.name = name
  576. self.directory = directory
  577. self.benchmarker = benchmarker
  578. self.__dict__.update(args)
  579. # ensure directory has __init__.py file so that we can use it as a Python package
  580. if not os.path.exists(os.path.join(directory, "__init__.py")):
  581. open(os.path.join(directory, "__init__.py"), 'w').close()
  582. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  583. ############################################################
  584. # End __init__
  585. ############################################################
  586. ############################################################
  587. # End FrameworkTest
  588. ############################################################
  589. ##########################################################################################
  590. # Static methods
  591. ##########################################################################################
  592. ##############################################################
  593. # parse_config(config, directory, benchmarker)
  594. # parses a config file and returns a list of FrameworkTest
  595. # objects based on that config file.
  596. ##############################################################
  597. def parse_config(config, directory, benchmarker):
  598. tests = []
  599. # The config object can specify multiple tests, we neep to loop
  600. # over them and parse them out
  601. for test in config['tests']:
  602. for key, value in test.iteritems():
  603. test_name = config['framework']
  604. # if the test uses the 'defualt' keywork, then we don't
  605. # append anything to it's name. All configs should only have 1 default
  606. if key != 'default':
  607. # we need to use the key in the test_name
  608. test_name = test_name + "-" + key
  609. tests.append(FrameworkTest(test_name, directory, benchmarker, value))
  610. return tests
  611. ##############################################################
  612. # End parse_config
  613. ##############################################################