# framework_test.py

import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json


class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" "http://{server_host}:{port}{url}"
      sleep 2
    done
  """
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
  """
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"

  ##########################################################################################
  # Public Methods
  ##########################################################################################
  def validateJson(self, jsonString):
    try:
      obj = json.loads(jsonString)
    except ValueError:
      # Unparseable JSON fails validation instead of raising.
      return False
    if not obj:
      return False
    # json.loads returns a dict, so the key must be read with subscripting
    # rather than attribute access.
    if "message" not in obj:
      return False
    return obj["message"].lower() == "hello, world!"
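  # For example (illustrative):
  #   validateJson('{"message": "Hello, World!"}')  -> True
  #   validateJson('{"greeting": "hi"}')            -> False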
  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL,
  # a flag is set on this object indicating whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    try:
      out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.json_url, self.port)
      output = self.__curl_url(url, self.JSON, out, err)
      self.json_url_passed = self.validateJson(output)
    except (AttributeError, subprocess.CalledProcessError):
      self.json_url_passed = False

    # DB
    try:
      out.write("VERIFYING DB (" + self.db_url + ") ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.db_url, self.port)
      self.__curl_url(url, self.DB, out, err)
      self.db_url_passed = True
    except (AttributeError, subprocess.CalledProcessError):
      self.db_url_passed = False

    # Query
    try:
      out.write("VERIFYING Query (" + self.query_url + "2) ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.query_url + "2", self.port)
      self.__curl_url(url, self.QUERY, out, err)
      self.query_url_passed = True
    except (AttributeError, subprocess.CalledProcessError):
      self.query_url_passed = False

    # Fortune
    try:
      out.write("VERIFYING Fortune (" + self.fortune_url + ") ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.fortune_url, self.port)
      self.__curl_url(url, self.FORTUNE, out, err)
      self.fortune_url_passed = True
    except (AttributeError, subprocess.CalledProcessError):
      self.fortune_url_passed = False

    # Update
    try:
      out.write("VERIFYING Update (" + self.update_url + "2) ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.update_url + "2", self.port)
      self.__curl_url(url, self.UPDATE, out, err)
      self.update_url_passed = True
    except (AttributeError, subprocess.CalledProcessError):
      self.update_url_passed = False

    # Plaintext
    try:
      out.write("VERIFYING Plaintext (" + self.plaintext_url + ") ...\n")
      out.flush()
      url = self.benchmarker.generate_url(self.plaintext_url, self.port)
      self.__curl_url(url, self.PLAINTEXT, out, err)
      self.plaintext_url_passed = True
    except (AttributeError, subprocess.CalledProcessError):
      self.plaintext_url_passed = False
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # contains_type(type)
  # Returns True if this test contains an implementation of
  # the given test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url is not None:
        return True
      if type == self.DB and self.db_url is not None:
        return True
      if type == self.QUERY and self.query_url is not None:
        return True
      if type == self.FORTUNE and self.fortune_url is not None:
        return True
      if type == self.UPDATE and self.update_url is not None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url is not None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################

  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):
    # JSON
    try:
      if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
        out.write("BENCHMARKING JSON ... ")
        out.flush()
        results = None
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
          results = self.__parse_test(self.JSON)
        else:
          # Report an empty result set, marked as failed JSON.
          results = dict()
          results['results'] = []
        # The 'success' keyword below is an assumption: the original code passed
        # this flag positionally after keyword arguments, which is a syntax error.
        self.benchmarker.report_results(framework=self, test=self.JSON,
                                        results=results['results'], success=self.json_url_passed)
        out.write("Complete\n")
        out.flush()
    except AttributeError:
      pass
    # DB
    try:
      if self.db_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.DB):
        out.write("BENCHMARKING DB ... ")
        out.flush()
        remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
        results = self.__parse_test(self.DB)
        self.benchmarker.report_results(framework=self, test=self.DB,
                                        results=results['results'], success=self.db_url_passed)
        out.write("Complete\n")
    except AttributeError:
      traceback.print_exc()
    # Query
    try:
      if self.query_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY):
        out.write("BENCHMARKING Query ... ")
        out.flush()
        remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_results(framework=self, test=self.QUERY,
                                        results=results['results'], success=self.query_url_passed)
        out.write("Complete\n")
        out.flush()
    except AttributeError:
      traceback.print_exc()
    # Fortune
    try:
      if self.fortune_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE):
        out.write("BENCHMARKING Fortune ... ")
        out.flush()
        remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_results(framework=self, test=self.FORTUNE,
                                        results=results['results'], success=self.fortune_url_passed)
        out.write("Complete\n")
        out.flush()
    except AttributeError:
      traceback.print_exc()
    # Update
    try:
      if self.update_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE):
        out.write("BENCHMARKING Update ... ")
        out.flush()
        remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_results(framework=self, test=self.UPDATE,
                                        results=results['results'], success=self.update_url_passed)
        out.write("Complete\n")
        out.flush()
    except AttributeError:
      # TODO - this needs to report some logging
      traceback.print_exc()
    # Plaintext
    try:
      if self.plaintext_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT):
        out.write("BENCHMARKING Plaintext ... ")
        out.flush()
        remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext,
                                                           wrk_command="wrk-pipeline",
                                                           intervals=[256, 1024, 4096, 16384],
                                                           pipeline="--pipeline 16")
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT,
                                        results=results['results'], success=self.plaintext_url_passed)
        out.write("Complete\n")
        out.flush()
    except AttributeError:
      traceback.print_exc()
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
        is_warmup = True
        rawData = None
        for line in raw_data:
          if "Queries:" in line or "Concurrency:" in line:
            is_warmup = False
            rawData = None
            continue
          if "Warmup" in line or "Primer" in line:
            is_warmup = True
            continue
          if not is_warmup:
            if rawData is None:
              rawData = dict()
              results['results'].append(rawData)

            # if "Requests/sec:" in line:
            #   m = re.search(r"Requests/sec:\s+([0-9]+)", line)
            #   rawData['reportedResults'] = m.group(1)

            # Search the wrk output for latency, request counts, and socket errors.
            if "Latency" in line:
              # Note: the bracketed set matches the characters u, s, m, % and
              # '|' individually (it is not a set of alternations), but that
              # suffices for wrk's unit suffixes.
              m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
              if len(m) == 4:
                rawData['latencyAvg'] = m[0]
                rawData['latencyStdev'] = m[1]
                rawData['latencyMax'] = m[2]
                # rawData['latencyStdevPercent'] = m[3]

            # if "Req/Sec" in line:
            #   m = re.findall(r"([0-9]+\.*[0-9]*[k|%]*)", line)
            #   if len(m) == 4:
            #     rawData['requestsAvg'] = m[0]
            #     rawData['requestsStdev'] = m[1]
            #     rawData['requestsMax'] = m[2]
            #     rawData['requestsStdevPercent'] = m[3]

            # if "requests in" in line:
            #   m = re.search(r"requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
            #   if m is not None:
            #     # parse out the raw time, which may be in minutes or seconds
            #     raw_time = m.group(1)
            #     if "ms" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
            #     elif "s" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1])
            #     elif "m" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
            #     elif "h" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

            if "requests in" in line:
              m = re.search("([0-9]+) requests in", line)
              if m is not None:
                rawData['totalRequests'] = int(m.group(1))
            if "Socket errors" in line:
              if "connect" in line:
                m = re.search("connect ([0-9]+)", line)
                rawData['connect'] = int(m.group(1))
              if "read" in line:
                m = re.search("read ([0-9]+)", line)
                rawData['read'] = int(m.group(1))
              if "write" in line:
                m = re.search("write ([0-9]+)", line)
                rawData['write'] = int(m.group(1))
              if "timeout" in line:
                m = re.search("timeout ([0-9]+)", line)
                rawData['timeout'] = int(m.group(1))
            if "Non-2xx" in line:
              m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
              if m is not None:
                rawData['5xx'] = int(m.group(1))
      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
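  # Representative wrk output that __parse_test scans (illustrative values):
  #
  #   Concurrency: 64 for my-framework
  #     Latency   631.23us  540.47us   24.51ms   99.00%
  #     123456 requests in 15.00s, 18.76MB read
  #     Socket errors: connect 0, read 11, write 0, timeout 42
  #     Non-2xx or 3xx responses: 17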
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the given script, which is a
  # bash script generated from one of the wrk templates above.
  # All the results are written to the output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
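  # Note that the script does not run locally: client_ssh_string is split into
  # an argv (an ssh invocation targeting the load-generation client), and the
  # generated bash script is fed to the remote shell's stdin via communicate().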
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=None, pipeline=""):
    # Default to the benchmarker's concurrency levels (avoiding a mutable
    # default argument).
    if not intervals:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################
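  # For example, __get_request_headers(self.accept_json) produces:
  #   -H 'Host: localhost' -H 'Accept: application/json,text/html;q=0.9,...' -H 'Connection: keep-alive'
  # (Accept value truncated here for readability.)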
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # Use -i to output the response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use -sS to hide the progress bar, but still show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add one here.
    out.write("\n")
    out.flush()
    # We need to get the response body from curl and return it.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocations above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response, because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add one here.
    out.write("\n")
    out.flush()
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.__dict__.update(args)
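    # args comes straight from the parsed test config, so keys such as
    # setup_file, port, json_url, db_url, etc. become instance attributes here;
    # tests that omit a URL rely on the AttributeError handling above.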
    # Ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()
    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []
  # The config object can specify multiple tests; we need to loop
  # over them and parse them out
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']
      # If the test uses the 'default' keyword, then we don't
      # append anything to its name. All configs should have only
      # one default.
      if key != 'default':
        # We need to use the key in the test_name
        test_name = test_name + "-" + key
      tests.append(FrameworkTest(test_name, directory, benchmarker, value))
  return tests
##############################################################
# End parse_config
##############################################################
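# Illustrative shape of the config consumed by parse_config (names hypothetical):
#
#   {
#     "framework": "myframework",
#     "tests": [{
#       "default": {"setup_file": "setup", "json_url": "/json", "port": 8080},
#       "raw":     {"setup_file": "setup", "db_url": "/db", "port": 8080}
#     }]
#   }
#
# This would yield two FrameworkTest objects named "myframework" and
# "myframework-raw".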