framework_test.py 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756
  1. import importlib
  2. import os
  3. import subprocess
  4. import time
  5. import re
  6. import pprint
  7. import sys
  8. import traceback
  9. import json
  10. class FrameworkTest:
  11. ##########################################################################################
  12. # Class variables
  13. ##########################################################################################
  14. headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  15. headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
  16. accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  17. accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  18. accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  19. concurrency_template = """
  20. echo ""
  21. echo "---------------------------------------------------------"
  22. echo " Running Primer {name}"
  23. echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
  24. echo "---------------------------------------------------------"
  25. echo ""
  26. {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
  27. sleep 5
  28. echo ""
  29. echo "---------------------------------------------------------"
  30. echo " Running Warmup {name}"
  31. echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
  32. echo "---------------------------------------------------------"
  33. echo ""
  34. {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
  35. sleep 5
  36. for c in {interval}
  37. do
  38. echo ""
  39. echo "---------------------------------------------------------"
  40. echo " Concurrency: $c for {name}"
  41. echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
  42. echo "---------------------------------------------------------"
  43. echo ""
  44. {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
  45. sleep 2
  46. done
  47. """
  48. query_template = """
  49. echo ""
  50. echo "---------------------------------------------------------"
  51. echo " Running Primer {name}"
  52. echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
  53. echo "---------------------------------------------------------"
  54. echo ""
  55. wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
  56. sleep 5
  57. echo ""
  58. echo "---------------------------------------------------------"
  59. echo " Running Warmup {name}"
  60. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
  61. echo "---------------------------------------------------------"
  62. echo ""
  63. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
  64. sleep 5
  65. for c in {interval}
  66. do
  67. echo ""
  68. echo "---------------------------------------------------------"
  69. echo " Queries: $c for {name}"
  70. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
  71. echo "---------------------------------------------------------"
  72. echo ""
  73. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
  74. sleep 2
  75. done
  76. """
  77. language = None
  78. platform = None
  79. webserver = None
  80. classification = None
  81. database = None
  82. approach = None
  83. orm = None
  84. framework = None
  85. os = None
  86. database_os = None
  87. display_name = None
  88. notes = None
  89. versus = None
  90. ############################################################
  91. # Test Variables
  92. ############################################################
  93. JSON = "json"
  94. DB = "db"
  95. QUERY = "query"
  96. FORTUNE = "fortune"
  97. UPDATE = "update"
  98. PLAINTEXT = "plaintext"
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates the jsonString is a JSON object with a 'message'
  # key with the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString):
    """Return True only if jsonString parses to a JSON object whose
    'message' value equals "hello, world!" (case-insensitive).

    Malformed JSON, a missing 'message' key, or a non-string value
    all return False (the original raised ValueError/KeyError here,
    which escaped the caller's narrower except clause)."""
    try:
      obj = json.loads(jsonString)
    except ValueError:
      # Response body was not valid JSON.
      return False
    if not isinstance(obj, dict):
      return False
    message = obj.get("message")
    if not message:
      return False
    try:
      return message.lower() == "hello, world!"
    except AttributeError:
      # 'message' was not a string-like value.
      return False
  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString):
    """Return True only if jsonString parses to a JSON object with
    truthy integer 'id' and 'randomNumber' values.

    Malformed JSON, a non-object payload, or missing keys return
    False (the original raised ValueError/KeyError past the caller's
    except clause). The strict type check matches the original's
    type(...) != int comparison (so booleans are rejected too)."""
    try:
      obj = json.loads(jsonString)
    except ValueError:
      return False
    if not isinstance(obj, dict):
      return False
    for key in ("id", "randomNumber"):
      value = obj.get(key)
      if not value or type(value) is not int:
        return False
    return True
  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString):
    """Return True only if jsonString parses to a JSON array of
    exactly two objects, each with truthy integer 'id' and
    'randomNumber' values.

    The per-entry checks are identical to the original's duplicated
    index-by-index code, folded into a loop; malformed JSON or
    missing keys return False instead of raising."""
    try:
      arr = json.loads(jsonString)
    except ValueError:
      return False
    if not isinstance(arr, list) or len(arr) != 2:
      return False
    for entry in arr:
      if type(entry) is not dict:
        return False
      for key in ("id", "randomNumber"):
        value = entry.get(key)
        if not value or type(value) is not int:
          return False
    return True
  ############################################################
  # validateFortune
  # Fortune responses are not validated yet; always returns None
  # (falsy), so the fortune URL is treated as unverified.
  ############################################################
  def validateFortune(self, htmlString):
    # TODO: implement real validation of the fortunes HTML page.
    return None
  ############################################################
  # validateUpdate
  # Update responses are not validated yet; always returns None
  # (falsy), so the update URL is treated as unverified.
  ############################################################
  def validateUpdate(self, jsonString):
    # TODO: implement real validation of the update response.
    return None
  ############################################################
  # validatePlaintext
  # Plaintext responses are not validated yet; always returns
  # None (falsy), so the plaintext URL is treated as unverified.
  ############################################################
  def validatePlaintext(self, jsonString):
    # TODO: implement real validation of the plaintext response.
    return None
  ############################################################
  # start(benchmarker)
  # Starts the test server by delegating to the start() function
  # of this test's setup module (loaded in __init__).
  ############################################################
  def start(self, out, err):
    # Pass the benchmarker through so the setup script can read its
    # configuration; out/err receive the server's output.
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################
  ############################################################
  # stop(benchmarker)
  # Stops the test server by delegating to the stop() function
  # of this test's setup module (loaded in __init__).
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check for its return status. For each URL
  # a *_url_passed flag is set on this object for whether or
  # not it passed.
  ############################################################
  def verify_urls(self, out, err):
    if self.runTests[self.JSON]:
      self.json_url_passed = self.__verify_url(
          out, err, "JSON", self.JSON, "json_url", self.validateJson)
    if self.runTests[self.DB]:
      self.db_url_passed = self.__verify_url(
          out, err, "DB", self.DB, "db_url", self.validateDb)
    if self.runTests[self.QUERY]:
      # Query and Update are verified with a fixed query count of 2.
      self.query_url_passed = self.__verify_url(
          out, err, "Query", self.QUERY, "query_url", self.validateQuery, suffix="2")
    if self.runTests[self.FORTUNE]:
      self.fortune_url_passed = self.__verify_url(
          out, err, "Fortune", self.FORTUNE, "fortune_url", self.validateFortune)
    if self.runTests[self.UPDATE]:
      self.update_url_passed = self.__verify_url(
          out, err, "Update", self.UPDATE, "update_url", self.validateUpdate, suffix="2")
    # BUG FIX: the original gated this section on runTests[self.UPDATE],
    # so plaintext was (not) verified based on the update test's flag.
    if self.runTests[self.PLAINTEXT]:
      self.plaintext_url_passed = self.__verify_url(
          out, err, "Plaintext", self.PLAINTEXT, "plaintext_url", self.validatePlaintext)

  ############################################################
  # __verify_url
  # Curls one URL and runs the response body through validator.
  # url_attr names the instance attribute holding the relative
  # URL; a missing attribute or a curl failure counts as failed
  # (same exceptions the original duplicated blocks swallowed).
  ############################################################
  def __verify_url(self, out, err, label, test_type, url_attr, validator, suffix=""):
    try:
      relative_url = getattr(self, url_attr) + suffix
      out.write("VERIFYING " + label + " (" + relative_url + ") ...\n")
      out.flush()
      url = self.benchmarker.generate_url(relative_url, self.port)
      output = self.__curl_url(url, test_type, out, err)
      return True if validator(output) else False
    except (AttributeError, subprocess.CalledProcessError):
      return False
  ############################################################
  # End verify_urls
  ############################################################
  270. ############################################################
  271. # contains_type(type)
  272. # true if this test contains an implementation of the given
  273. # test type (json, db, etc.)
  274. ############################################################
  275. def contains_type(self, type):
  276. try:
  277. if type == self.JSON and self.json_url != None:
  278. return True
  279. if type == self.DB and self.db_url != None:
  280. return True
  281. if type == self.QUERY and self.query_url != None:
  282. return True
  283. if type == self.FORTUNE and self.fortune_url != None:
  284. return True
  285. if type == self.UPDATE and self.update_url != None:
  286. return True
  287. if type == self.PLAINTEXT and self.plaintext_url != None:
  288. return True
  289. except AttributeError:
  290. pass
  291. return False
  292. ############################################################
  293. # End stop
  294. ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # and that is enabled (JSON/DB/Query/Fortune/Update/Plaintext).
  ############################################################
  def benchmark(self, out, err):
    if self.runTests[self.JSON]:
      self.__benchmark_type(
          out, err, self.JSON, "JSON", "json_url_passed",
          lambda: self.__generate_concurrency_script(self.json_url, self.port, self.accept_json),
          report_on_failure=True)
    if self.runTests[self.DB]:
      self.__benchmark_type(
          out, err, self.DB, "DB", "db_url_passed",
          lambda: self.__generate_concurrency_script(self.db_url, self.port, self.accept_json),
          report_on_failure=True)
    # BUG FIX: the Query/Fortune/Update/Plaintext branches below used
    # self.db_url_passed for their inner check instead of their own flag.
    if self.runTests[self.QUERY]:
      self.__benchmark_type(
          out, err, self.QUERY, "Query", "query_url_passed",
          lambda: self.__generate_query_script(self.query_url, self.port, self.accept_json))
    if self.runTests[self.FORTUNE]:
      self.__benchmark_type(
          out, err, self.FORTUNE, "Fortune", "fortune_url_passed",
          lambda: self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html))
    if self.runTests[self.UPDATE]:
      self.__benchmark_type(
          out, err, self.UPDATE, "Update", "update_url_passed",
          lambda: self.__generate_query_script(self.update_url, self.port, self.accept_json))
    if self.runTests[self.PLAINTEXT]:
      self.__benchmark_type(
          out, err, self.PLAINTEXT, "Plaintext", "plaintext_url_passed",
          lambda: self.__generate_concurrency_script(
              self.plaintext_url, self.port, self.accept_plaintext,
              wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384],
              pipeline="--pipeline 16"),
          debug=True)

  ############################################################
  # __benchmark_type
  # Runs one benchmark type: generates the client script, runs
  # it remotely, parses the output file and reports the results.
  # passed_attr names the *_url_passed flag set by verify_urls;
  # if that flag was never set, the resulting AttributeError
  # silently skips the benchmark (matching the original blocks).
  # report_on_failure: JSON and DB always report (with empty
  # results when verification failed); other types only run when
  # verification passed. debug prints the traceback on skip.
  ############################################################
  def __benchmark_type(self, out, err, test_type, label, passed_attr,
                       make_script, report_on_failure=False, debug=False):
    try:
      if self.benchmarker.type != "all" and self.benchmarker.type != test_type:
        return
      passed = getattr(self, passed_attr)
      if not passed and not report_on_failure:
        return
      out.write("BENCHMARKING %s ... " % label)
      out.flush()
      if passed:
        remote_script = make_script()
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, test_type), err)
        results = self.__parse_test(test_type)
      else:
        results = {'results': []}
      self.benchmarker.report_results(framework=self, test=test_type,
                                      results=results['results'], passed=passed)
      out.write("Complete\n")
      out.flush()
    except AttributeError:
      if debug:
        traceback.print_exc()
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Re-parses the raw output file for every test type whose
  # output file exists for this run's timestamp, and reports
  # the parsed results (the original duplicated this block six
  # times; folded into a loop).
  ############################################################
  def parse_all(self):
    for test_type in (self.JSON, self.DB, self.QUERY,
                      self.FORTUNE, self.UPDATE, self.PLAINTEXT):
      if os.path.exists(self.benchmarker.output_file(self.name, test_type)):
        results = self.__parse_test(test_type)
        self.benchmarker.report_results(framework=self, test=test_type,
                                        results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  450. ############################################################
  451. # __parse_test(test_type)
  452. ############################################################
  453. def __parse_test(self, test_type):
  454. try:
  455. results = dict()
  456. results['results'] = []
  457. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  458. is_warmup = True
  459. rawData = None
  460. for line in raw_data:
  461. if "Queries:" in line or "Concurrency:" in line:
  462. is_warmup = False
  463. rawData = None
  464. continue
  465. if "Warmup" in line or "Primer" in line:
  466. is_warmup = True
  467. continue
  468. if not is_warmup:
  469. if rawData == None:
  470. rawData = dict()
  471. results['results'].append(rawData)
  472. #if "Requests/sec:" in line:
  473. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  474. # rawData['reportedResults'] = m.group(1)
  475. # search for weighttp data such as succeeded and failed.
  476. if "Latency" in line:
  477. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  478. if len(m) == 4:
  479. rawData['latencyAvg'] = m[0]
  480. rawData['latencyStdev'] = m[1]
  481. rawData['latencyMax'] = m[2]
  482. # rawData['latencyStdevPercent'] = m[3]
  483. #if "Req/Sec" in line:
  484. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  485. # if len(m) == 4:
  486. # rawData['requestsAvg'] = m[0]
  487. # rawData['requestsStdev'] = m[1]
  488. # rawData['requestsMax'] = m[2]
  489. # rawData['requestsStdevPercent'] = m[3]
  490. #if "requests in" in line:
  491. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  492. # if m != None:
  493. # # parse out the raw time, which may be in minutes or seconds
  494. # raw_time = m.group(1)
  495. # if "ms" in raw_time:
  496. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  497. # elif "s" in raw_time:
  498. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  499. # elif "m" in raw_time:
  500. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  501. # elif "h" in raw_time:
  502. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  503. if "requests in" in line:
  504. m = re.search("([0-9]+) requests in", line)
  505. if m != None:
  506. rawData['totalRequests'] = int(m.group(1))
  507. if "Socket errors" in line:
  508. if "connect" in line:
  509. m = re.search("connect ([0-9]+)", line)
  510. rawData['connect'] = int(m.group(1))
  511. if "read" in line:
  512. m = re.search("read ([0-9]+)", line)
  513. rawData['read'] = int(m.group(1))
  514. if "write" in line:
  515. m = re.search("write ([0-9]+)", line)
  516. rawData['write'] = int(m.group(1))
  517. if "timeout" in line:
  518. m = re.search("timeout ([0-9]+)", line)
  519. rawData['timeout'] = int(m.group(1))
  520. if "Non-2xx" in line:
  521. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  522. if m != None:
  523. rawData['5xx'] = int(m.group(1))
  524. return results
  525. except IOError:
  526. return None
  527. ############################################################
  528. # End benchmark
  529. ############################################################
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Pipes the generated bash script to the benchmark client over
  # ssh via stdin, capturing everything the client prints into
  # output_file for later parsing.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    ssh_command = self.benchmarker.client_ssh_string.split(" ")
    with open(output_file, 'w') as raw_file:
      process = subprocess.Popen(ssh_command, stdin=subprocess.PIPE,
                                 stdout=raw_file, stderr=err)
      process.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
  547. ############################################################
  548. # __generate_concurrency_script(url, port)
  549. # Generates the string containing the bash script that will
  550. # be run on the client to benchmark a single test. This
  551. # specifically works for the variable concurrency tests (JSON
  552. # and DB)
  553. ############################################################
  554. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  555. if len(intervals) == 0:
  556. intervals = self.benchmarker.concurrency_levels
  557. headers = self.__get_request_headers(accept_header)
  558. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  559. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  560. interval=" ".join("{}".format(item) for item in intervals),
  561. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  562. pipeline=pipeline)
  563. ############################################################
  564. # End __generate_concurrency_script
  565. ############################################################
  566. ############################################################
  567. # __generate_query_script(url, port)
  568. # Generates the string containing the bash script that will
  569. # be run on the client to benchmark a single test. This
  570. # specifically works for the variable query tests (Query)
  571. ############################################################
  572. def __generate_query_script(self, url, port, accept_header):
  573. headers = self.__get_request_headers(accept_header)
  574. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  575. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  576. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  577. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  578. ############################################################
  579. # End __generate_query_script
  580. ############################################################
  581. ############################################################
  582. # __get_request_headers(accept_header)
  583. # Generates the complete HTTP header string
  584. ############################################################
  585. def __get_request_headers(self, accept_header):
  586. return self.headers_template.format(accept=accept_header)
  587. ############################################################
  588. # End __format_request_headers
  589. ############################################################
  ############################################################
  # __curl_url
  # Dump HTTP response and headers to out, return the response
  # body, and throw subprocess.CalledProcessError if there is
  # an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # First invocation: -i includes the response headers; -f is
    # deliberately omitted so the response is shown even for HTTP
    # error statuses; -sS hides the progress bar but keeps errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add one here.
    out.write("\n")
    out.flush()
    # Second invocation fetches the body alone so it can be returned
    # to the caller for validation.
    process = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    (body, _) = process.communicate()
    # Third invocation uses -f so that an HTTP error status raises
    # subprocess.CalledProcessError (check_output, not check_call,
    # because the response was already echoed above and can be
    # discarded here).
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    out.flush()
    err.flush()
    out.write("\n")
    out.flush()
    # Return the response body captured by the second invocation.
    return body
  ##############################################################
  # End __curl_url
  ##############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    """Builds a FrameworkTest.

    name: unique test name (framework name, possibly key-suffixed).
    directory: directory containing the test's setup module.
    benchmarker: the orchestrating Benchmarker instance.
    runTests: dict of test-type name -> enabled flag.
    args: the test's config entry; every key (json_url, port,
    setup_file, ...) is copied onto this instance."""
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.__dict__.update(args)
    # Ensure the directory has an __init__.py file so that we can use
    # it as a Python package when importing the setup module below.
    init_path = os.path.join(directory, "__init__.py")
    if not os.path.exists(init_path):
      open(init_path, 'w').close()
    # (Original bound a redundant local alias in this assignment.)
    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
  645. ############################################################
  646. # End FrameworkTest
  647. ############################################################
  648. ##########################################################################################
  649. # Static methods
  650. ##########################################################################################
  651. ##############################################################
  652. # parse_config(config, directory, benchmarker)
  653. # parses a config file and returns a list of FrameworkTest
  654. # objects based on that config file.
  655. ##############################################################
  656. def parse_config(config, directory, benchmarker):
  657. tests = []
  658. # The config object can specify multiple tests, we neep to loop
  659. # over them and parse them out
  660. for test in config['tests']:
  661. for key, value in test.iteritems():
  662. test_name = config['framework']
  663. runTests = dict()
  664. runTests["json"] = True if test["json_url"] else False
  665. runTests["db"] = True if test["db_url"] else False
  666. runTests["query"] = True if test["query_url"] else False
  667. runTests["fortune"] = True if test["fortune_url"] else False
  668. runTests["update"] = True if test["update_url"] else False
  669. runTests["plaintext"] = True if test["plaintext_url"] else False
  670. # if the test uses the 'defualt' keywork, then we don't
  671. # append anything to it's name. All configs should only have 1 default
  672. if key != 'default':
  673. # we need to use the key in the test_name
  674. test_name = test_name + "-" + key
  675. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  676. return tests
  677. ##############################################################
  678. # End parse_config
  679. ##############################################################