automated_benchmarking.py

# ################################################################
# Copyright (c) Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
# ##########################################################################

import argparse
import glob
import json
import os
import time
import pickle as pk
import subprocess
import urllib.request

GITHUB_API_PR_URL = "https://api.github.com/repos/facebook/zstd/pulls?state=open"
GITHUB_URL_TEMPLATE = "https://github.com/{}/zstd"
RELEASE_BUILD = {"user": "facebook", "branch": "dev", "hash": None}

# check to see if there are any new PRs every minute
DEFAULT_MAX_API_CALL_FREQUENCY_SEC = 60
PREVIOUS_PRS_FILENAME = "prev_prs.pk"

# Not sure what the threshold for triggering alarms should be
# 1% regression sounds like a little too sensitive but the desktop
# that I'm running it on is pretty stable so I think this is fine
CSPEED_REGRESSION_TOLERANCE = 0.01
DSPEED_REGRESSION_TOLERANCE = 0.01
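
# get_new_open_pr_builds() below persists the most recently seen open PRs in
# PREVIOUS_PRS_FILENAME as a pickled dict keyed by PR API URL, e.g. (shape only,
# values illustrative):
#   {"https://api.github.com/repos/facebook/zstd/pulls/1234":
#       {"user": "contributor", "branch": "feature-branch", "hash": "abc123"}}
# On the next call, only PRs that are new or whose head commit changed are returned.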
def get_new_open_pr_builds(prev_state=True):
    prev_prs = None
    if os.path.exists(PREVIOUS_PRS_FILENAME):
        with open(PREVIOUS_PRS_FILENAME, "rb") as f:
            prev_prs = pk.load(f)
    data = json.loads(urllib.request.urlopen(GITHUB_API_PR_URL).read().decode("utf-8"))
    prs = {
        d["url"]: {
            "user": d["user"]["login"],
            "branch": d["head"]["ref"],
            "hash": d["head"]["sha"].strip(),
        }
        for d in data
    }
    with open(PREVIOUS_PRS_FILENAME, "wb") as f:
        pk.dump(prs, f)
    if not prev_state or prev_prs is None:
        return list(prs.values())
    return [pr for url, pr in prs.items() if url not in prev_prs or prev_prs[url] != pr]

def get_latest_hashes():
    # `git log -1` and `git show <sha>` both start with a "commit <sha>" line, so the
    # sha is the second whitespace-separated token of the first output line. Returns
    # the HEAD commit plus its first and (for merge commits) second parent; the third
    # entry is an empty string when there is no second parent.
    tmp = subprocess.run(["git", "log", "-1"], stdout=subprocess.PIPE).stdout.decode(
        "utf-8"
    )
    sha1 = tmp.split("\n")[0].split(" ")[1]
    tmp = subprocess.run(
        ["git", "show", "{}^1".format(sha1)], stdout=subprocess.PIPE
    ).stdout.decode("utf-8")
    sha2 = tmp.split("\n")[0].split(" ")[1]
    tmp = subprocess.run(
        ["git", "show", "{}^2".format(sha1)], stdout=subprocess.PIPE
    ).stdout.decode("utf-8")
    sha3 = "" if len(tmp) == 0 else tmp.split("\n")[0].split(" ")[1]
    return [sha1.strip(), sha2.strip(), sha3.strip()]


def get_builds_for_latest_hash():
    hashes = get_latest_hashes()
    for b in get_new_open_pr_builds(False):
        if b["hash"] in hashes:
            return [b]
    return []

def clone_and_build(build):
    if build["user"] is not None:
        github_url = GITHUB_URL_TEMPLATE.format(build["user"])
        os.system(
            """
            rm -rf zstd-{user}-{sha} &&
            git clone {github_url} zstd-{user}-{sha} &&
            cd zstd-{user}-{sha} &&
            {checkout_command}
            make -j &&
            cd ../
            """.format(
                user=build["user"],
                github_url=github_url,
                sha=build["hash"],
                checkout_command="git checkout {} &&".format(build["hash"])
                if build["hash"] is not None
                else "",
            )
        )
        return "zstd-{user}-{sha}/zstd".format(user=build["user"], sha=build["hash"])
    else:
        os.system("cd ../ && make -j && cd tests")
        return "../zstd"
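
# parse_benchmark_output() below assumes `zstd -b` output contains the compression
# and decompression speeds as numbers followed by a standalone "MB/s" token once the
# output is split on spaces; it takes the token right before each "MB/s" (first match
# = compression speed, second = decompression speed). If zstd's benchmark output
# format changes, this parsing has to be revisited.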
def parse_benchmark_output(output):
    idx = [i for i, d in enumerate(output) if d == "MB/s"]
    return [float(output[idx[0] - 1]), float(output[idx[1] - 1])]


def benchmark_single(executable, level, filename):
    return parse_benchmark_output(
        subprocess.run(
            [executable, "-qb{}".format(level), filename],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        .stdout.decode("utf-8")
        .split(" ")
    )

def benchmark_n(executable, level, filename, n):
    # Run the benchmark n times and report the best (max) speed seen for each of
    # compression and decompression.
    speeds_arr = [benchmark_single(executable, level, filename) for _ in range(n)]
    cspeed, dspeed = max(b[0] for b in speeds_arr), max(b[1] for b in speeds_arr)
    print(
        "Bench (executable={} level={} filename={}, iterations={}):\n\t[cspeed: {} MB/s, dspeed: {} MB/s]".format(
            os.path.basename(executable),
            level,
            os.path.basename(filename),
            n,
            cspeed,
            dspeed,
        )
    )
    return (cspeed, dspeed)


def benchmark(build, filenames, levels, iterations):
    # Returns a levels x filenames matrix of (cspeed, dspeed) tuples.
    executable = clone_and_build(build)
    return [
        [benchmark_n(executable, l, f, iterations) for f in filenames] for l in levels
    ]
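
# The dictionary variants below follow the same flow as benchmark()/benchmark_n(),
# but benchmark a whole directory recursively (-r) compressed with a provided
# dictionary (-D), again keeping the best speed observed across `iterations` runs.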
def benchmark_dictionary_single(executable, filenames_directory, dictionary_filename, level, iterations):
    cspeeds, dspeeds = [], []
    for _ in range(iterations):
        output = subprocess.run(
            [executable, "-qb{}".format(level), "-D", dictionary_filename, "-r", filenames_directory],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").split(" ")
        cspeed, dspeed = parse_benchmark_output(output)
        cspeeds.append(cspeed)
        dspeeds.append(dspeed)
    max_cspeed, max_dspeed = max(cspeeds), max(dspeeds)
    print(
        "Bench (executable={} level={} filenames_directory={}, dictionary_filename={}, iterations={}):\n\t[cspeed: {} MB/s, dspeed: {} MB/s]".format(
            os.path.basename(executable),
            level,
            os.path.basename(filenames_directory),
            os.path.basename(dictionary_filename),
            iterations,
            max_cspeed,
            max_dspeed,
        )
    )
    return (max_cspeed, max_dspeed)


def benchmark_dictionary(build, filenames_directory, dictionary_filename, levels, iterations):
    executable = clone_and_build(build)
    return [
        benchmark_dictionary_single(executable, filenames_directory, dictionary_filename, l, iterations)
        for l in levels
    ]

def parse_regressions_and_labels(old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build):
    cspeed_reg = (old_cspeed - new_cspeed) / old_cspeed
    dspeed_reg = (old_dspeed - new_dspeed) / old_dspeed
    baseline_label = "{}:{} ({})".format(
        baseline_build["user"], baseline_build["branch"], baseline_build["hash"]
    )
    test_label = "{}:{} ({})".format(
        test_build["user"], test_build["branch"], test_build["hash"]
    )
    return cspeed_reg, dspeed_reg, baseline_label, test_label
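
# Worked example (illustrative numbers): old_cspeed = 500 MB/s and new_cspeed = 490 MB/s
# give cspeed_reg = (500 - 490) / 500 = 0.02, i.e. a 2% slowdown, which exceeds
# CSPEED_REGRESSION_TOLERANCE (0.01) and is reported as a regression by the callers below.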
def get_regressions(baseline_build, test_build, iterations, filenames, levels):
    old = benchmark(baseline_build, filenames, levels, iterations)
    new = benchmark(test_build, filenames, levels, iterations)
    regressions = []
    for j, level in enumerate(levels):
        for k, filename in enumerate(filenames):
            old_cspeed, old_dspeed = old[j][k]
            new_cspeed, new_dspeed = new[j][k]
            cspeed_reg, dspeed_reg, baseline_label, test_label = parse_regressions_and_labels(
                old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build
            )
            if cspeed_reg > CSPEED_REGRESSION_TOLERANCE:
                regressions.append(
                    "[COMPRESSION REGRESSION] (level={} filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                        level,
                        filename,
                        baseline_label,
                        test_label,
                        old_cspeed,
                        new_cspeed,
                        cspeed_reg * 100.0,
                    )
                )
            if dspeed_reg > DSPEED_REGRESSION_TOLERANCE:
                regressions.append(
                    "[DECOMPRESSION REGRESSION] (level={} filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                        level,
                        filename,
                        baseline_label,
                        test_label,
                        old_dspeed,
                        new_dspeed,
                        dspeed_reg * 100.0,
                    )
                )
    return regressions

def get_regressions_dictionary(baseline_build, test_build, filenames_directory, dictionary_filename, levels, iterations):
    old = benchmark_dictionary(baseline_build, filenames_directory, dictionary_filename, levels, iterations)
    new = benchmark_dictionary(test_build, filenames_directory, dictionary_filename, levels, iterations)
    regressions = []
    for j, level in enumerate(levels):
        old_cspeed, old_dspeed = old[j]
        new_cspeed, new_dspeed = new[j]
        cspeed_reg, dspeed_reg, baseline_label, test_label = parse_regressions_and_labels(
            old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build
        )
        if cspeed_reg > CSPEED_REGRESSION_TOLERANCE:
            regressions.append(
                "[COMPRESSION REGRESSION] (level={} filenames_directory={} dictionary_filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                    level,
                    filenames_directory,
                    dictionary_filename,
                    baseline_label,
                    test_label,
                    old_cspeed,
                    new_cspeed,
                    cspeed_reg * 100.0,
                )
            )
        if dspeed_reg > DSPEED_REGRESSION_TOLERANCE:
            regressions.append(
                "[DECOMPRESSION REGRESSION] (level={} filenames_directory={} dictionary_filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                    level,
                    filenames_directory,
                    dictionary_filename,
                    baseline_label,
                    test_label,
                    old_dspeed,
                    new_dspeed,
                    dspeed_reg * 100.0,
                )
            )
    return regressions

def main(filenames, levels, iterations, builds=None, emails=None, continuous=False, frequency=DEFAULT_MAX_API_CALL_FREQUENCY_SEC, dictionary_filename=None):
    if builds is None:
        builds = get_new_open_pr_builds()
    while True:
        for test_build in builds:
            if dictionary_filename is None:
                regressions = get_regressions(
                    RELEASE_BUILD, test_build, iterations, filenames, levels
                )
            else:
                regressions = get_regressions_dictionary(
                    RELEASE_BUILD, test_build, filenames, dictionary_filename, levels, iterations
                )
            body = "\n".join(regressions)
            if len(regressions) > 0:
                if emails is not None:
                    os.system(
                        """
                        echo "{}" | mutt -s "[zstd regression] caused by new pr" {}
                        """.format(
                            body, emails
                        )
                    )
                    print("Emails sent to {}".format(emails))
                print(body)
        if not continuous:
            break
        time.sleep(frequency)
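
# Example invocations (illustrative; file names and email addresses are placeholders):
#   python automated_benchmarking.py --directory golden-compression --levels 1,3,5 --iterations 3 --mode current
#   python automated_benchmarking.py --mode continuous --frequency 120 --emails you@example.com
#   python automated_benchmarking.py --directory dictionary-corpus --dict corpus.dict --mode onetime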
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--directory", help="directory with files to benchmark", default="golden-compression")
    parser.add_argument("--levels", help="levels to test e.g. ('1,2,3')", default="1")
    parser.add_argument("--iterations", help="number of benchmark iterations to run", default="1")
    parser.add_argument("--emails", help="email addresses of people who will be alerted upon regression. Only for continuous mode", default=None)
    parser.add_argument("--frequency", help="specifies the number of seconds to wait before each successive check for new PRs in continuous mode", default=DEFAULT_MAX_API_CALL_FREQUENCY_SEC)
    parser.add_argument("--mode", help="'fastmode', 'onetime', 'current', or 'continuous' (see README.md for details)", default="current")
    parser.add_argument("--dict", help="filename of dictionary to use (when set, this dictionary will be used to compress the files provided inside --directory)", default=None)

    args = parser.parse_args()
    filenames = args.directory
    levels = [int(l) for l in args.levels.split(",")]
    mode = args.mode
    iterations = int(args.iterations)
    emails = args.emails
    frequency = int(args.frequency)
    dictionary_filename = args.dict

    if dictionary_filename is None:
        filenames = glob.glob("{}/**".format(filenames))
        if len(filenames) == 0:
            print("0 files found")
            quit()

    if mode == "onetime":
        main(filenames, levels, iterations, frequency=frequency, dictionary_filename=dictionary_filename)
    elif mode == "current":
        builds = [{"user": None, "branch": "None", "hash": None}]
        main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
    elif mode == "fastmode":
        builds = [{"user": "facebook", "branch": "release", "hash": None}]
        main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
    else:
        main(filenames, levels, iterations, None, emails, True, frequency=frequency, dictionary_filename=dictionary_filename)