@@ -2,6 +2,7 @@ from setup.linux.installer import Installer
 from setup.linux import setup_util

 from benchmark import framework_test
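+# supplies the *TestType classes (JsonTestType, DBTestType, ...) that
+# __init__ below uses to build the 'types' map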
+from benchmark.test_types import *
 from utils import header
 from utils import gather_tests
 from utils import gather_frameworks
@@ -17,6 +18,7 @@ import sys
 import logging
 import socket
 import threading
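+# pprint is used by the commented-out debug dump of self.__dict__ in __init__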
+from pprint import pprint

 from multiprocessing import Process

@@ -325,7 +327,7 @@ class Benchmarker:
     # off, rather than starting from the beginning
     if os.path.isfile('current_benchmark.txt'):
       with open('current_benchmark.txt', 'r') as interrupted_benchmark:
-        interrupt_bench = interrupted_benchmark.read()
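+        # strip trailing whitespace (e.g. the newline left over from when the
+        # file was written) so the exact name comparison below can match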
+        interrupt_bench = interrupted_benchmark.read().strip()
       for index, atest in enumerate(tests):
         if atest.name == interrupt_bench:
           tests = tests[index:]
@@ -495,15 +497,8 @@ class Benchmarker:
        pass
      with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
           open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
-        if hasattr(test, 'skip'):
-          if test.skip.lower() == "true":
-            out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
-            return exit_with_code(0)

        if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
-          # the operating system requirements of this test for the
-          # application server or the database server don't match
-          # our current environment
          out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
          return exit_with_code(0)
@@ -511,11 +506,6 @@ class Benchmarker:
        if self.exclude != None and test.name in self.exclude:
          out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
          return exit_with_code(0)
-
-        # If the test does not contain an implementation of the current test-type, skip it
-        if self.type != 'all' and not test.contains_type(self.type):
-          out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
-          return exit_with_code(0)

        out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
        out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
@@ -561,11 +551,13 @@ class Benchmarker:
          self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
          return exit_with_code(1)

+        logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
        time.sleep(self.sleep)

        ##########################
        # Verify URLs
        ##########################
+        logging.info("Verifying framework URLs")
        passed_verify = test.verify_urls(out, err)
        out.flush()
        err.flush()
@@ -574,6 +566,7 @@ class Benchmarker:
        # Benchmark this test
        ##########################
        if self.mode == "benchmark":
+          logging.info("Benchmarking")
          out.write(header("Benchmarking %s" % test.name))
          out.flush()
          test.benchmark(out, err)
@@ -591,9 +584,15 @@ class Benchmarker:
        time.sleep(5)

        if self.__is_port_bound(test.port):
-          self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
-          err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
+          err.write("Port %s was not freed. Attempting to free it.\n" % (test.port, ))
          err.flush()
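+          # try to reclaim the port: kill whatever still holds it, give the
+          # OS a moment, then re-check before declaring failure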
+          self.__forciblyEndPortBoundProcesses(test.port, out, err)
+          time.sleep(5)
+          if self.__is_port_bound(test.port):
+            err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
+            err.flush()
+            self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
+
          return exit_with_code(1)

        out.write(header("Stopped %s" % test.name))
@@ -680,6 +679,44 @@ class Benchmarker:
  # End __is_port_bound
  ############################################################

+  def __forciblyEndPortBoundProcesses(self, test_port, out, err):
+    p = subprocess.Popen(['sudo', 'netstat', '-lnp'], stdout=subprocess.PIPE)
+    # keep netstat's output in its own variables; assigning to out/err here
+    # would clobber the log file handles passed into this method
+    (ns_out, ns_err) = p.communicate()
+    for line in ns_out.splitlines():
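+      # a listening socket line looks something like
+      #   tcp  0  0 0.0.0.0:8080  0.0.0.0:*  LISTEN  1234/python
+      # field 4 is the local address; the text after its last ':' is the port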
+      if 'tcp' in line:
+        splitline = line.split()
+        port = splitline[3].split(':')
+        port = int(port[-1].strip())
+        if port > 6000:
+          err.write(textwrap.dedent(
+            """
+            A port that shouldn't be open is open. See the following line for netstat output.
+            {splitline}
+            """.format(splitline=splitline)))
+          err.flush()
+        if port == test_port:
+          try:
+            pid = splitline[6].split('/')[0].strip()
+            ps = subprocess.Popen(['ps','p',pid], stdout=subprocess.PIPE)
+            # Store some info about this process
+            proc = ps.communicate()
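+            # signal 15 (SIGTERM) asks the process to exit cleanly; we only
+            # escalate to SIGKILL (9) below if it is still alive afterwards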
+            os.kill(int(pid), 15)
+            # Sleep for 10 sec; kill can be finicky
+            time.sleep(10)
+            # Check that PID again; if it still appears in the fresh ps
+            # output, SIGTERM did not take and we force-kill
+            ps = subprocess.Popen(['ps','p',pid], stdout=subprocess.PIPE)
+            dead = ps.communicate()
+            if pid in dead[0]:
+              os.kill(int(pid), 9)
+          except OSError:
+            out.write(textwrap.dedent("""
+              -----------------------------------------------------
+              Error: Could not kill pid {pid}
+              -----------------------------------------------------
+              """.format(pid=str(pid))))
+            # This is okay; likely we killed a parent that ended
+            # up automatically killing this before we could.
+
  ############################################################
  # __parse_results
  # Ensures that the system has all necessary software to run
@@ -712,22 +749,29 @@ class Benchmarker:
    jsonResult = {}
    for framework, testlist in frameworks.iteritems():
+      if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
+        logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
+        continue
+
      # Unfortunately the source_code files use lines like
      # ./cpoll_cppsp/www/fortune_old instead of
      # ./www/fortune_old
      # so we have to back our working dir up one level
      wd = os.path.dirname(testlist[0].directory)
-
+
      try:
        command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

        # Find the last instance of the word 'code' in the yaml output. This should
        # be the line count for the sum of all listed files or just the line count
        # for the last file in the case where there's only one file listed.
        command = command + "| grep code | tail -1 | cut -d: -f 2"
+        logging.debug("Running \"%s\" (cwd=%s)", command, wd)
        lineCount = subprocess.check_output(command, cwd=wd, shell=True)
        jsonResult[framework] = int(lineCount)
      except subprocess.CalledProcessError:
        continue
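+      # int(lineCount) above raises ValueError when the cloc pipeline
+      # prints nothing numeric (e.g. an empty source_code listing)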
+      except ValueError as ve:
+        logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
    self.results['rawData']['slocCounts'] = jsonResult
  ############################################################
  # End __count_sloc
@@ -845,7 +889,25 @@ class Benchmarker:
  ############################################################
  def __init__(self, args):

+    # Map type strings to their objects
+    types = dict()
+    types['json'] = JsonTestType()
+    types['db'] = DBTestType()
+    types['query'] = QueryTestType()
+    types['fortune'] = FortuneTestType()
+    types['update'] = UpdateTestType()
+    types['plaintext'] = PlaintextTestType()
+
+    # Turn type into a map instead of a string
+    if args['type'] == 'all':
+      args['types'] = types
+    else:
+      args['types'] = { args['type'] : types[args['type']] }
+    del args['type']
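+    # from here on the benchmarker deals in self.types (name -> TestType
+    # object); the old self.type string is gone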
+
    self.__dict__.update(args)
+    # pprint(self.__dict__)
+
    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600
