@@ -29,10 +29,10 @@ from datetime import timedelta
 
 class FrameworkTest:
   headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
-  
+
   # Used for test types that require no pipelining or query string params.
   concurrency_template = """
-    
+
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Primer {name}"
@@ -41,7 +41,7 @@ class FrameworkTest:
     echo ""
     {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
     sleep 5
-    
+
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Warmup {name}"
@@ -75,7 +75,7 @@ class FrameworkTest:
   """
   # Used for test types that require pipelining.
   pipeline_template = """
-    
+
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Primer {name}"
@@ -84,7 +84,7 @@ class FrameworkTest:
     echo ""
     {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
     sleep 5
-    
+
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Warmup {name}"
@@ -116,11 +116,11 @@ class FrameworkTest:
     sleep 2
   done
   """
-  # Used for test types that require a database - 
+  # Used for test types that require a database -
   # These tests run at a static concurrency level and vary the size of
   # the query sent with each request
   query_template = """
-    
+
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
@@ -129,7 +129,7 @@ class FrameworkTest:
    echo ""
    wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
-    
+
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
@@ -168,10 +168,10 @@ class FrameworkTest:
   ############################################################
   def start(self, out):
 
-    # Setup environment variables 
-    logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower())
+    # Setup environment variables
+    logDir = os.path.join(self.fwroot, self.benchmarker.full_results_directory(), 'logs', self.name.lower())
     bash_functions_path= os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
-    setup_util.replace_environ(config='$FWROOT/config/benchmark_profile', 
+    setup_util.replace_environ(config='$FWROOT/config/benchmark_profile',
       command='''\
      export TROOT=%s && \
      export IROOT=%s && \
@@ -180,9 +180,9 @@ class FrameworkTest:
      export MAX_THREADS=%s && \
      export MAX_CONCURRENCY=%s \
      ''' % (
-        self.directory, 
-        self.install_root, 
-        self.database_host, 
+        self.directory,
+        self.install_root,
+        self.database_host,
        logDir,
        self.benchmarker.threads,
        max(self.benchmarker.concurrency_levels)))
@@ -200,9 +200,9 @@ class FrameworkTest:
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", self.directory)
-    
+
    # Run the start script for the test as the "testrunner" user
-    # 
+    #
    # `sudo` - Switching user requires superuser privs
    # -u [username] The username
    # -E Preserves the current environment variables
@@ -214,21 +214,21 @@ class FrameworkTest:
    # -e Force bash to exit on first error
    # -x Turn on bash tracing e.g. print commands before running
    #
-    # Most servers do not output to stdout/stderr while serving 
-    # requests so there is no performance hit from disabling 
-    # output buffering. This disabling is necessary to 
-    # a) allow TFB to show output in real time and b) avoid loosing 
-    # output in the buffer when the testrunner processes are forcibly 
+    # Most servers do not output to stdout/stderr while serving
+    # requests so there is no performance hit from disabling
+    # output buffering. This disabling is necessary to
+    # a) allow TFB to show output in real time and b) avoid losing
+    # output in the buffer when the testrunner processes are forcibly
    # killed
-    # 
+    #
    # See http://www.pixelbeat.org/programming/stdio_buffering/
    # See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
    # See http://eyalarubas.com/python-subproc-nonblock.html
    command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
      self.benchmarker.runner_user,
-      bash_functions_path, 
+      bash_functions_path,
      os.path.join(self.troot, self.setup_file))
-    
+
    debug_command = '''\
      export FWROOT=%s && \\
      export TROOT=%s && \\
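The `stdbuf -o0 -e0` prefix in `command` is what keeps the pipe readable in real time. A minimal, standalone sketch of the effect; the `python2 -c` child is a hypothetical stand-in chosen because Python 2 writes through C stdio, which stdbuf can control:

```python
import subprocess
import sys

# A child writing through C stdio block-buffers (~4KB) once its stdout is a
# pipe, so a reader sees nothing until the buffer fills or the child exits.
# stdbuf -o0 -e0 forces unbuffered streams, letting us tail output live.
child = subprocess.Popen(
    "stdbuf -o0 -e0 python2 -c \"import time; print 'starting'; time.sleep(1); print 'ready'\"",
    shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(child.stdout.readline, ''):
    sys.stdout.write(line)  # 'starting' arrives about a second before 'ready'
child.wait()
```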
@@ -238,12 +238,12 @@ class FrameworkTest:
      export MAX_THREADS=%s && \\
      export MAX_CONCURRENCY=%s && \\
      cd %s && \\
-      %s''' % (self.fwroot, 
-      self.directory, 
-      self.install_root, 
+      %s''' % (self.fwroot,
+      self.directory,
+      self.install_root,
      self.database_host,
      logDir,
-      self.benchmarker.threads, 
+      self.benchmarker.threads,
      max(self.benchmarker.concurrency_levels),
      self.directory,
      command)
@@ -252,8 +252,8 @@ class FrameworkTest:
 
    def tee_output(prefix, line):
      # Needs to be one atomic write
-      # Explicitly use UTF-8 as it's the most common framework output 
-      # TODO improve encoding handling 
+      # Explicitly use UTF-8 as it's the most common framework output
+      # TODO improve encoding handling
      line = prefix.encode('utf-8') + line
 
      # Log to current terminal
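The "one atomic write" comment is the heart of tee_output: the setup script and the framework's own processes may write concurrently, so prefix and payload must reach the stream in a single write call. A self-contained sketch, with the file handle passed in rather than closed over; otherwise the shape matches the hunk:

```python
import sys

def tee_output(out, prefix, line):
    # Concatenate first: one write() cannot be interleaved with another
    # writer's output, two separate calls can. 'line' is a raw byte string
    # read from the subprocess pipe; the prefix is encoded to match it.
    line = prefix.encode('utf-8') + line

    sys.stdout.write(line)  # echo to the current terminal
    sys.stdout.flush()
    out.write(line)         # and to the per-test log file
    out.flush()
```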
@@ -265,10 +265,10 @@ class FrameworkTest:
      out.flush()
 
    # Start the setup.sh command
-    p = subprocess.Popen(command, cwd=self.directory, 
-      shell=True, stdout=subprocess.PIPE, 
+    p = subprocess.Popen(command, cwd=self.directory,
+      shell=True, stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT)
-    nbsr = setup_util.NonBlockingStreamReader(p.stdout, 
+    nbsr = setup_util.NonBlockingStreamReader(p.stdout,
      "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))
 
    # Set a limit on total execution time of setup.sh
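`setup_util.NonBlockingStreamReader` itself is not part of this diff. Following the eyalarubas.com article cited above, the usual shape is a daemon thread draining the pipe into a queue so the main loop can poll with a timeout. A sketch of that idea, not the toolset's actual class:

```python
from Queue import Queue, Empty  # 'queue' on Python 3
from threading import Thread

class NonBlockingStreamReader(object):
    def __init__(self, stream, eof_message=None):
        self._queue = Queue()

        def drain():
            # The blocking readline happens here, off the main thread.
            for line in iter(stream.readline, ''):
                self._queue.put(line)
            if eof_message:
                self._queue.put(eof_message + '\n')

        t = Thread(target=drain)
        t.daemon = True
        t.start()

    def readline(self, timeout=None):
        # Raises Empty when nothing arrives in time, which is why the
        # caller wraps nbsr.readline(0.05) in a try/except.
        return self._queue.get(timeout=timeout)
```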
@@ -278,30 +278,30 @@ class FrameworkTest:
    # Need to print to stdout once every 10 minutes or Travis-CI will abort
    travis_timeout = datetime.now() + timedelta(minutes = 5)
 
-    # Flush output until setup.sh work is finished. This is 
+    # Flush output until setup.sh work is finished. This is
    # either a) when setup.sh exits b) when the port is bound
-    # c) when we run out of time. Note that 'finished' doesn't 
-    # guarantee setup.sh process is dead - the OS may choose to make 
+    # c) when we run out of time. Note that 'finished' doesn't
+    # guarantee setup.sh process is dead - the OS may choose to make
    # setup.sh a zombie process if it still has living children
    #
-    # Note: child processes forked (using &) will remain alive 
-    # after setup.sh has exited. The will have inherited the 
-    # stdout/stderr descriptors and will be directing their 
-    # output to the pipes. 
+    # Note: child processes forked (using &) will remain alive
+    # after setup.sh has exited. They will have inherited the
+    # stdout/stderr descriptors and will be directing their
+    # output to the pipes.
    #
    prefix = "Setup %s: " % self.name
    while (p.poll() is None
      and not self.benchmarker.is_port_bound(self.port)
      and not time_remaining.total_seconds() < 0):
-      
-      # The conditions above are slow to check, so 
+
+      # The conditions above are slow to check, so
      # we will delay output substantially if we only
-      # print one line per condition check. 
-      # Adding a tight loop here mitigates the effect, 
-      # ensuring that most of the output directly from 
+      # print one line per condition check.
+      # Adding a tight loop here mitigates the effect,
+      # ensuring that most of the output directly from
      # setup.sh is sent to tee_output before the outer
      # loop exits and prints things like "setup.sh exited"
-      # 
+      #
      for i in xrange(10):
        try:
          line = nbsr.readline(0.05)
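`time_remaining` is maintained outside the lines shown; the loop's contract is just a deadline re-checked on every pass. A minimal runnable sketch of that pattern, with the 3-second limit and the sleep body as stand-ins:

```python
from datetime import datetime, timedelta
import time

deadline = datetime.now() + timedelta(seconds=3)  # stand-in for the real limit
time_remaining = deadline - datetime.now()
while time_remaining.total_seconds() >= 0:
    time.sleep(0.5)  # stand-in for draining output and polling the port
    time_remaining = deadline - datetime.now()
# control falls through once the deadline passes, mirroring the timeout branch
```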
@@ -323,12 +323,12 @@ class FrameworkTest:
        travis_timeout = datetime.now() + timedelta(minutes = 5)
 
    # Did we time out?
-    if time_remaining.total_seconds() < 0: 
+    if time_remaining.total_seconds() < 0:
      tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
      p.kill()
      return 1
 
-    # What's our return code? 
+    # What's our return code?
    # If setup.sh has terminated, use that code
    # Otherwise, detect if the port was bound
    tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
@@ -339,11 +339,11 @@ class FrameworkTest:
    elif self.benchmarker.is_port_bound(self.port):
      tee_output(prefix, "Bound port detected on %s\n" % self.port)
 
-    # Before we return control to the benchmarker, spin up a 
-    # thread to keep an eye on the pipes in case the running 
+    # Before we return control to the benchmarker, spin up a
+    # thread to keep an eye on the pipes in case the running
    # framework uses stdout/stderr. Once all processes accessing
-    # the subprocess.PIPEs are dead, this thread will terminate. 
-    # Use a different prefix to indicate this is the framework 
+    # the subprocess.PIPEs are dead, this thread will terminate.
+    # Use a different prefix to indicate this is the framework
    # speaking
    prefix = "Server %s: " % self.name
    def watch_child_pipes(nbsr, prefix):
@@ -371,17 +371,22 @@ class FrameworkTest:
 
  ############################################################
  # verify_urls
-  # Verifys each of the URLs for this test. THis will sinply 
-  # curl the URL and check for it's return status. 
+  # Verifies each of the URLs for this test. This will simply
+  # curl the URL and check its return status.
  # For each url, a flag will be set on this object for whether
  # or not it passed
  # Returns True if all verifications succeeded
  ############################################################
-  def verify_urls(self, verificationPath):
+  def verify_urls(self, logPath):
    result = True
-    
+
    def verify_type(test_type):
-      with open(os.path.join(verificationPath, (test_type + '.txt')), 'w') as verification:
+      verificationPath = os.path.join(logPath, test_type)
+      try:
+        os.makedirs(verificationPath)
+      except OSError:
+        pass
+      with open(os.path.join(verificationPath, 'verification.txt'), 'w') as verification:
        test = self.runTests[test_type]
        test.setup_out(verification)
        verification.write(header("VERIFYING %s" % test_type.upper()))
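The bare `except OSError: pass` is the pre-3.2 way to tolerate an already-existing directory, but it also swallows genuine failures such as a permission error. A stricter version of the same idiom, should that ever matter here:

```python
import errno
import os

def ensure_dir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        # Only ignore "already exists"; surface everything else.
        if e.errno != errno.EEXIST:
            raise
```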
@@ -442,7 +447,7 @@ class FrameworkTest:
      verify_type(test_type)
      if self.runTests[test_type].failed:
        result = False
-    
+
    return result
  ############################################################
  # End verify_urls
@@ -453,10 +458,15 @@ class FrameworkTest:
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
-  def benchmark(self, benchmarkPath):
+  def benchmark(self, logPath):
 
    def benchmark_type(test_type):
-      with open(os.path.join(benchmarkPath, (test_type + '.txt')), 'w') as out:
+      benchmarkPath = os.path.join(logPath, test_type)
+      try:
+        os.makedirs(benchmarkPath)
+      except OSError:
+        pass
+      with open(os.path.join(benchmarkPath, 'benchmark.txt'), 'w') as out:
        out.write("BENCHMARKING %s ... " % test_type.upper())
 
        test = self.runTests[test_type]
@@ -494,13 +504,13 @@ class FrameworkTest:
          self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
          out.write( "Complete\n" )
          out.flush()
-    
+
    for test_type in self.runTests:
      benchmark_type(test_type)
  ############################################################
  # End benchmark
  ############################################################
-  
+
  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
@@ -523,7 +533,7 @@ class FrameworkTest:
    results = dict()
    results['results'] = []
    stats = []
-    
+
    if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
      with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
        is_warmup = True
@@ -546,7 +556,7 @@ class FrameworkTest:
          #if "Requests/sec:" in line:
          #  m = re.search("Requests/sec:\s+([0-9]+)", line)
          #  rawData['reportedResults'] = m.group(1)
-          
+
          # search for weighttp data such as succeeded and failed.
          if "Latency" in line:
            m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
@@ -555,7 +565,7 @@ class FrameworkTest:
            rawData['latencyStdev'] = m[1]
            rawData['latencyMax'] = m[2]
            # rawData['latencyStdevPercent'] = m[3]
-          
+
          #if "Req/Sec" in line:
          #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
          #  if len(m) == 4:
@@ -563,10 +573,10 @@ class FrameworkTest:
          #  rawData['requestsStdev'] = m[1]
          #  rawData['requestsMax'] = m[2]
          #  rawData['requestsStdevPercent'] = m[3]
-          
+
          #if "requests in" in line:
          #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
-          #  if m != None: 
+          #  if m != None:
          #    # parse out the raw time, which may be in minutes or seconds
          #    raw_time = m.group(1)
          #    if "ms" in raw_time:
@@ -577,12 +587,12 @@ class FrameworkTest:
          #    rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
          #  elif "h" in raw_time:
          #    rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
-          
+
          if "requests in" in line:
            m = re.search("([0-9]+) requests in", line)
-            if m != None: 
+            if m != None:
              rawData['totalRequests'] = int(m.group(1))
-          
+
          if "Socket errors" in line:
            if "connect" in line:
              m = re.search("connect ([0-9]+)", line)
@@ -596,10 +606,10 @@ class FrameworkTest:
            if "timeout" in line:
              m = re.search("timeout ([0-9]+)", line)
              rawData['timeout'] = int(m.group(1))
-          
+
          if "Non-2xx" in line:
            m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
-            if m != None: 
+            if m != None:
              rawData['5xx'] = int(m.group(1))
          if "STARTTIME" in line:
            m = re.search("[0-9]+", line)
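A note on the regexes above: `[us|ms|s|m|%]` is a character class, i.e. the set of characters u, s, |, m, %, not an alternation of unit names, but it still matches wrk's suffixes because they are all spelled with those characters. Against representative wrk output lines (the numbers are illustrative):

```python
import re

latency_line = "    Latency   632.20us  745.28us   8.21ms   91.32%"
m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", latency_line)
# m == ['632.20us', '745.28us', '8.21ms', '91.32%']
#        avg          stdev       max      +/- stdev

summary_line = "  104829 requests in 15.00s, 15.29MB read"
m = re.search("([0-9]+) requests in", summary_line)
if m is not None:
    total_requests = int(m.group(1))  # 104829
```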
@@ -630,9 +640,9 @@ class FrameworkTest:
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
-    return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels), 
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, 
-      levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels), 
+    return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
+      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)
 
  ############################################################
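All three script generators are plain `str.format` over the shell templates defined at the top of the class. With assumed values, the two pieces look like this:

```python
headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
headers = headers_template.format(accept="application/json")
# -H 'Host: localhost' -H 'application/json' -H 'Connection: keep-alive'

# The concurrency levels here are hypothetical; the bash for-loop in the
# template iterates over this space-separated string.
levels = " ".join("{}".format(item) for item in [8, 16, 32, 64, 128, 256])
# "8 16 32 64 128 256"
```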
@@ -642,9 +652,9 @@ class FrameworkTest:
  ############################################################
  def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
-    return self.pipeline_template.format(max_concurrency=16384, 
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, 
-      levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]), 
+    return self.pipeline_template.format(max_concurrency=16384,
+      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=16)
 
@@ -656,9 +666,9 @@ class FrameworkTest:
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.headers_template.format(accept=accept_header)
-    return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels), 
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, 
-      levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels), 
+    return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
+      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
 
  ############################################################
@@ -684,7 +694,7 @@ class FrameworkTest:
 
  ##############################################################
  # Begin __end_logging
-  # Stops the logger thread and blocks until shutdown is complete. 
+  # Stops the logger thread and blocks until shutdown is complete.
  ##############################################################
  def __end_logging(self):
    self.subprocess_handle.terminate()
@@ -735,7 +745,7 @@ class FrameworkTest:
  ##############################################################
 
  def __getattr__(self, name):
-    """For backwards compatibility, we used to pass benchmarker 
+    """For backwards compatibility, we used to pass benchmarker
    as the argument to the setup.sh files"""
    try:
      x = getattr(self.benchmarker, name)
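`__getattr__` is consulted only after normal attribute lookup fails, so this fallback adds no cost to ordinary access. The pattern in isolation:

```python
class Delegator(object):
    def __init__(self, target):
        self.target = target

    def __getattr__(self, name):
        # Reached only when 'name' is not found on the instance or class;
        # fall back to the wrapped object, as FrameworkTest falls back to
        # its benchmarker for legacy setup.sh expectations.
        return getattr(self.target, name)
```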
@@ -757,13 +767,13 @@ class FrameworkTest:
  # More may be added in the future. If they are, please update
  # the above list.
  # Note: raw_stats is directly from the __parse_stats method.
-  # Recall that this consists of a dictionary of timestamps, 
+  # Recall that this consists of a dictionary of timestamps,
  # each of which contain a dictionary of stat categories which
  # contain a dictionary of stats
  ##############################################################
  def __calculate_average_stats(self, raw_stats):
    raw_stat_collection = dict()
-    
+
    for timestamp, time_dict in raw_stats.items():
      for main_header, sub_headers in time_dict.items():
        item_to_append = None
@@ -817,10 +827,10 @@ class FrameworkTest:
  # End __calculate_average_stats
  #########################################################################################
 
-  
+
  ##########################################################################################
  # Constructor
-  ##########################################################################################  
+  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
@@ -840,15 +850,15 @@ class FrameworkTest:
    self.display_name = ""
    self.notes = ""
    self.versus = ""
-    
+
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-    
+
    self.install_root="%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy is 'pertest':
      self.install_root="%s/pertest/%s" % (self.install_root, name)
 
-    # Used in setup.sh scripts for consistency with 
+    # Used in setup.sh scripts for consistency with
    # the bash environment variables
    self.troot = self.directory
    self.iroot = self.install_root
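One untouched context line above, `if benchmarker.install_strategy is 'pertest':`, compares strings with `is`. That tests object identity and only works when CPython happens to intern both strings; `==` is the reliable comparison:

```python
strategy = "".join(["per", "test"])  # same text, freshly built object
strategy == "pertest"                # True: compares values
strategy is "pertest"                # typically False: compares identity
```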
@@ -895,7 +905,7 @@ def validate_urls(test_name, test_keys):
      Example `%s` url: \"%s\"
      """ % (test_url, test_name, key_value, test_url, example_urls[test_url])
      raise Exception(errmsg)
-  
+
 
 def validate_test(test_name, test_keys, directory):
   """
@@ -989,14 +999,14 @@ def validate_test(test_name, test_keys, directory):
        allowed = schema[key].get('allowed', [])
        acceptable_values, descriptors = zip(*allowed)
        acceptable_values = [a.lower() for a in acceptable_values]
-        
+
        if val not in acceptable_values:
          msg = ("Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
            % (key, test_name, test_keys['framework']))
          helpinfo = ('\n').join(["  `%s` -- %s" % (v, desc) for (v, desc) in zip(acceptable_values, descriptors)])
          fullerr = msg + helpinfo + "\n"
          raise Exception(fullerr)
-        
+
        elif not has_predefined_acceptables and val == "":
          msg = ("Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n"
            % (key, test_name, test_keys['framework']))
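`zip(*allowed)` unzips the (value, description) pairs into two parallel tuples; the entries below are hypothetical examples of the schema's shape:

```python
allowed = [("mysql", "MySQL database"), ("postgres", "PostgreSQL database")]
acceptable_values, descriptors = zip(*allowed)
# acceptable_values == ('mysql', 'postgres')
# descriptors       == ('MySQL database', 'PostgreSQL database')
```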
@@ -1017,13 +1027,13 @@ def parse_config(config, directory, benchmarker):
    tests_to_run = [name for (name,keys) in test.iteritems()]
    if "default" not in tests_to_run:
      logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework'])
-    
+
    # Check that each test configuration is acceptable
    # Throw exceptions if a field is missing, or how to improve the field
    for test_name, test_keys in test.iteritems():
      # Validates the benchmark_config entry
      validate_test(test_name, test_keys, directory)
-    
+
      # Map test type to a parsed FrameworkTestType object
      runTests = dict()
      for type_name, type_obj in benchmarker.types.iteritems():
@@ -1033,7 +1043,7 @@ def parse_config(config, directory, benchmarker):
          runTests[type_name] = type_obj.copy().parse(test_keys)
        except AttributeError as ae:
          # This is quite common - most tests don't support all types
-          # Quitely log it and move on (debug logging is on in travis and this causes 
+          # Quietly log it and move on (debug logging is on in travis and this causes
          # ~1500 lines of debug, so I'm totally ignoring it for now)
          # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
          pass
@@ -1046,10 +1056,10 @@ def parse_config(config, directory, benchmarker):
 
      # Prefix all test names with framework except 'default' test
      # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
-      if test_name == 'default': 
+      if test_name == 'default':
        test_name = config['framework']
      else:
-        test_name = "%s-%s" % (config['framework'], test_name) 
+        test_name = "%s-%s" % (config['framework'], test_name)
 
      # By passing the entire set of keys, each FrameworkTest will have a member for each key
      tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))