* Add a --benchmark-env option to the toolset
* Add --benchmark-env travis for Travis runs
@@ -127,4 +127,4 @@ script:
# run-ci.py runs the diffing to see if travis needs to test this framework. Ideally/eventually,
# we'd like to try and do the diffing before travis_clean & setup.
# This will run the tests exactly as you would in your own vm:
- - if [ "$RUN_TESTS" ]; then docker network create tfb > /dev/null 2>&1 && docker run --network=tfb -v /var/run/docker.sock:/var/run/docker.sock --mount type=bind,source=`pwd`,target=/FrameworkBenchmarks techempower/tfb --mode verify --test-dir $RUN_TESTS; else echo 'Skipping test verification.'; fi
+ - if [ "$RUN_TESTS" ]; then docker network create tfb > /dev/null 2>&1 && docker run --network=tfb -v /var/run/docker.sock:/var/run/docker.sock --mount type=bind,source=`pwd`,target=/FrameworkBenchmarks techempower/tfb --mode verify --test-dir $RUN_TESTS --benchmark-env travis; else echo 'Skipping test verification.'; fi
@@ -182,6 +182,10 @@ def main(argv=None):
nargs='+',
default=[1, 10, 20, 50, 100],
help='List of cached query levels to benchmark')
+ parser.add_argument(
+ '--benchmark-env',
+ default='none',
+        help='Environment the benchmark is running in (e.g. travis)')
# Network options
parser.add_argument(
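One detail worth calling out: argparse normalizes the dashes in `--benchmark-env` to an underscore on the parsed namespace, which is why the config hunk below reads `args.benchmark_env`. A minimal, self-contained sketch of that behavior (values illustrative):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--benchmark-env',
    default='none',
    help='Environment the benchmark is running in (e.g. travis)')

# argparse turns dashes in option names into underscores on the namespace.
args = parser.parse_args(['--benchmark-env', 'travis'])
assert args.benchmark_env == 'travis'

# When the flag is omitted, the declared default applies.
assert parser.parse_args([]).benchmark_env == 'none'
```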
@@ -43,6 +43,7 @@ class BenchmarkConfig:
self.cached_query_levels = args.cached_query_levels
self.pipeline_concurrency_levels = args.pipeline_concurrency_levels
self.query_levels = args.query_levels
+ self.benchmark_env = args.benchmark_env
self.parse = args.parse
self.results_environment = args.results_environment
self.results_name = args.results_name
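For orientation, here is the whole path the value takes from the command line to the Docker build, as a compressed sketch; the class shapes are simplified and the `build_args` helper is hypothetical, standing in for the build call in the next hunk:

```python
# Simplified shapes; the real classes carry many more fields.
class BenchmarkConfig:
    def __init__(self, args):
        # 'none' unless --benchmark-env was passed on the command line
        self.benchmark_env = args.benchmark_env

class Benchmarker:
    def __init__(self, config):
        self.config = config

class DockerHelper:
    def __init__(self, benchmarker):
        self.benchmarker = benchmarker

    def build_args(self):
        # Hypothetical helper: this is the dict handed to docker build
        return {'BENCHMARK_ENV': self.benchmarker.config.benchmark_env}
```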
@@ -39,7 +39,11 @@ class DockerHelper:
tag=tag,
forcerm=True,
timeout=3600,
- pull=True)
+ pull=True,
+                buildargs={
+                    'BENCHMARK_ENV': self.benchmarker.config.benchmark_env
+                },
+            )
buffer = ""
for token in output:
if token.startswith('{"stream":'):
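One caveat on the `buildargs` change: Docker only exposes a build argument to an image whose Dockerfile declares a matching `ARG`; otherwise the value is dropped and the daemon logs a "build-args were not consumed" warning. A standalone sketch of the call above using docker-py's low-level client; the path, tag, and socket URL are illustrative, not the toolset's actual values:

```python
import docker

# Low-level client, matching the streaming build output handled above.
client = docker.APIClient(base_url='unix://var/run/docker.sock')

# The build argument only takes effect if the Dockerfile opts in, e.g.:
#   ARG BENCHMARK_ENV
#   RUN echo "benchmark environment: $BENCHMARK_ENV"
output = client.build(
    path='frameworks/Python/flask',    # illustrative test directory
    tag='techempower/tfb.test.flask',  # illustrative image tag
    forcerm=True,
    timeout=3600,
    pull=True,
    buildargs={'BENCHMARK_ENV': 'travis'},
    decode=True)  # decode=True yields parsed dicts instead of raw JSON lines

for chunk in output:
    if 'stream' in chunk:
        print(chunk['stream'], end='')
```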