Browse Source

Add a --benchmark-env option to the toolset (#4337)

* Add a --benchmark-env option to the toolset

* add --benchmark-env travis for travis runs
Nate 6 years ago
parent
commit
49d23471d7
4 changed files with 11 additions and 2 deletions
  1. 1 1
      .travis.yml
  2. 4 0
      toolset/run-tests.py
  3. 1 0
      toolset/utils/benchmark_config.py
  4. 5 1
      toolset/utils/docker_helper.py

+ 1 - 1
.travis.yml

@@ -127,4 +127,4 @@ script:
  # run-ci.py runs the diffing to see if travis needs to test this framework. Ideally/eventually,
  # we'd like to try and do the diffing before travis_clean & setup.
  # This will run the tests exactly as you would in your own vm:
-  - if [ "$RUN_TESTS" ]; then docker network create tfb > /dev/null 2>&1 && docker run --network=tfb -v /var/run/docker.sock:/var/run/docker.sock --mount type=bind,source=`pwd`,target=/FrameworkBenchmarks techempower/tfb --mode verify --test-dir $RUN_TESTS; else echo 'Skipping test verification.'; fi
+  - if [ "$RUN_TESTS" ]; then docker network create tfb > /dev/null 2>&1 && docker run --network=tfb -v /var/run/docker.sock:/var/run/docker.sock --mount type=bind,source=`pwd`,target=/FrameworkBenchmarks techempower/tfb --mode verify --test-dir $RUN_TESTS --benchmark-env travis; else echo 'Skipping test verification.'; fi

+ 4 - 0
toolset/run-tests.py

@@ -182,6 +182,10 @@ def main(argv=None):
         nargs='+',
         default=[1, 10, 20, 50, 100],
         help='List of cached query levels to benchmark')
+    parser.add_argument(
+        '--benchmark-env',
+        default='none',
+        help='Environment the benchmark is running in (e.g. travis)')
 
     # Network options
     parser.add_argument(

+ 1 - 0
toolset/utils/benchmark_config.py

@@ -43,6 +43,7 @@ class BenchmarkConfig:
         self.cached_query_levels = args.cached_query_levels
         self.pipeline_concurrency_levels = args.pipeline_concurrency_levels
         self.query_levels = args.query_levels
+        self.benchmark_env = args.benchmark_env
         self.parse = args.parse
         self.results_environment = args.results_environment
         self.results_name = args.results_name

+ 5 - 1
toolset/utils/docker_helper.py

@@ -39,7 +39,11 @@ class DockerHelper:
                     tag=tag,
                     forcerm=True,
                     timeout=3600,
-                    pull=True)
+                    pull=True,
+                    buildargs=({
+                      'BENCHMARK_ENV': self.benchmarker.config.benchmark_env
+                    })
+                )
                 buffer = ""
                 for token in output:
                     if token.startswith('{"stream":'):