Browse Source

Removed support for docker_files (#3526)

yolo
Mike Smith 7 years ago
parent
commit
10c99cbb15

+ 2 - 1
.gitattributes

@@ -1 +1,2 @@
-* text eol=lf
+# Docker relies on lf - force it
+* text eol=lf

+ 0 - 6
README.md

@@ -22,12 +22,6 @@ To get started developing you'll need to install [docker](https://docs.docker.co
 
 
         $ git clone https://github.com/TechEmpower/FrameworkBenchmarks.git
         $ git clone https://github.com/TechEmpower/FrameworkBenchmarks.git
 
 
-#### A note on Windows:
-
-Git on Windows will, by default, automatically convert line endings from `lf` to `crlf`. This is problematic for the Docker images we build for tests targeting Linux operating systems. Therefore, in order to run tests on Windows, you must set `autocrlf` as either `true` or `input` **prior** to cloning.
-
-See [this writeup](https://help.github.com/articles/dealing-with-line-endings/) for more information.
-
 2. Create the TFB Docker virtual network
 2. Create the TFB Docker virtual network
 
 
         $ docker network create tfb
         $ docker network create tfb

+ 4 - 4
toolset/benchmark/benchmarker.py

@@ -30,7 +30,7 @@ class Benchmarker:
         '''
         '''
         This process involves setting up the client/server machines
         This process involves setting up the client/server machines
         with any necessary change. Then going through each test,
         with any necessary change. Then going through each test,
-        running their setup script, verifying the URLs, and
+        running their docker build and run, verifying the URLs, and
         running benchmarks against them.
         running benchmarks against them.
         '''
         '''
         # Generate metadata
         # Generate metadata
@@ -157,7 +157,7 @@ class Benchmarker:
             while not accepting_requests and slept < max_sleep:
             while not accepting_requests and slept < max_sleep:
                 accepting_requests = test.is_accepting_requests()
                 accepting_requests = test.is_accepting_requests()
                 if not docker_helper.successfully_running_containers(
                 if not docker_helper.successfully_running_containers(
-                        self.config, test.get_docker_files(), benchmark_log):
+                        self.config, test, benchmark_log):
                     docker_helper.stop(self.config, containers,
                     docker_helper.stop(self.config, containers,
                                        database_container, test)
                                        database_container, test)
                     log("ERROR: One or more expected docker container exited early",
                     log("ERROR: One or more expected docker container exited early",
@@ -218,8 +218,8 @@ class Benchmarker:
                 return False
                 return False
         except (OSError, IOError, subprocess.CalledProcessError) as e:
         except (OSError, IOError, subprocess.CalledProcessError) as e:
             tb = traceback.format_exc()
             tb = traceback.format_exc()
-            self.results.write_intermediate(
-                test.name, "error during test setup: " + str(e))
+            self.results.write_intermediate(test.name,
+                                            "error during test: " + str(e))
             log("Subprocess Error %s" % test.name,
             log("Subprocess Error %s" % test.name,
                 file=benchmark_log,
                 file=benchmark_log,
                 border='-',
                 border='-',

+ 0 - 1
toolset/benchmark/fortune_html_parser.py

@@ -8,7 +8,6 @@ from toolset.utils.output_helper import log
 
 
 
 
 class FortuneHTMLParser(HTMLParser):
 class FortuneHTMLParser(HTMLParser):
-
     def __init__(self):
     def __init__(self):
         HTMLParser.__init__(self)
         HTMLParser.__init__(self)
         self.body = []
         self.body = []

+ 1 - 23
toolset/benchmark/framework_test.py

@@ -20,7 +20,6 @@ class FrameworkTest:
         self.benchmarker_config = benchmarker_config
         self.benchmarker_config = benchmarker_config
         self.results = results
         self.results = results
         self.runTests = runTests
         self.runTests = runTests
-        self.fwroot = benchmarker_config.fwroot
         self.approach = ""
         self.approach = ""
         self.classification = ""
         self.classification = ""
         self.database = ""
         self.database = ""
@@ -35,11 +34,6 @@ class FrameworkTest:
         self.notes = ""
         self.notes = ""
         self.port = ""
         self.port = ""
         self.versus = ""
         self.versus = ""
-        self.docker_files = None
-
-        # Used in setup.sh scripts for consistency with
-        # the bash environment variables
-        self.troot = self.directory
 
 
         self.__dict__.update(args)
         self.__dict__.update(args)
 
 
@@ -51,7 +45,6 @@ class FrameworkTest:
         '''
         '''
         Start the test implementation
         Start the test implementation
         '''
         '''
-        test_docker_files = self.get_docker_files()
         test_log_dir = os.path.join(self.results.directory, self.name.lower())
         test_log_dir = os.path.join(self.results.directory, self.name.lower())
         build_log_dir = os.path.join(test_log_dir, 'build')
         build_log_dir = os.path.join(test_log_dir, 'build')
         run_log_dir = os.path.join(test_log_dir, 'run')
         run_log_dir = os.path.join(test_log_dir, 'run')
@@ -70,8 +63,7 @@ class FrameworkTest:
         if result != 0:
         if result != 0:
             return None
             return None
 
 
-        return docker_helper.run(self.benchmarker_config, test_docker_files,
-                                 run_log_dir)
+        return docker_helper.run(self.benchmarker_config, self, run_log_dir)
 
 
     def is_accepting_requests(self):
     def is_accepting_requests(self):
         '''
         '''
@@ -90,20 +82,6 @@ class FrameworkTest:
         return docker_helper.test_client_connection(self.benchmarker_config,
         return docker_helper.test_client_connection(self.benchmarker_config,
                                                     url)
                                                     url)
 
 
-    def get_docker_files(self):
-        '''
-        Returns all the docker_files for this test.
-        '''
-        test_docker_files = ["%s.dockerfile" % self.name]
-        if self.docker_files is not None:
-            if type(self.docker_files) is list:
-                test_docker_files.extend(self.docker_files)
-            else:
-                raise Exception(
-                    "docker_files in benchmark_config.json must be an array")
-
-        return test_docker_files
-
     def verify_urls(self):
     def verify_urls(self):
         '''
         '''
         Verifys each of the URLs for this test. This will simply curl the URL and 
         Verifys each of the URLs for this test. This will simply curl the URL and 

+ 1 - 3
toolset/scaffolding/README.md

@@ -16,9 +16,7 @@ A metric we capture, in addition to the actual benchmark numbers, is the signifi
 
 
 3. Edit `benchmark_config.json`
 3. Edit `benchmark_config.json`
 
 
-The initialization process made some assumptions about your test implementation that may or may not be true. For example, it laid out two separate tests: the non-database tests; and the database tests. You, on the other hand, may only want to implement the `JSON` test, so you will need alter `benchmark_config.json`.
-
-Additionally, `benchmark_config.json` has, for each test, a key called "setup_file". This value refers to the next bullet.
+You will need to alter `benchmark_config.json` to have the appropriate endpoints and port specified.
 
 
 4. Create `$NAME.dockerfile`
 4. Create `$NAME.dockerfile`
 
 

+ 186 - 203
toolset/utils/docker_helper.py

@@ -66,8 +66,7 @@ def publish(benchmarker_config):
     benchmarker_config.build = True
     benchmarker_config.build = True
     tests = gather_tests(benchmarker_config=benchmarker_config)
     tests = gather_tests(benchmarker_config=benchmarker_config)
     for test in tests:
     for test in tests:
-        __build_dependencies(benchmarker_config, test,
-                             ["%s.dockerfile" % test.name], docker_buildargs)
+        __build_dependencies(benchmarker_config, test, docker_buildargs)
 
 
     client = docker.DockerClient(
     client = docker.DockerClient(
         base_url=benchmarker_config.server_docker_host)
         base_url=benchmarker_config.server_docker_host)
@@ -99,57 +98,48 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
     for test in tests:
     for test in tests:
         log_prefix = "%s: " % test.name
         log_prefix = "%s: " % test.name
 
 
-        test_docker_files = ["%s.dockerfile" % test.name]
-        if test.docker_files is not None:
-            if type(test.docker_files) is list:
-                test_docker_files.extend(test.docker_files)
-            else:
-                raise Exception(
-                    "docker_files in benchmark_config.json must be an array")
-
-        if __build_dependencies(benchmarker_config, test, test_docker_files,
-                                docker_buildargs, build_log_dir) > 0:
+        if __build_dependencies(benchmarker_config, test, docker_buildargs,
+                                build_log_dir) > 0:
             return 1
             return 1
 
 
-        # Build the test images
-        for test_docker_file in test_docker_files:
-            build_log_file = build_log_dir
-            if build_log_dir is not os.devnull:
-                build_log_file = os.path.join(
-                    build_log_dir, "%s.log" % test_docker_file.replace(
-                        ".dockerfile", "").lower())
-            with open(build_log_file, 'w') as build_log:
-                try:
-                    for line in docker.APIClient(
-                            base_url=benchmarker_config.server_docker_host
-                    ).build(
+        # Build the test image
+        test_docker_file = "%s.dockerfile" % test.name
+        build_log_file = build_log_dir
+        if build_log_dir is not os.devnull:
+            build_log_file = os.path.join(
+                build_log_dir,
+                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
+        with open(build_log_file, 'w') as build_log:
+            try:
+                for line in docker.APIClient(
+                        base_url=benchmarker_config.server_docker_host).build(
                             path=test.directory,
                             path=test.directory,
                             dockerfile=test_docker_file,
                             dockerfile=test_docker_file,
                             tag="techempower/tfb.test.%s" %
                             tag="techempower/tfb.test.%s" %
                             test_docker_file.replace(".dockerfile", ""),
                             test_docker_file.replace(".dockerfile", ""),
                             buildargs=docker_buildargs,
                             buildargs=docker_buildargs,
                             forcerm=True):
                             forcerm=True):
-                        if line.startswith('{"stream":'):
-                            line = json.loads(line)
-                            line = line[line.keys()[0]].encode('utf-8')
-                            log(line,
-                                prefix=log_prefix,
-                                file=build_log,
-                                color=Fore.WHITE + Style.BRIGHT \
-                                    if re.match(r'^Step \d+\/\d+', line) else '')
-                except Exception:
-                    tb = traceback.format_exc()
-                    log("Docker build failed; terminating",
-                        prefix=log_prefix,
-                        file=build_log,
-                        color=Fore.RED)
-                    log(tb, prefix=log_prefix, file=build_log)
-                    return 1
+                    if line.startswith('{"stream":'):
+                        line = json.loads(line)
+                        line = line[line.keys()[0]].encode('utf-8')
+                        log(line,
+                            prefix=log_prefix,
+                            file=build_log,
+                            color=Fore.WHITE + Style.BRIGHT \
+                                if re.match(r'^Step \d+\/\d+', line) else '')
+            except Exception:
+                tb = traceback.format_exc()
+                log("Docker build failed; terminating",
+                    prefix=log_prefix,
+                    file=build_log,
+                    color=Fore.RED)
+                log(tb, prefix=log_prefix, file=build_log)
+                return 1
 
 
     return 0
     return 0
 
 
 
 
-def run(benchmarker_config, docker_files, run_log_dir):
+def run(benchmarker_config, test, run_log_dir):
     '''
     '''
     Run the given Docker container(s)
     Run the given Docker container(s)
     '''
     '''
@@ -157,104 +147,96 @@ def run(benchmarker_config, docker_files, run_log_dir):
         base_url=benchmarker_config.server_docker_host)
         base_url=benchmarker_config.server_docker_host)
     containers = []
     containers = []
 
 
-    for docker_file in docker_files:
-        log_prefix = "%s: " % docker_file.replace(".dockerfile", "")
-        try:
+    log_prefix = "%s: " % test.name
+    try:
 
 
-            def watch_container(container, docker_file):
-                with open(
-                        os.path.join(
-                            run_log_dir, "%s.log" % docker_file.replace(
-                                ".dockerfile", "").lower()), 'w') as run_log:
-                    for line in container.logs(stream=True):
-                        log(line, prefix=log_prefix, file=run_log)
-
-            extra_hosts = None
-            name = "tfb-server"
-
-            if benchmarker_config.network is None:
-                extra_hosts = {
-                    socket.gethostname(): str(benchmarker_config.server_host),
-                    'tfb-server': str(benchmarker_config.server_host),
-                    'tfb-database': str(benchmarker_config.database_host)
-                }
-                name = None
-
-            sysctl = {'net.core.somaxconn': 65535}
-
-            ulimit = [{
-                'name': 'nofile',
-                'hard': 200000,
-                'soft': 200000
-            }, {
-                'name': 'rtprio',
-                'hard': 99,
-                'soft': 99
-            }]
-
-            container = client.containers.run(
-                "techempower/tfb.test.%s" % docker_file.replace(
-                    ".dockerfile", ""),
-                name=name,
-                network=benchmarker_config.network,
-                network_mode=benchmarker_config.network_mode,
-                stderr=True,
-                detach=True,
-                init=True,
-                extra_hosts=extra_hosts,
-                privileged=True,
-                ulimits=ulimit,
-                sysctls=sysctl)
-
-            containers.append(container)
-
-            watch_thread = Thread(
-                target=watch_container, args=(
-                    container,
-                    docker_file,
-                ))
-            watch_thread.daemon = True
-            watch_thread.start()
-
-        except Exception:
+        def watch_container(container, docker_file):
             with open(
             with open(
                     os.path.join(run_log_dir, "%s.log" % docker_file.replace(
                     os.path.join(run_log_dir, "%s.log" % docker_file.replace(
                         ".dockerfile", "").lower()), 'w') as run_log:
                         ".dockerfile", "").lower()), 'w') as run_log:
-                tb = traceback.format_exc()
-                log("Running docker cointainer: %s failed" % docker_file,
-                    prefix=log_prefix,
-                    file=run_log)
-                log(tb, prefix=log_prefix, file=run_log)
+                for line in container.logs(stream=True):
+                    log(line, prefix=log_prefix, file=run_log)
+
+        extra_hosts = None
+        name = "tfb-server"
+
+        if benchmarker_config.network is None:
+            extra_hosts = {
+                socket.gethostname(): str(benchmarker_config.server_host),
+                'tfb-server': str(benchmarker_config.server_host),
+                'tfb-database': str(benchmarker_config.database_host)
+            }
+            name = None
+
+        sysctl = {'net.core.somaxconn': 65535}
+
+        ulimit = [{
+            'name': 'nofile',
+            'hard': 200000,
+            'soft': 200000
+        }, {
+            'name': 'rtprio',
+            'hard': 99,
+            'soft': 99
+        }]
+
+        container = client.containers.run(
+            "techempower/tfb.test.%s" % test.name,
+            name=name,
+            network=benchmarker_config.network,
+            network_mode=benchmarker_config.network_mode,
+            stderr=True,
+            detach=True,
+            init=True,
+            extra_hosts=extra_hosts,
+            privileged=True,
+            ulimits=ulimit,
+            sysctls=sysctl)
+
+        containers.append(container)
+
+        watch_thread = Thread(
+            target=watch_container,
+            args=(
+                container,
+                "%s.dockerfile" % test.name,
+            ))
+        watch_thread.daemon = True
+        watch_thread.start()
+
+    except Exception:
+        with open(
+                os.path.join(run_log_dir, "%s.log" % test.name.lower()),
+                'w') as run_log:
+            tb = traceback.format_exc()
+            log("Running docker cointainer: %s.dockerfile failed" % test.name,
+                prefix=log_prefix,
+                file=run_log)
+            log(tb, prefix=log_prefix, file=run_log)
 
 
     return containers
     return containers
 
 
 
 
-def successfully_running_containers(benchmarker_config, docker_files, out):
+def successfully_running_containers(benchmarker_config, test, out):
     '''
     '''
     Returns whether all the expected containers for the given docker_files are
     Returns whether all the expected containers for the given docker_files are
     running.
     running.
     '''
     '''
     client = docker.DockerClient(
     client = docker.DockerClient(
         base_url=benchmarker_config.server_docker_host)
         base_url=benchmarker_config.server_docker_host)
-    expected_running_container_images = []
-    for docker_file in docker_files:
-        # 'gemini.dockerfile' -> 'gemini'
-        image_tag = '.'.join(docker_file.split('.')[:-1])
-        expected_running_container_images.append(image_tag)
     running_container_images = []
     running_container_images = []
     for container in client.containers.list():
     for container in client.containers.list():
         # 'techempower/tfb.test.gemini:0.1' -> 'gemini'
         # 'techempower/tfb.test.gemini:0.1' -> 'gemini'
         image_tag = container.image.tags[0].split(':')[0][21:]
         image_tag = container.image.tags[0].split(':')[0][21:]
         running_container_images.append(image_tag)
         running_container_images.append(image_tag)
 
 
-    for image_name in expected_running_container_images:
-        if image_name not in running_container_images:
-            log_prefix = "%s: " % image_name
-            log("ERROR: Expected techempower/tfb.test.%s to be running container"
-                % image_name,
-                prefix=log_prefix,
-                file=out)
-            return False
+    if test.name not in running_container_images:
+        log_prefix = "%s: " % test.name
+        log("ERROR: Expected techempower/tfb.test.%s to be running container" %
+            test.name,
+            prefix=log_prefix,
+            file=out)
+        return False
     return True
     return True
 
 
 
 
@@ -453,14 +435,14 @@ def benchmark(benchmarker_config, script, variables, raw_file):
             sysctls=sysctl), raw_file)
             sysctls=sysctl), raw_file)
 
 
 
 
-def __gather_dependencies(docker_file):
+def __gather_dependencies(benchmarker_config, docker_file):
     '''
     '''
     Gathers all the known docker dependencies for the given docker image.
     Gathers all the known docker dependencies for the given docker image.
     '''
     '''
     deps = []
     deps = []
 
 
-    docker_dir = os.path.join(
-        os.getenv('FWROOT'), "toolset", "setup", "docker")
+    docker_dir = os.path.join(benchmarker_config.fwroot, "toolset", "setup",
+                              "docker")
 
 
     if os.path.exists(docker_file):
     if os.path.exists(docker_file):
         with open(docker_file) as fp:
         with open(docker_file) as fp:
@@ -482,102 +464,103 @@ def __gather_dependencies(docker_file):
                         if not os.path.exists(dep_docker_file):
                         if not os.path.exists(dep_docker_file):
                             dep_docker_file = find(docker_dir,
                             dep_docker_file = find(docker_dir,
                                                    depToken + ".dockerfile")
                                                    depToken + ".dockerfile")
-                        deps.extend(__gather_dependencies(dep_docker_file))
+                        deps.extend(
+                            __gather_dependencies(benchmarker_config,
+                                                  dep_docker_file))
 
 
     return deps
     return deps
 
 
 
 
 def __build_dependencies(benchmarker_config,
 def __build_dependencies(benchmarker_config,
                          test,
                          test,
-                         test_docker_files,
                          docker_buildargs,
                          docker_buildargs,
                          build_log_dir=os.devnull):
                          build_log_dir=os.devnull):
     '''
     '''
     Builds all the dependency docker images for the given test.
     Builds all the dependency docker images for the given test.
     Does not build the test docker image.
     Does not build the test docker image.
     '''
     '''
-    for test_docker_file in test_docker_files:
-        dependencies = OrderedSet(
-            list(
-                reversed(
-                    __gather_dependencies(
-                        os.path.join(test.directory, test_docker_file)))))
-
-        docker_dir = os.path.join(
-            os.getenv('FWROOT'), "toolset", "setup", "docker")
-        for dep in dependencies:
-            log_prefix = dep + ": "
-            pulled = False
-
-            # Do not pull techempower/ images if we are building specifically
-            if not benchmarker_config.build and 'techempower/' not in dep:
-                client = docker.DockerClient(
-                    base_url=benchmarker_config.server_docker_host)
+    dependencies = OrderedSet(
+        list(
+            reversed(
+                __gather_dependencies(
+                    benchmarker_config,
+                    os.path.join(test.directory,
+                                 "%s.dockerfile" % test.name)))))
+
+    docker_dir = os.path.join(benchmarker_config.fwroot, "toolset", "setup",
+                              "docker")
+    for dep in dependencies:
+        log_prefix = dep + ": "
+        pulled = False
+
+        # Do not pull techempower/ images if we are building specifically
+        if not benchmarker_config.build and 'techempower/' not in dep:
+            client = docker.DockerClient(
+                base_url=benchmarker_config.server_docker_host)
+            try:
+                # If we have it, use it
+                client.images.get(dep)
+                pulled = True
+                log("Found published image; skipping build", prefix=log_prefix)
+            except:
+                # Pull the dependency image
                 try:
                 try:
-                    # If we have it, use it
-                    client.images.get(dep)
+                    log("Attempting docker pull for image (this can take some time)",
+                        prefix=log_prefix)
+                    client.images.pull(dep)
                     pulled = True
                     pulled = True
                     log("Found published image; skipping build",
                     log("Found published image; skipping build",
                         prefix=log_prefix)
                         prefix=log_prefix)
                 except:
                 except:
-                    # Pull the dependency image
-                    try:
-                        log("Attempting docker pull for image (this can take some time)",
-                            prefix=log_prefix)
-                        client.images.pull(dep)
-                        pulled = True
-                        log("Found published image; skipping build",
-                            prefix=log_prefix)
-                    except:
-                        log("Docker pull failed; %s could not be found; terminating" % dep,
-                            prefix=log_prefix,
-                            color=Fore.RED)
-                        return 1
-
-            if not pulled:
-                dep_ref = dep.strip().split(':')[0].strip()
-                dependency = dep_ref.split('/')[1]
-                build_log_file = build_log_dir
-                if build_log_dir is not os.devnull:
-                    build_log_file = os.path.join(
-                        build_log_dir, "%s.log" % dependency.lower())
-                with open(build_log_file, 'w') as build_log:
-                    docker_file = os.path.join(test.directory,
-                                               dependency + ".dockerfile")
-                    if not docker_file or not os.path.exists(docker_file):
-                        docker_file = find(docker_dir,
+                    log("Docker pull failed; %s could not be found; terminating"
+                        % dep,
+                        prefix=log_prefix,
+                        color=Fore.RED)
+                    return 1
+
+        if not pulled:
+            dep_ref = dep.strip().split(':')[0].strip()
+            dependency = dep_ref.split('/')[1]
+            build_log_file = build_log_dir
+            if build_log_dir is not os.devnull:
+                build_log_file = os.path.join(build_log_dir,
+                                              "%s.log" % dependency.lower())
+            with open(build_log_file, 'w') as build_log:
+                docker_file = os.path.join(test.directory,
                                            dependency + ".dockerfile")
                                            dependency + ".dockerfile")
-                    if not docker_file:
-                        log("Docker build failed; %s could not be found; terminating"
-                            % (dependency + ".dockerfile"),
-                            prefix=log_prefix,
-                            file=build_log,
-                            color=Fore.RED)
-                        return 1
-
-                    # Build the dependency image
-                    try:
-                        for line in docker.APIClient(
-                                base_url=benchmarker_config.server_docker_host
-                        ).build(
-                                path=os.path.dirname(docker_file),
-                                dockerfile="%s.dockerfile" % dependency,
-                                tag=dep,
-                                buildargs=docker_buildargs,
-                                forcerm=True):
-                            if line.startswith('{"stream":'):
-                                line = json.loads(line)
-                                line = line[line.keys()[0]].encode('utf-8')
-                                log(line,
-                                    prefix=log_prefix,
-                                    file=build_log,
-                                    color=Fore.WHITE + Style.BRIGHT \
-                                        if re.match(r'^Step \d+\/\d+', line) else '')
-                    except Exception:
-                        tb = traceback.format_exc()
-                        log("Docker dependency build failed; terminating",
-                            prefix=log_prefix,
-                            file=build_log,
-                            color=Fore.RED)
-                        log(tb, prefix=log_prefix, file=build_log)
-                        return 1
+                if not docker_file or not os.path.exists(docker_file):
+                    docker_file = find(docker_dir, dependency + ".dockerfile")
+                if not docker_file:
+                    log("Docker build failed; %s could not be found; terminating"
+                        % (dependency + ".dockerfile"),
+                        prefix=log_prefix,
+                        file=build_log,
+                        color=Fore.RED)
+                    return 1
+
+                # Build the dependency image
+                try:
+                    for line in docker.APIClient(
+                            base_url=benchmarker_config.server_docker_host
+                    ).build(
+                            path=os.path.dirname(docker_file),
+                            dockerfile="%s.dockerfile" % dependency,
+                            tag=dep,
+                            buildargs=docker_buildargs,
+                            forcerm=True):
+                        if line.startswith('{"stream":'):
+                            line = json.loads(line)
+                            line = line[line.keys()[0]].encode('utf-8')
+                            log(line,
+                                prefix=log_prefix,
+                                file=build_log,
+                                color=Fore.WHITE + Style.BRIGHT \
+                                    if re.match(r'^Step \d+\/\d+', line) else '')
+                except Exception:
+                    tb = traceback.format_exc()
+                    log("Docker dependency build failed; terminating",
+                        prefix=log_prefix,
+                        file=build_log,
+                        color=Fore.RED)
+                    log(tb, prefix=log_prefix, file=build_log)
+                    return 1

+ 2 - 4
toolset/utils/metadata_helper.py

@@ -1,21 +1,19 @@
-import ConfigParser
 import os
 import os
 import glob
 import glob
 import json
 import json
 
 
-from ast import literal_eval
 from collections import OrderedDict
 from collections import OrderedDict
 
 
 from toolset.utils.output_helper import log
 from toolset.utils.output_helper import log
 
 
 
 
-def gather_langauges():
+def gather_langauges(benchmarker_config):
     '''
     '''
     Gathers all the known languages in the suite via the folder names
     Gathers all the known languages in the suite via the folder names
     beneath FWROOT.
     beneath FWROOT.
     '''
     '''
 
 
-    lang_dir = os.path.join(os.getenv('FWROOT'), "frameworks")
+    lang_dir = os.path.join(benchmarker_config.fwroot, "frameworks")
     langs = []
     langs = []
     for dir in glob.glob(os.path.join(lang_dir, "*")):
     for dir in glob.glob(os.path.join(lang_dir, "*")):
         langs.append(dir.replace(lang_dir, "")[1:])
         langs.append(dir.replace(lang_dir, "")[1:])

+ 12 - 6
toolset/utils/results_helper.py

@@ -1,5 +1,5 @@
 from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
 from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
-from toolset.utils.output_helper import log, FNULL
+from toolset.utils.output_helper import log
 
 
 import os
 import os
 import subprocess
 import subprocess
@@ -289,8 +289,10 @@ class Results:
             # Normally you don't have to use Fore.BLUE before each line, but
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
             # or stream flush, so we have to ensure that the color code is printed repeatedly
-            log(
-                "Verification Summary", border='=', border_bottom='-', color=Fore.CYAN)
+            log("Verification Summary",
+                border='=',
+                border_bottom='-',
+                color=Fore.CYAN)
             for test in tests:
             for test in tests:
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                 if test.name in self.verify.keys():
@@ -444,21 +446,25 @@ class Results:
         '''
         '''
         Get the git commit id for this benchmark
         Get the git commit id for this benchmark
         '''
         '''
-        return subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=self.config.fwroot).strip()
+        return subprocess.check_output(
+            ["git", "rev-parse", "HEAD"], cwd=self.config.fwroot).strip()
 
 
     def __get_git_repository_url(self):
     def __get_git_repository_url(self):
         '''
         '''
         Gets the git repository url for this benchmark
         Gets the git repository url for this benchmark
         '''
         '''
         return subprocess.check_output(
         return subprocess.check_output(
-            ["git", "config", "--get", "remote.origin.url"], cwd=self.config.fwroot).strip()
+            ["git", "config", "--get", "remote.origin.url"],
+            cwd=self.config.fwroot).strip()
 
 
     def __get_git_branch_name(self):
     def __get_git_branch_name(self):
         '''
         '''
         Gets the git branch name for this benchmark
         Gets the git branch name for this benchmark
         '''
         '''
         return subprocess.check_output(
         return subprocess.check_output(
-            'git rev-parse --abbrev-ref HEAD', shell=True, cwd=self.config.fwroot).strip()
+            'git rev-parse --abbrev-ref HEAD',
+            shell=True,
+            cwd=self.config.fwroot).strip()
 
 
     def __parse_stats(self, framework_test, test_type, start_time, end_time,
     def __parse_stats(self, framework_test, test_type, start_time, end_time,
                       interval):
                       interval):

+ 1 - 1
toolset/utils/scaffolding.py

@@ -70,7 +70,7 @@ class Scaffolding:
     def __prompt_language(self):
     def __prompt_language(self):
         self.language = raw_input("Language: ").strip()
         self.language = raw_input("Language: ").strip()
 
 
-        known_languages = gather_langauges()
+        known_languages = gather_langauges(benchmarker_config)
         language = None
         language = None
         for lang in known_languages:
         for lang in known_languages:
             if lang.lower() == self.language.lower():
             if lang.lower() == self.language.lower():