[ci skip] Fixed --new flag (#3501)

Mike Smith 7 years ago
parent commit 1aaee52f94

+ 1 - 1
toolset/run-tests.py

@@ -214,7 +214,7 @@ def main(argv=None):
     results = Results(config)
 
     if config.new:
-        Scaffolding()
+        Scaffolding(config)
 
     elif config.publish:
         docker_helper.publish(config)
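
This one-line fix pairs with the `scaffolding.py` changes at the bottom of this commit: `Scaffolding.__init__` now requires the benchmarker config. A minimal sketch of the new contract, with names inferred from this diff rather than quoted from the toolset:

    class Scaffolding:
        def __init__(self, benchmarker_config):
            # The wizard needs the config up front: config.fwroot anchors
            # the paths it builds, and gather_frameworks(config=...) uses
            # it when checking a new test name against existing frameworks.
            self.benchmarker_config = benchmarker_config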

+ 7 - 11
toolset/setup/scaffolding/README.md → toolset/scaffolding/README.md

@@ -14,23 +14,19 @@ You will need to ensure that your source code is beneath this directory. The mos
 
 A metric we capture, in addition to the actual benchmark numbers, is the significant lines of code required to run your application. To help our suite identify your source code, we require you to list your source files in `source_files`.
 
-3. Edit `.gitignore`
-
-It is very important that any files created by building or running your application are included in your `.gitignore`. The repository **must** be only source files and the files the suite requires for starting your test application.
-
-4. Edit `benchmark_config.json`
+3. Edit `benchmark_config.json`
 
 The initialization process made some assumptions about your test implementation that may or may not be true. For example, it laid out two separate tests: the non-database tests and the database tests. You, on the other hand, may only want to implement the `JSON` test, so you will need to alter `benchmark_config.json`.
 
 Additionally, `benchmark_config.json` has, for each test, a key called "setup_file". This value refers to the next bullet.
 
-5. Edit `setup.sh`
+4. Create `$NAME.dockerfile`
 
-This is the script that is executed when a benchmark test is run. Specifically, this file tells the suite how to build and start your test application.
+This is the Dockerfile that is built into a Docker image and run when a benchmark test executes. Specifically, this file tells the suite how to build and start your test application.
 
-In this file, there are detailed instructions on what is expected and what is available to help you start your test application.
+You can create multiple implementations, and each one's dockerfile must be named `[name in benchmark_config.json].dockerfile`. For example, the `default` implementation in `benchmark_config.json` maps to `$NAME.dockerfile`, but if you wanted another implementation that ran only the database tests against MySQL, you could create `$NAME-mysql.dockerfile` and add a corresponding `$NAME-mysql` entry to your `benchmark_config.json`.
 
-6. Test your application
+5. Test your application
 
         $ tfb --mode verify --test $NAME
 
@@ -38,11 +34,11 @@ This will run the suite in `verify` mode for your test. This means that no bench
 
 Once you are able to successfully run your test through our suite in this way **and** your test passes our validation, you may move on to the next step.
 
-7. Add your test to `.travis.yml`
+6. Add your test to `.travis.yml`
 
 Edit `.travis.yml` to ensure that Travis-CI will automatically run our verification tests against your new test. This file is kept in alphabetical order, so find where `TESTDIR=$LANGUAGE/$NAME` should be inserted under `env > matrix` and put it there.
 
-8. Fix this `README.md` and open a pull request
+7. Fix this `README.md` and open a pull request
 
 Starting on line 59 is your actual `README.md` that will sit with your test implementation. Update all the dummy values to their correct values so that when people visit your test in our GitHub repository, they will be greeted with information on how your test implementation works and where to look for useful source code.
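
To make the naming rule in step 4 concrete, here is a hedged sketch of how a test entry might map to its dockerfile. `dockerfile_for` is a hypothetical helper written for illustration; it is not the suite's actual lookup code:

    import os

    # Hypothetical helper illustrating the convention above; not part of
    # the toolset itself.
    def dockerfile_for(framework_name, test_name, test_dir):
        # The "default" entry maps to $NAME.dockerfile; any other entry,
        # e.g. "$NAME-mysql", maps to a dockerfile matching its own name.
        stem = framework_name if test_name == "default" else test_name
        return os.path.join(test_dir, "%s.dockerfile" % stem)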
 

+ 26 - 0
toolset/scaffolding/benchmark_config.json

@@ -0,0 +1,26 @@
+{
+  "framework": "$NAME",
+  "tests": [
+    {
+      "default": {
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "port": 8080,
+        "approach": "$APPROACH",
+        "classification": "$CLASSIFICATION",
+        "database": "None",
+        "framework": "$FRAMEWORK",
+        "language": "$LANGUAGE",
+        "flavor": "None",
+        "orm": "$ORM",
+        "platform": "$PLATFORM",
+        "webserver": "$WEBSERVER",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "$DISPLAY_NAME",
+        "notes": "",
+        "versus": "$VERSUS"
+      }
+    }
+  ]
+}
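
The `$NAME`, `$APPROACH`, and similar tokens above are placeholders that the wizard substitutes during `__edit_scaffold_files` (see the `scaffolding.py` diff below). That method's body is not part of this commit, so the following is only a hedged sketch of the kind of substitution involved; the file list and `values` dict are illustrative:

    import os

    def replace_tokens(test_dir, values):
        # values maps tokens to the wizard's answers, e.g.
        # {"$NAME": "mytest", "$LANGUAGE": "Python", "$ORM": "Full"}
        for fname in ("benchmark_config.json", "README.md"):
            path = os.path.join(test_dir, fname)
            with open(path) as f:
                text = f.read()
            for token, value in values.items():
                text = text.replace(token, value)
            with open(path, "w") as f:
                f.write(text)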

+ 0 - 0
toolset/setup/scaffolding/source_code → toolset/scaffolding/source_code


+ 0 - 3
toolset/setup/scaffolding/.gitignore

@@ -1,3 +0,0 @@
-# Add any files that are created at build/run-time
-#
-# Example: *.class, *.pyc, bin/

+ 0 - 50
toolset/setup/scaffolding/benchmark_config.json

@@ -1,50 +0,0 @@
-{
-  "framework": "$NAME",
-  "tests": [
-    {
-      "default": {
-        "setup_file": "setup",
-        "json_url": "/json",
-        "plaintext_url": "/plaintext",
-        "port": 8080,
-        "approach": "$APPROACH",
-        "classification": "$CLASSIFICATION",
-        "database": "None",
-        "framework": "$FRAMEWORK",
-        "language": "$LANGUAGE",
-        "flavor": "None",
-        "orm": "$ORM",
-        "platform": "$PLATFORM",
-        "webserver": "$WEBSERVER",
-        "os": "Linux",
-        "database_os": "Linux",
-        "display_name": "$DISPLAY_NAME",
-        "notes": "",
-        "versus": "$VERSUS"
-      },
-      "mysql": {
-        "setup_file": "setup_mysql",
-        "db_url": "/db",
-        "query_url": "/query?queries=",
-        "cached_query_url": "/cached_query?queries=",
-        "fortune_url": "/fortunes",
-        "update_url": "/update?queries=",
-        "port": 8080,
-        "approach": "$APPROACH",
-        "classification": "$CLASSIFICATION",
-        "database": "MySQL",
-        "framework": "$FRAMEWORK",
-        "language": "$LANGUAGE",
-        "flavor": "None",
-        "orm": "$ORM",
-        "platform": "$PLATFORM",
-        "webserver": "$WEBSERVER",
-        "os": "Linux",
-        "database_os": "Linux",
-        "display_name": "$DISPLAY_NAME",
-        "notes": "",
-        "versus": "$VERSUS"
-      }
-    }
-  ]
-}

+ 0 - 56
toolset/setup/scaffolding/setup.sh

@@ -1,56 +0,0 @@
-#!/bin/bash
-
-# This file describes how to gather the prerequisites of your test implementation, set
-# them up, and finally execute your test application(s) and exit in a state where the
-# test is ready to respond to HTTP requests to the port/urls described in your
-# benchmark_config.json
-
-
-# fw_depends will search toolset/setup/linux/** for named shell files and execute them.
-# These files will set up the sandboxed runtime to have the software required when your
-# test is ready to go. For example:
-#
-#   fw_depends Java
-# 
-# If you are adding a new piece of software, ensure that you first create the setup 
-# script in the appropriate place and that it follows the same paradigms illustrated
-# in the existing scripts.
-
-
-# Three HOST entries are provided to this script:
-#
-#   tfb-database = The IP address of the database 
-#   TFB-client   = The IP address of the client making the HTTP requests
-#   TFB-server   = The IP address of this machine servicing HTTP requests
-#
-# This is the preferred way of resolving these IP addresses. However, some applications
-# do not support internal name resolution and will bypass the system's HOST file and
-# attempt to resolve externally (which will fail). In those cases, use the $DBHOST
-# environment variable described below.
-
-
-# Very often, you will need variables to be set up in order to run your application
-# implementation. The suite provides several variables to this shell script to be used
-# for just this reason.
-#
-#   $DBHOST = the IP address of the database machine
-#   $TROOT  = the test's root directory (the directory in which this file resides)
-#   $FWROOT = the framework benchmark root (the root of this repository)
-#   $IROOT  = the sandbox installation root directory (your installed software as well
-#             as anything installed via fw_depends is inside this dir)
-#   $MAX_CONCURRENCY
-#           = the concurrency levels set from the suite configuration file
-#
-# Below is an example of how to replace a connect string in an application config file
-# so that the application will start up with the correct IP:
-#
-#   sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' app.conf
-
-
-# Lastly, you will need to start your test implementation application in a daemon or
-# detached mode. For example:
-#
-#   go run hello.go &
-
-
-# Note: all comments except for the first line of this file can be deleted.

+ 0 - 56
toolset/setup/scaffolding/setup_mysql.sh

@@ -1,56 +0,0 @@
-#!/bin/bash
-
-# This file describes how to gather the prerequisites of your test implementation, set
-# them up, and finally execute your test application(s) and exit in a state where the
-# test is ready to respond to HTTP requests to the port/urls described in your
-# benchmark_config.json
-
-
-# fw_depends will search toolset/setup/linux/** for named shell files and execute them.
-# These files will set up the sandboxed runtime to have the software required when your
-# test is ready to go. For example:
-#
-#   fw_depends Java
-# 
-# If you are adding a new piece of software, ensure that you first create the setup 
-# script in the appropriate place and that it follows the same paradigms illustrated
-# in the existing scripts.
-
-
-# Three HOST entries are provided to this script:
-#
-#   tfb-database = The IP address of the database 
-#   TFB-client   = The IP address of the client making the HTTP requests
-#   TFB-server   = The IP address of this machine servicing HTTP requests
-#
-# This is the preferred way of resolving these IP addresses. However, some applications
-# do not support internal name resolution and will bypass the system's HOST file and
-# attempt to resolve externally (which will fail). In those cases, use the $DBHOST
-# environment variable described below.
-
-
-# Very often, you will need variables to be set up in order to run your application
-# implementation. The suite provides several variables to this shell script to be used
-# for just this reason.
-#
-#   $DBHOST = the IP address of the database machine
-#   $TROOT  = the test's root directory (the directory in which this file resides)
-#   $FWROOT = the framework benchmark root (the root of this repository)
-#   $IROOT  = the sandbox installation root directory (your installed software as well
-#             as anything installed via fw_depends is inside this dir)
-#   $MAX_CONCURRENCY
-#           = the concurrency levels set from the suite configuration file
-#
-# Below is an example of how to replace a connect string in an application config file
-# so that the application will start up with the correct IP:
-#
-#   sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' app.conf
-
-
-# Lastly, you will need to start your test implementation application in a daemon or
-# detached mode. For example:
-#
-#   go run hello.go &
-
-
-# Note: all comments except for the first line of this file can be deleted.

+ 19 - 18
toolset/utils/scaffolding.py

@@ -5,7 +5,7 @@ from toolset.utils.metadata_helper import gather_frameworks, gather_langauges
 
 
 class Scaffolding:
-    def __init__(self):
+    def __init__(self, benchmarker_config):
         print("""
 -------------------------------------------------------------------------------
     This wizard is intended to help build the scaffolding required for a new 
@@ -16,6 +16,8 @@ class Scaffolding:
 -------------------------------------------------------------------------------"""
               )
 
+        self.benchmarker_config = benchmarker_config
+
         try:
             self.__gather_display_name()
             self.__gather_language()
@@ -41,10 +43,10 @@ class Scaffolding:
         self.name = self.display_name.lower()
 
     def __prompt_display_name(self):
-        self.display_name = input("Name: ").strip()
+        self.display_name = raw_input("Name: ").strip()
 
         found = False
-        for framework in gather_frameworks():
+        for framework in gather_frameworks(config=self.benchmarker_config):
             if framework.lower() == self.display_name.lower():
                 found = True
 
@@ -66,7 +68,7 @@ class Scaffolding:
             self.__prompt_language()
 
     def __prompt_language(self):
-        self.language = input("Language: ").strip()
+        self.language = raw_input("Language: ").strip()
 
         known_languages = gather_langauges()
         language = None
@@ -103,8 +105,8 @@ class Scaffolding:
         return self.language
 
     def __prompt_confirm_new_language(self, known_languages):
-        self.confirm_new_lang = input("Create New Language '%s' (y/n): " %
-                                      self.language).strip().lower()
+        self.confirm_new_lang = raw_input("Create New Language '%s' (y/n): " %
+                                          self.language).strip().lower()
         return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
 
     def __gather_approach(self):
@@ -128,7 +130,7 @@ class Scaffolding:
             valid = self.__prompt_approach()
 
     def __prompt_approach(self):
-        self.approach = input("Approach [1/2]: ").strip()
+        self.approach = raw_input("Approach [1/2]: ").strip()
         if self.approach == '1':
             self.approach = 'Realistic'
         if self.approach == '2':
@@ -162,7 +164,7 @@ class Scaffolding:
             self.__gather_platform()
 
     def __prompt_classification(self):
-        self.classification = input("Classification [1/2/3]: ").strip()
+        self.classification = raw_input("Classification [1/2/3]: ").strip()
         if self.classification == '1':
             self.classification = 'Fullstack'
         if self.classification == '2':
@@ -189,7 +191,7 @@ class Scaffolding:
         self.__prompt_platform()
 
     def __prompt_platform(self):
-        self.platform = input("Platform (optional): ").strip()
+        self.platform = raw_input("Platform (optional): ").strip()
         if self.platform == '':
             self.platform = 'None'
 
@@ -210,7 +212,7 @@ class Scaffolding:
             valid = self.__prompt_orm()
 
     def __prompt_orm(self):
-        self.orm = input("ORM [1/2/3]: ").strip()
+        self.orm = raw_input("ORM [1/2/3]: ").strip()
         if self.orm == '1':
             self.orm = 'Full'
         if self.orm == '2':
@@ -233,7 +235,7 @@ class Scaffolding:
         self.__prompt_webserver()
 
     def __prompt_webserver(self):
-        self.webserver = input("Webserver (optional): ").strip()
+        self.webserver = raw_input("Webserver (optional): ").strip()
         if self.webserver == '':
             self.webserver = 'None'
 
@@ -251,7 +253,7 @@ class Scaffolding:
         self.__prompt_versus()
 
     def __prompt_versus(self):
-        self.versus = input("Versus (optional): ").strip()
+        self.versus = raw_input("Versus (optional): ").strip()
         if self.versus == '':
             self.versus = 'None'
 
@@ -283,7 +285,7 @@ class Scaffolding:
             print('Aborting')
 
     def __prompt_confirmation(self):
-        self.confirmation = input("Initialize [y/n]: ").strip().lower()
+        self.confirmation = raw_input("Initialize [y/n]: ").strip().lower()
         return self.confirmation == 'y' or self.confirmation == 'n'
 
     def __build_scaffolding(self):
@@ -292,7 +294,8 @@ class Scaffolding:
             self.__edit_scaffold_files()
 
     def __create_test_folder(self):
-        self.language_dir = os.path.join("frameworks", self.language)
+        self.language_dir = os.path.join(self.benchmarker_config.fwroot,
+                                         "frameworks", self.language)
         self.test_dir = os.path.join(self.language_dir, self.name)
 
         if os.path.exists(self.test_dir):
@@ -302,7 +305,8 @@ class Scaffolding:
         return True
 
     def __copy_scaffold_files(self):
-        self.scaffold_dir = os.path.join("toolset", "setup", "scaffolding")
+        self.scaffold_dir = os.path.join(self.benchmarker_config.fwroot,
+                                         "toolset", "scaffolding")
         copytree(self.scaffold_dir, self.test_dir)
 
     def __edit_scaffold_files(self):
@@ -343,11 +347,8 @@ class Scaffolding:
     frameworks
         └─── %s
               └─── %s
-                    ├─── .gitignore
                     ├─── benchmark_config.json
                     ├─── README.md
-                    ├─── setup.sh
-                    ├─── setup_mysql.sh
                     └─── source_code
 
   The next step is to read through your README.md and follow the instructions
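
With the config threaded through, the wizard could be driven outside `run-tests.py` roughly as follows. The real `BenchmarkConfig` constructor is not shown in this diff, so the stub below is an assumption that models only the attribute this diff reads (`fwroot`); in practice `gather_frameworks` likely needs more of the config than this:

    from toolset.utils.scaffolding import Scaffolding

    # Stand-in for the real benchmarker config (assumed shape).
    class StubConfig(object):
        def __init__(self, fwroot):
            self.fwroot = fwroot  # repository root, used to build paths

    Scaffolding(StubConfig("/path/to/FrameworkBenchmarks"))  # starts the wizard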