
Remove the name attribute with default ec2

Julia Nething committed 9 years ago · commit 23d2c200fe

+ 0 - 1
benchmark.cfg.example

@@ -20,7 +20,6 @@ concurrency_levels=[8, 16, 32, 64, 128, 256]
 query_levels=[1, 5,10,15,20]
 threads=8
 mode=benchmark
-name=ec2
 os=linux
 password_prompt=False
 server_host=127.0.0.1

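With `name=ec2` dropped from the example config, a run configured from this file no longer carries a name and results are written straight under `results/`. Below is a minimal sketch of reading such a config; the `[Defaults]` section header and the exact key set are assumptions for illustration, not the project's file verbatim.

```python
# Minimal sketch: parse a benchmark.cfg-style file after this change.
# The [Defaults] section header and key set are assumptions for illustration.
from configparser import ConfigParser

example_cfg = """
[Defaults]
threads=8
mode=benchmark
os=linux
password_prompt=False
server_host=127.0.0.1
"""

parser = ConfigParser()
parser.read_string(example_cfg)
defaults = dict(parser["Defaults"])

# No "name" key any more; results land directly under results/.
assert "name" not in defaults
print(defaults["mode"])  # benchmark
```
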
+ 1 - 1
deployment/vagrant-common/custom_motd.sh

@@ -6,7 +6,7 @@ echo ""
 echo "  To get started, perhaps try this:"
 echo "    \$ cd FrameworkBenchmarks"
 echo "    \$ toolset/run-tests.py --install server --test go"
-echo "    \$ cat results/ec2/latest/logs/go/out.txt"
+echo "    \$ cat results/latest/logs/go/out.txt"
 echo ""
 echo "  You can get lots of help:"
 echo "    \$ toolset/run-tests.py --help"

+ 2 - 2
deployment/vagrant-development/README.md

@@ -51,7 +51,7 @@ Welcome to the FrameworkBenchmarks project!
 To get started, perhaps try this:
    $ cd FrameworkBenchmarks
    $ toolset/run-tests.py --install server --test go
-   $ cat results/ec2/latest/logs/go/out.txt
+   $ cat results/latest/logs/go/out.txt
 
 You can get lots of help:
    $ toolset/run-tests.py --help
@@ -71,7 +71,7 @@ usernames, or private key files, as so:
     <log for installing software needed for go test>
     <log for launching go framework>
     <log for verifying go framework meets requirements>
-    $ cat results/ec2/latest/logs/go/out.txt
+    $ cat results/latest/logs/go/out.txt
     <log with stdout from framework go>
 
 # Using Amazon-powered Virtual Machine

+ 1 - 1
deployment/vagrant-production/README.md

@@ -46,7 +46,7 @@ Welcome to the FrameworkBenchmarks project!
 To get started, perhaps try this:
    $ cd FrameworkBenchmarks
    $ toolset/run-tests.py --install server --test go
-   $ cat results/ec2/latest/logs/go/out.txt
+   $ cat results/latest/logs/go/out.txt
 
 You can get lots of help:
    $ toolset/run-tests.py --help

+ 2 - 3
toolset/benchmark/benchmarker.py

@@ -963,7 +963,7 @@ class Benchmarker:
     self.fwroot = setup_util.get_fwroot()
 
     # setup results and latest_results directories 
-    self.result_directory = os.path.join("results", self.name)
+    self.result_directory = os.path.join("results")
     if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
         shutil.rmtree(os.path.join(self.fwroot, "results"))
     self.latest_results_directory = self.latest_results_directory()
@@ -985,11 +985,10 @@ class Benchmarker:
         #Load json file into results object
         self.results = json.load(f)
     except IOError:
-      logging.warn("results.json for test %s not found.",self.name) 
+      logging.warn("results.json for test not found.")
     
     if self.results == None:
       self.results = dict()
-      self.results['name'] = self.name
       self.results['concurrencyLevels'] = self.concurrency_levels
       self.results['queryIntervals'] = self.query_levels
       self.results['frameworks'] = [t.name for t in self.__gather_tests]
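
The net effect in `benchmarker.py`: the results tree loses the per-name subdirectory (`results/ec2/...` becomes `results/...`) and the serialized results dict no longer records a `name`. The sketch below is illustrative only, not the Benchmarker class itself; the root path and framework list are placeholders.

```python
# Illustrative sketch (not Benchmarker itself): the effect of dropping
# self.name from the result paths and from the results dict.
import os

fwroot = "/tmp/FrameworkBenchmarks"  # hypothetical framework root

# Before this commit: os.path.join("results", name) -> "results/ec2"
# After this commit:  os.path.join("results")       -> "results"
result_directory = os.path.join(fwroot, "results")
latest_results_directory = os.path.join(result_directory, "latest")

results = {
    # no "name" entry any more
    "concurrencyLevels": [8, 16, 32, 64, 128, 256],
    "queryIntervals": [1, 5, 10, 15, 20],
    "frameworks": ["go"],  # placeholder list of gathered tests
}

print(result_directory)          # .../results
print(latest_results_directory)  # .../results/latest
```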

+ 0 - 1
toolset/run-tests.py

@@ -156,7 +156,6 @@ def main(argv=None):
     parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
     parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
     parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
-    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
     parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application/framework server (the one running' +
                         'this binary')
     parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
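
With `--name` removed from `run-tests.py`, the parser no longer accepts that flag and the parsed namespace no longer carries a `name` attribute. A hedged sketch of the reduced CLI surface follows; only a few of the real options are reproduced, and this parser is illustrative rather than the actual script.

```python
# Hedged sketch of the CLI surface after this commit; illustrative subset only.
import argparse

parser = argparse.ArgumentParser(description="illustrative subset of run-tests.py options")
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark')
parser.add_argument('--os', choices=['linux', 'windows'], default='linux')
parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux')

args = parser.parse_args(['--mode', 'verify'])
print(args.mode)              # verify
print(hasattr(args, 'name'))  # False: --name no longer exists
```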