
update args

Nate Brady, 8 years ago
commit d307391be5

+ 6 - 7
benchmark.cfg.example → benchmark.cfg

@@ -1,10 +1,12 @@
 [Defaults]
-# Available Keys: 
+# Available Keys:
+os=linux
+server_host=TFB-server
 client_host=TFB-client
-client_identity_file=None
+client_identity_file=/home/techempower/.ssh/id_rsa
 client_user=techempower
 database_host=TFB-database
-database_identity_file=None
+database_identity_file=/home/techempower/.ssh/id_rsa
 database_os=linux
 database_user=techempower
 duration=60
@@ -13,17 +15,14 @@ install=server
 install_error_action=continue
 install_strategy=unified
 install_only=False
-list_test_metadata=False
 list_tests=False
 concurrency_levels=[8, 16, 32, 64, 128, 256]
 query_levels=[1, 5,10,15,20]
 threads=8
 mode=benchmark
-os=linux
-server_host=TFB-server
 sleep=60
 test=None
 type=all
 verbose=True
 clean=False
-clean_all=False
+clean_all=False
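
With the example file renamed to a live benchmark.cfg, the [Defaults] keys above become concrete values rather than placeholders. As a minimal sketch of the usual pattern (not necessarily the toolset's exact loader; the file path and the two illustrated flags are assumptions), such a section can be read with ConfigParser and fed into argparse as defaults, with explicit command-line arguments still taking precedence:

    # Sketch only: read [Defaults] from benchmark.cfg and use it to seed
    # argparse defaults. Flag names below are illustrative, not the full set.
    import ConfigParser
    import argparse

    config = ConfigParser.ConfigParser()
    config.read('benchmark.cfg')
    defaults = dict(config.items('Defaults')) if config.has_section('Defaults') else {}

    parser = argparse.ArgumentParser()
    parser.add_argument('--client-host')
    parser.add_argument('--duration', type=int)
    parser.set_defaults(**defaults)                   # cfg values become defaults
    args = parser.parse_args(['--duration', '30'])    # explicit flags still win
    print args.client_host   # TFB-client (from benchmark.cfg, when present)
    print args.duration      # 30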

+ 8 - 27
deployment/vagrant-common/bootstrap.sh

@@ -46,29 +46,7 @@ GH_BRANCH=${TFB_AWS_REPO_BRANCH:-master}
 # A shell provisioner is called multiple times
 if [ ! -e "~/.firstboot" ]; then
 
-  # Setup some nice TFB defaults
-  if [ "$ROLE" == "all" ]; then
-    echo "export TFB_CLIENT_IDENTITY_FILE=$HOME/.ssh/id_rsa" >> ~/.bash_profile
-    echo "export TFB_DATABASE_IDENTITY_FILE=$HOME/.ssh/id_rsa" >> ~/.bash_profile
-  else
-    echo "export TFB_CLIENT_IDENTITY_FILE=$HOME/.ssh/client" >> ~/.bash_profile
-    echo "export TFB_DATABASE_IDENTITY_FILE=$HOME/.ssh/database" >> ~/.bash_profile
-  fi
-  echo "export TFB_SERVER_HOST=$SERVER_IP" >> ~/.bash_profile
-  echo "export TFB_CLIENT_HOST=$CLIENT_IP" >> ~/.bash_profile
-  echo "export TFB_DATABASE_HOST=$DATABA_IP" >> ~/.bash_profile
-  echo "export TFB_CLIENT_USER=$USER" >> ~/.bash_profile
-  echo "export TFB_DATABASE_USER=$USER" >> ~/.bash_profile
-  echo "export FWROOT=$HOME/FrameworkBenchmarks" >> ~/.bash_profile 
-  source ~/.bash_profile
-
-  # Ensure their host-local benchmark.cfg is not picked up on the remote host
-  if [ -e "~/FrameworkBenchmarks/benchmark.cfg" ]; then
-    echo "You have a benchmark.cfg file that will interfere with Vagrant, moving to benchmark.cfg.bak"
-    mv ~/FrameworkBenchmarks/benchmark.cfg ~/FrameworkBenchmarks/benchmark.cfg.bak
-  fi
-
-  # Setup hosts 
+  # Setup hosts
   echo "Setting up convenience hosts entries"
   echo $DATABA_IP TFB-database | sudo tee --append /etc/hosts
   echo $CLIENT_IP TFB-client   | sudo tee --append /etc/hosts
@@ -81,10 +59,13 @@ if [ ! -e "~/.firstboot" ]; then
     myhost=TFB-${ROLE}
     echo $myhost | sudo tee --append /etc/hostname
     sudo hostname $myhost
-    echo Updated /etc/hosts file to be: 
+    echo Updated /etc/hosts file to be:
     cat /etc/hosts
   fi
 
+  # Update the benchmark.cfg for vagrant
+  sed -i s/techempower/vagrant/g ~/FrameworkBenchmarks/benchmark.cfg
+
   # Workaround mitchellh/vagrant#289
   echo "grub-pc grub-pc/install_devices multiselect     /dev/sda" | sudo debconf-set-selections
 
@@ -102,7 +83,7 @@ if [ ! -e "~/.firstboot" ]; then
     #echo "Removing your current results folder to avoid interference"
     #rm -rf $FWROOT/installs $FWROOT/results
 
-    # vboxfs does not support chown or chmod, which we need. 
+    # vboxfs does not support chown or chmod, which we need.
     # We therefore bind-mount a normal linux directory so we can
     # use these operations.
     #echo "Mounting over your installs folder"
@@ -125,7 +106,7 @@ if [ ! -e "~/.firstboot" ]; then
 
   # Enable remote SSH access if we are running production environment
   # Note : this is always copied from the local working copy using a
-  #        file provisioner. While they exist in the git clone we just 
+  #        file provisioner. While they exist in the git clone we just
   #        created (so we could use those), we want to let the user
   #        have the option of replacing the keys in their working copy
   #        and ensuring that only they can ssh into the machines
@@ -149,4 +130,4 @@ if [ ! -e "~/.firstboot" ]; then
   sudo rm -f /etc/update-motd.d/51-cloudguest
   sudo rm -f /etc/update-motd.d/98-cloudguest
   sudo mv ~/.custom_motd.sh /etc/update-motd.d/55-tfbwelcome
-fi
+fi
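
The new sed step rewrites every occurrence of techempower in benchmark.cfg to vagrant, which adjusts both the user names and the identity-file paths introduced in the config change above. For readers less comfortable with sed, a small Python sketch of the same substitution (the path is the provisioner's clone location; adapt it if your checkout differs):

    # Equivalent of: sed -i s/techempower/vagrant/g ~/FrameworkBenchmarks/benchmark.cfg
    import os

    cfg_path = os.path.expanduser('~/FrameworkBenchmarks/benchmark.cfg')
    with open(cfg_path) as f:
        contents = f.read()
    with open(cfg_path, 'w') as f:
        f.write(contents.replace('techempower', 'vagrant'))
    # e.g. client_user=techempower becomes client_user=vagrant, and
    # client_identity_file=/home/techempower/.ssh/id_rsa becomes
    # client_identity_file=/home/vagrant/.ssh/id_rsa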

+ 984 - 986
toolset/benchmark/benchmarker.py

@@ -35,348 +35,346 @@ import progressbar
 
 class Benchmarker:
 
-  ##########################################################################################
-  # Public methods
-  ##########################################################################################
-
-  ############################################################
-  # Prints all the available tests
-  ############################################################
-  def run_list_tests(self):
-    all_tests = self.__gather_tests
-
-    for test in all_tests:
-      print test.name
-
-    self.__finish()
-  ############################################################
-  # End run_list_tests
-  ############################################################
-
-  ############################################################
-  # Prints the metadata for all the available tests
-  ############################################################
-  def run_list_test_metadata(self, run_finish=True):
-    all_tests = self.__gather_tests
-    all_tests_json = json.dumps(map(lambda test: {
-      "name": test.name,
-      "approach": test.approach,
-      "classification": test.classification,
-      "database": test.database,
-      "framework": test.framework,
-      "language": test.language,
-      "orm": test.orm,
-      "platform": test.platform,
-      "webserver": test.webserver,
-      "os": test.os,
-      "database_os": test.database_os,
-      "display_name": test.display_name,
-      "notes": test.notes,
-      "versus": test.versus
-    }, all_tests))
-
-    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
-      f.write(all_tests_json)
-    if run_finish:
-      self.__finish()
-
-  ############################################################
-  # End run_list_test_metadata
-  ############################################################
-
-  ############################################################
-  # parse_timestamp
-  # Re-parses the raw data for a given timestamp
-  ############################################################
-  def parse_timestamp(self):
-    all_tests = self.__gather_tests
-
-    for test in all_tests:
-      test.parse_all()
-
-    self.__parse_results(all_tests)
-
-    self.__finish()
-
-  ############################################################
-  # End parse_timestamp
-  ############################################################
-
-  ############################################################
-  # Run the tests:
-  # This process involves setting up the client/server machines
-  # with any necessary change. Then going through each test,
-  # running their setup script, verifying the URLs, and
-  # running benchmarks against them.
-  ############################################################
-  def run(self):
-    ##########################
-    # Generate metadata
-    ##########################
-    self.run_list_test_metadata(False)
-    ##########################
-    # Get a list of all known
-    # tests that we can run.
-    ##########################
-    all_tests = self.__gather_tests
-    ##########################
-    # Setup client/server
-    ##########################
-    print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
-    self.__setup_server()
-    self.__setup_database()
-    self.__setup_client()
-
-    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
-    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
-    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
-
-    ##########################
-    # Run tests
-    ##########################
-    print header("Running Tests...", top='=', bottom='=')
-    result = self.__run_tests(all_tests)
-
-    ##########################
-    # Parse results
-    ##########################
-    if self.mode == "benchmark":
-      print header("Parsing Results ...", top='=', bottom='=')
-      self.__parse_results(all_tests)
-
-
-    self.__finish()
-    return result
-
-  ############################################################
-  # End run
-  ############################################################
-
-  ############################################################
-  # database_sftp_string(batch_file)
-  # generates a fully qualified URL for sftp to database
-  ############################################################
-  def database_sftp_string(self, batch_file):
-    sftp_string =  "sftp -oStrictHostKeyChecking=no "
-    if batch_file != None: sftp_string += " -b " + batch_file + " "
-
-    if self.database_identity_file != None:
-      sftp_string += " -i " + self.database_identity_file + " "
-
-    return sftp_string + self.database_user + "@" + self.database_host
-  ############################################################
-  # End database_sftp_string
-  ############################################################
-
-  ############################################################
-  # client_sftp_string(batch_file)
-  # generates a fully qualified URL for sftp to client
-  ############################################################
-  def client_sftp_string(self, batch_file):
-    sftp_string =  "sftp -oStrictHostKeyChecking=no "
-    if batch_file != None: sftp_string += " -b " + batch_file + " "
-
-    if self.client_identity_file != None:
-      sftp_string += " -i " + self.client_identity_file + " "
-
-    return sftp_string + self.client_user + "@" + self.client_host
-  ############################################################
-  # End client_sftp_string
-  ############################################################
-
-  ############################################################
-  # generate_url(url, port)
-  # generates a fully qualified URL for accessing a test url
-  ############################################################
-  def generate_url(self, url, port):
-    return self.server_host + ":" + str(port) + url
-  ############################################################
-  # End generate_url
-  ############################################################
-
-  ############################################################
-  # get_output_file(test_name, test_type)
-  # returns the output file name for this test_name and
-  # test_type timestamp/test_type/test_name/raw
-  ############################################################
-  def get_output_file(self, test_name, test_type):
-    return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "raw")
-  ############################################################
-  # End get_output_file
-  ############################################################
-
-  ############################################################
-  # output_file(test_name, test_type)
-  # returns the output file for this test_name and test_type
-  # timestamp/test_type/test_name/raw
-  ############################################################
-  def output_file(self, test_name, test_type):
-    path = self.get_output_file(test_name, test_type)
-    try:
-      os.makedirs(os.path.dirname(path))
-    except OSError:
-      pass
-    return path
-  ############################################################
-  # End output_file
-  ############################################################
-
-
-  ############################################################
-  # get_stats_file(test_name, test_type)
-  # returns the stats file name for this test_name and
-  # test_type timestamp/test_type/test_name/raw
-  ############################################################
-  def get_stats_file(self, test_name, test_type):
-    return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "stats")
-  ############################################################
-  # End get_stats_file
-  ############################################################
-
-
-  ############################################################
-  # stats_file(test_name, test_type)
-  # returns the stats file for this test_name and test_type
-  # timestamp/test_type/test_name/raw
-  ############################################################
-  def stats_file(self, test_name, test_type):
-      path = self.get_stats_file(test_name, test_type)
-      try:
-        os.makedirs(os.path.dirname(path))
-      except OSError:
-        pass
-      return path
-  ############################################################
-  # End stats_file
-  ############################################################
-
-
-  ############################################################
-  # full_results_directory
-  ############################################################
-  def full_results_directory(self):
-    path = os.path.join(self.fwroot, self.result_directory, self.timestamp)
-    try:
-      os.makedirs(path)
-    except OSError:
-      pass
-    return path
-  ############################################################
-  # End full_results_directory
-  ############################################################
-
-  ############################################################
-  # report_verify_results
-  # Used by FrameworkTest to add verification details to our results
-  #
-  # TODO: Technically this is an IPC violation - we are accessing
-  # the parent process' memory from the child process
-  ############################################################
-  def report_verify_results(self, framework, test, result):
-    if framework.name not in self.results['verify'].keys():
-      self.results['verify'][framework.name] = dict()
-    self.results['verify'][framework.name][test] = result
-
-  ############################################################
-  # report_benchmark_results
-  # Used by FrameworkTest to add benchmark data to this
-  #
-  # TODO: Technically this is an IPC violation - we are accessing
-  # the parent process' memory from the child process
-  ############################################################
-  def report_benchmark_results(self, framework, test, results):
-    if test not in self.results['rawData'].keys():
-      self.results['rawData'][test] = dict()
-
-    # If results has a size from the parse, then it succeeded.
-    if results:
-      self.results['rawData'][test][framework.name] = results
-
-      # This may already be set for single-tests
-      if framework.name not in self.results['succeeded'][test]:
-        self.results['succeeded'][test].append(framework.name)
-    else:
-      # This may already be set for single-tests
-      if framework.name not in self.results['failed'][test]:
-        self.results['failed'][test].append(framework.name)
-
-  ############################################################
-  # End report_results
-  ############################################################
-
-  ##########################################################################################
-  # Private methods
-  ##########################################################################################
-
-  ############################################################
-  # Gathers all the tests
-  ############################################################
-  @property
-  def __gather_tests(self):
-    tests = gather_tests(include=self.test,
-      exclude=self.exclude,
-      benchmarker=self)
-
-    # If the tests have been interrupted somehow, then we want to resume them where we left
-    # off, rather than starting from the beginning
-    if os.path.isfile(self.current_benchmark):
-        with open(self.current_benchmark, 'r') as interrupted_benchmark:
-            interrupt_bench = interrupted_benchmark.read().strip()
-            for index, atest in enumerate(tests):
-                if atest.name == interrupt_bench:
-                    tests = tests[index:]
-                    break
-    return tests
-  ############################################################
-  # End __gather_tests
-  ############################################################
-
-  ############################################################
-  # Makes any necessary changes to the server that should be
-  # made before running the tests. This involves setting kernal
-  # settings to allow for more connections, or more file
-  # descriptiors
-  #
-  # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
-  ############################################################
-  def __setup_server(self):
-    try:
-      if os.name == 'nt':
-        return True
-      subprocess.call(['sudo', 'sysctl', '-w', 'net.ipv4.tcp_max_syn_backlog=65535'])
-      subprocess.call(['sudo', 'sysctl', '-w', 'net.core.somaxconn=65535'])
-      subprocess.call(['sudo', '-s', 'ulimit', '-n', '65535'])
-      subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_reuse=1'])
-      subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_recycle=1'])
-      subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmmax=134217728'])
-      subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmall=2097152'])
-
-      with open(os.path.join(self.full_results_directory(), 'sysctl.txt'), 'w') as f:
-        f.write(subprocess.check_output(['sudo','sysctl','-a']))
-    except subprocess.CalledProcessError:
-      return False
-  ############################################################
-  # End __setup_server
-  ############################################################
-
-  ############################################################
-  # Clean up any processes that run with root privileges
-  ############################################################
-  def __cleanup_leftover_processes_before_test(self):
-    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
-    p.communicate("""
+    ##########################################################################################
+    # Public methods
+    ##########################################################################################
+
+    ############################################################
+    # Prints all the available tests
+    ############################################################
+    def run_list_tests(self):
+        all_tests = self.__gather_tests
+
+        for test in all_tests:
+            print test.name
+
+        self.__finish()
+    ############################################################
+    # End run_list_tests
+    ############################################################
+
+    ############################################################
+    # Prints the metadata for all the available tests
+    ############################################################
+    def run_list_test_metadata(self):
+        all_tests = self.__gather_tests
+        all_tests_json = json.dumps(map(lambda test: {
+            "name": test.name,
+            "approach": test.approach,
+            "classification": test.classification,
+            "database": test.database,
+            "framework": test.framework,
+            "language": test.language,
+            "orm": test.orm,
+            "platform": test.platform,
+            "webserver": test.webserver,
+            "os": test.os,
+            "database_os": test.database_os,
+            "display_name": test.display_name,
+            "notes": test.notes,
+            "versus": test.versus
+        }, all_tests))
+
+        with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
+            f.write(all_tests_json)
+
+    ############################################################
+    # End run_list_test_metadata
+    ############################################################
+
+    ############################################################
+    # parse_timestamp
+    # Re-parses the raw data for a given timestamp
+    ############################################################
+    def parse_timestamp(self):
+        all_tests = self.__gather_tests
+
+        for test in all_tests:
+            test.parse_all()
+
+        self.__parse_results(all_tests)
+
+        self.__finish()
+
+    ############################################################
+    # End parse_timestamp
+    ############################################################
+
+    ############################################################
+    # Run the tests:
+    # This process involves setting up the client/server machines
+    # with any necessary change. Then going through each test,
+    # running their setup script, verifying the URLs, and
+    # running benchmarks against them.
+    ############################################################
+    def run(self):
+        ##########################
+        # Generate metadata
+        ##########################
+        self.run_list_test_metadata()
+        ##########################
+        # Get a list of all known
+        # tests that we can run.
+        ##########################
+        all_tests = self.__gather_tests
+        ##########################
+        # Setup client/server
+        ##########################
+        print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
+        self.__setup_server()
+        self.__setup_database()
+        self.__setup_client()
+
+        ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
+        #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
+        #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
+
+        ##########################
+        # Run tests
+        ##########################
+        print header("Running Tests...", top='=', bottom='=')
+        result = self.__run_tests(all_tests)
+
+        ##########################
+        # Parse results
+        ##########################
+        if self.mode == "benchmark":
+            print header("Parsing Results ...", top='=', bottom='=')
+            self.__parse_results(all_tests)
+
+
+        self.__finish()
+        return result
+
+    ############################################################
+    # End run
+    ############################################################
+
+    ############################################################
+    # database_sftp_string(batch_file)
+    # generates a fully qualified URL for sftp to database
+    ############################################################
+    def database_sftp_string(self, batch_file):
+        sftp_string =  "sftp -oStrictHostKeyChecking=no "
+        if batch_file != None: sftp_string += " -b " + batch_file + " "
+
+        if self.database_identity_file != None:
+            sftp_string += " -i " + self.database_identity_file + " "
+
+        return sftp_string + self.database_user + "@" + self.database_host
+    ############################################################
+    # End database_sftp_string
+    ############################################################
+
+    ############################################################
+    # client_sftp_string(batch_file)
+    # generates a fully qualified URL for sftp to client
+    ############################################################
+    def client_sftp_string(self, batch_file):
+        sftp_string =  "sftp -oStrictHostKeyChecking=no "
+        if batch_file != None: sftp_string += " -b " + batch_file + " "
+
+        if self.client_identity_file != None:
+            sftp_string += " -i " + self.client_identity_file + " "
+
+        return sftp_string + self.client_user + "@" + self.client_host
+    ############################################################
+    # End client_sftp_string
+    ############################################################
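
To make the output of these two helpers concrete, here is a minimal reconstruction of database_sftp_string using the benchmark.cfg defaults from the first file in this commit; the batch file name is hypothetical:

    # Mirrors the string concatenation in database_sftp_string above,
    # with values taken from the benchmark.cfg [Defaults] in this commit.
    database_user = "techempower"
    database_host = "TFB-database"
    database_identity_file = "/home/techempower/.ssh/id_rsa"
    batch_file = "commands.sftp"   # hypothetical batch file

    sftp_string = "sftp -oStrictHostKeyChecking=no "
    sftp_string += " -b " + batch_file + " "
    sftp_string += " -i " + database_identity_file + " "
    sftp_string += database_user + "@" + database_host
    print sftp_string
    # sftp -oStrictHostKeyChecking=no  -b commands.sftp  -i /home/techempower/.ssh/id_rsa techempower@TFB-database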
+
+    ############################################################
+    # generate_url(url, port)
+    # generates a fully qualified URL for accessing a test url
+    ############################################################
+    def generate_url(self, url, port):
+        return self.server_host + ":" + str(port) + url
+    ############################################################
+    # End generate_url
+    ############################################################
+
+    ############################################################
+    # get_output_file(test_name, test_type)
+    # returns the output file name for this test_name and
+    # test_type: timestamp/test_name/test_type/raw
+    ############################################################
+    def get_output_file(self, test_name, test_type):
+        return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "raw")
+    ############################################################
+    # End get_output_file
+    ############################################################
+
+    ############################################################
+    # output_file(test_name, test_type)
+    # returns the output file for this test_name and test_type
+    # timestamp/test_name/test_type/raw
+    ############################################################
+    def output_file(self, test_name, test_type):
+        path = self.get_output_file(test_name, test_type)
+        try:
+            os.makedirs(os.path.dirname(path))
+        except OSError:
+            pass
+        return path
+    ############################################################
+    # End output_file
+    ############################################################
+
+
+    ############################################################
+    # get_stats_file(test_name, test_type)
+    # returns the stats file name for this test_name and
+    # test_type: timestamp/test_name/test_type/stats
+    ############################################################
+    def get_stats_file(self, test_name, test_type):
+        return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "stats")
+    ############################################################
+    # End get_stats_file
+    ############################################################
+
+
+    ############################################################
+    # stats_file(test_name, test_type)
+    # returns the stats file for this test_name and test_type
+    # timestamp/test_name/test_type/stats
+    ############################################################
+    def stats_file(self, test_name, test_type):
+        path = self.get_stats_file(test_name, test_type)
+        try:
+            os.makedirs(os.path.dirname(path))
+        except OSError:
+            pass
+        return path
+    ############################################################
+    # End stats_file
+    ############################################################
+
+
+    ############################################################
+    # full_results_directory
+    ############################################################
+    def full_results_directory(self):
+        path = os.path.join(self.fwroot, self.result_directory, self.timestamp)
+        try:
+            os.makedirs(path)
+        except OSError:
+            pass
+        return path
+    ############################################################
+    # End full_results_directory
+    ############################################################
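
Taken together, get_output_file, get_stats_file, and full_results_directory lay results out under a per-run timestamp. A short sketch of the paths they build, with placeholder values for the result directory, timestamp, framework root, and test name:

    import os

    # Placeholder values; the real ones come from the Benchmarker instance.
    result_directory = "results"
    timestamp = "20170101120000"
    fwroot = "/home/techempower/FrameworkBenchmarks"

    print os.path.join(result_directory, timestamp, "gemini", "json", "raw")    # get_output_file("gemini", "json")
    print os.path.join(result_directory, timestamp, "gemini", "json", "stats")  # get_stats_file("gemini", "json")
    print os.path.join(fwroot, result_directory, timestamp)                     # full_results_directory()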
+
+    ############################################################
+    # report_verify_results
+    # Used by FrameworkTest to add verification details to our results
+    #
+    # TODO: Technically this is an IPC violation - we are accessing
+    # the parent process' memory from the child process
+    ############################################################
+    def report_verify_results(self, framework, test, result):
+        if framework.name not in self.results['verify'].keys():
+            self.results['verify'][framework.name] = dict()
+        self.results['verify'][framework.name][test] = result
+
+    ############################################################
+    # report_benchmark_results
+    # Used by FrameworkTest to add benchmark data to this
+    #
+    # TODO: Technically this is an IPC violation - we are accessing
+    # the parent process' memory from the child process
+    ############################################################
+    def report_benchmark_results(self, framework, test, results):
+        if test not in self.results['rawData'].keys():
+            self.results['rawData'][test] = dict()
+
+        # If results has a size from the parse, then it succeeded.
+        if results:
+            self.results['rawData'][test][framework.name] = results
+
+            # This may already be set for single-tests
+            if framework.name not in self.results['succeeded'][test]:
+                self.results['succeeded'][test].append(framework.name)
+        else:
+            # This may already be set for single-tests
+            if framework.name not in self.results['failed'][test]:
+                self.results['failed'][test].append(framework.name)
+
+    ############################################################
+    # End report_benchmark_results
+    ############################################################
+
+    ##########################################################################################
+    # Private methods
+    ##########################################################################################
+
+    ############################################################
+    # Gathers all the tests
+    ############################################################
+    @property
+    def __gather_tests(self):
+        tests = gather_tests(include=self.test,
+                             exclude=self.exclude,
+                             benchmarker=self)
+
+        # If the tests have been interrupted somehow, then we want to resume them where we left
+        # off, rather than starting from the beginning
+        if os.path.isfile(self.current_benchmark):
+            with open(self.current_benchmark, 'r') as interrupted_benchmark:
+                interrupt_bench = interrupted_benchmark.read().strip()
+                for index, atest in enumerate(tests):
+                    if atest.name == interrupt_bench:
+                        tests = tests[index:]
+                        break
+        return tests
+    ############################################################
+    # End __gather_tests
+    ############################################################
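
The resume logic above pairs with __run_tests further down, which writes the name of the test about to run into self.current_benchmark. A minimal sketch of that round trip, with a hypothetical file path and stand-in test names:

    # Illustration only: the resume file path and test names are made up.
    current_benchmark = "/tmp/current_benchmark.txt"
    tests = ["aspnet", "gemini", "nodejs"]

    # __run_tests records the test about to run...
    with open(current_benchmark, 'w') as f:
        f.write("gemini")

    # ...and __gather_tests resumes from that test on the next invocation.
    with open(current_benchmark, 'r') as f:
        interrupted = f.read().strip()
    for index, name in enumerate(tests):
        if name == interrupted:
            tests = tests[index:]
            break
    print tests   # ['gemini', 'nodejs']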
+
+    ############################################################
+    # Makes any necessary changes to the server that should be
+    # made before running the tests. This involves setting kernel
+    # settings to allow for more connections, or more file
+    # descriptors
+    #
+    # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
+    ############################################################
+    def __setup_server(self):
+        try:
+            if os.name == 'nt':
+                return True
+            subprocess.call(['sudo', 'sysctl', '-w', 'net.ipv4.tcp_max_syn_backlog=65535'])
+            subprocess.call(['sudo', 'sysctl', '-w', 'net.core.somaxconn=65535'])
+            subprocess.call(['sudo', '-s', 'ulimit', '-n', '65535'])
+            subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_reuse=1'])
+            subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_recycle=1'])
+            subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmmax=134217728'])
+            subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmall=2097152'])
+
+            with open(os.path.join(self.full_results_directory(), 'sysctl.txt'), 'w') as f:
+                f.write(subprocess.check_output(['sudo','sysctl','-a']))
+        except subprocess.CalledProcessError:
+            return False
+    ############################################################
+    # End __setup_server
+    ############################################################
+
+    ############################################################
+    # Clean up any processes that run with root privileges
+    ############################################################
+    def __cleanup_leftover_processes_before_test(self):
+        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
+        p.communicate("""
       sudo /etc/init.d/apache2 stop
     """)
 
-  ############################################################
-  # Makes any necessary changes to the database machine that
-  # should be made before running the tests. Is very similar
-  # to the server setup, but may also include database specific
-  # changes.
-  ############################################################
-  def __setup_database(self):
-    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
-    p.communicate("""
+    ############################################################
+    # Makes any necessary changes to the database machine that
+    # should be made before running the tests. Is very similar
+    # to the server setup, but may also include database specific
+    # changes.
+    ############################################################
+    def __setup_database(self):
+        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
+        p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
       sudo sysctl -w net.core.somaxconn=65535
       sudo sysctl -w kernel.sched_autogroup_enabled=0
@@ -387,8 +385,8 @@ class Benchmarker:
       sudo sysctl -w kernel.shmall=2097152
       sudo sysctl -w kernel.sem="250 32000 256 512"
     """)
-    # TODO - print kernel configuration to file
-    # echo "Printing kernel configuration:" && sudo sysctl -a
+        # TODO - print kernel configuration to file
+        # echo "Printing kernel configuration:" && sudo sysctl -a
 
         # Explanations:
         # net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
@@ -396,19 +394,19 @@ class Benchmarker:
         # net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
         # kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
         # For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
-  ############################################################
-  # End __setup_database
-  ############################################################
-
-  ############################################################
-  # Makes any necessary changes to the client machine that
-  # should be made before running the tests. Is very similar
-  # to the server setup, but may also include client specific
-  # changes.
-  ############################################################
-  def __setup_client(self):
-    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
-    p.communicate("""
+    ############################################################
+    # End __setup_database
+    ############################################################
+
+    ############################################################
+    # Makes any necessary changes to the client machine that
+    # should be made before running the tests. Is very similar
+    # to the server setup, but may also include client specific
+    # changes.
+    ############################################################
+    def __setup_client(self):
+        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
+        p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
       sudo sysctl -w net.core.somaxconn=65535
       sudo -s ulimit -n 65535
@@ -417,648 +415,648 @@ class Benchmarker:
       sudo sysctl -w kernel.shmmax=2147483648
       sudo sysctl -w kernel.shmall=2097152
     """)
-  ############################################################
-  # End __setup_client
-  ############################################################
-
-  ############################################################
-  # __run_tests
-  #
-  # 2013-10-02 ASB  Calls each test passed in tests to
-  #                 __run_test in a separate process.  Each
-  #                 test is given a set amount of time and if
-  #                 kills the child process (and subsequently
-  #                 all of its child processes).  Uses
-  #                 multiprocessing module.
-  ############################################################
-
-  def __run_tests(self, tests):
-    if len(tests) == 0:
-      return 0
-
-    logging.debug("Start __run_tests.")
-    logging.debug("__name__ = %s",__name__)
-
-    error_happened = False
-    if self.os.lower() == 'windows':
-      logging.debug("Executing __run_tests on Windows")
-      for test in tests:
-        with open(self.current_benchmark, 'w') as benchmark_resume_file:
-          benchmark_resume_file.write(test.name)
-        if self.__run_test(test) != 0:
-          error_happened = True
-    else:
-      logging.debug("Executing __run_tests on Linux")
-
-      # Setup a nice progressbar and ETA indicator
-      widgets = [self.mode, ': ',  progressbar.Percentage(),
-                 ' ', progressbar.Bar(),
-                 ' Rough ', progressbar.ETA()]
-      pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
-      pbar_test = 0
-
-      # These features do not work on Windows
-      for test in tests:
-        pbar.update(pbar_test)
-        pbar_test = pbar_test + 1
-        if __name__ == 'benchmark.benchmarker':
-          print header("Running Test: %s" % test.name)
-          with open(self.current_benchmark, 'w') as benchmark_resume_file:
-            benchmark_resume_file.write(test.name)
-          test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
-          test_process.start()
-          test_process.join(self.run_test_timeout_seconds)
-          self.__load_results()  # Load intermediate result from child process
-          if(test_process.is_alive()):
-            logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
-            self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
-            test_process.terminate()
-            test_process.join()
-          if test_process.exitcode != 0:
-            error_happened = True
-      pbar.finish()
-    if os.path.isfile(self.current_benchmark):
-      os.remove(self.current_benchmark)
-    logging.debug("End __run_tests.")
-
-    if error_happened:
-      return 1
-    return 0
-  ############################################################
-  # End __run_tests
-  ############################################################
-
-  ############################################################
-  # __run_test
-  # 2013-10-02 ASB  Previously __run_tests.  This code now only
-  #                 processes a single test.
-  #
-  # Ensures that the system has all necessary software to run
-  # the tests. This does not include that software for the individual
-  # test, but covers software such as curl and weighttp that
-  # are needed.
-  ############################################################
-  def __run_test(self, test):
-
-    # Used to capture return values
-    def exit_with_code(code):
-      if self.os.lower() == 'windows':
-        return code
-      else:
-        sys.exit(code)
-
-    logDir = os.path.join(self.full_results_directory(), test.name.lower())
-    try:
-      os.makedirs(logDir)
-    except Exception:
-      pass
-    with open(os.path.join(logDir, 'out.txt'), 'w') as out:
-
-      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
-        out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
-        return exit_with_code(0)
-
-      # If the test is in the excludes list, we skip it
-      if self.exclude != None and test.name in self.exclude:
-        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
-        return exit_with_code(0)
-
-      out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
-      out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
-      out.write("test.name: {name}\n".format(name=str(test.name)))
-      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
-      if self.results['frameworks'] != None and test.name in self.results['completed']:
-        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
-        print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
-        return exit_with_code(1)
-      out.flush()
-
-      out.write(header("Beginning %s" % test.name, top='='))
-      out.flush()
-
-      ##########################
-      # Start this test
-      ##########################
-      out.write(header("Starting %s" % test.name))
-      out.flush()
-      try:
-        if test.requires_database():
-          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=out, shell=True)
-          p.communicate("""
+    ############################################################
+    # End __setup_client
+    ############################################################
+
+    ############################################################
+    # __run_tests
+    #
+    # 2013-10-02 ASB  Calls each test passed in tests to
+    #                 __run_test in a separate process.  Each
+    #                 test is given a set amount of time; if it
+    #                 exceeds that limit, the parent kills the child
+    #                 process (and subsequently all of its child
+    #                 processes).  Uses the multiprocessing module.
+    ############################################################
+
+    def __run_tests(self, tests):
+        if len(tests) == 0:
+            return 0
+
+        logging.debug("Start __run_tests.")
+        logging.debug("__name__ = %s",__name__)
+
+        error_happened = False
+        if self.os.lower() == 'windows':
+            logging.debug("Executing __run_tests on Windows")
+            for test in tests:
+                with open(self.current_benchmark, 'w') as benchmark_resume_file:
+                    benchmark_resume_file.write(test.name)
+                if self.__run_test(test) != 0:
+                    error_happened = True
+        else:
+            logging.debug("Executing __run_tests on Linux")
+
+            # Setup a nice progressbar and ETA indicator
+            widgets = [self.mode, ': ',  progressbar.Percentage(),
+                       ' ', progressbar.Bar(),
+                       ' Rough ', progressbar.ETA()]
+            pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
+            pbar_test = 0
+
+            # These features do not work on Windows
+            for test in tests:
+                pbar.update(pbar_test)
+                pbar_test = pbar_test + 1
+                if __name__ == 'benchmark.benchmarker':
+                    print header("Running Test: %s" % test.name)
+                    with open(self.current_benchmark, 'w') as benchmark_resume_file:
+                        benchmark_resume_file.write(test.name)
+                    test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
+                    test_process.start()
+                    test_process.join(self.run_test_timeout_seconds)
+                    self.__load_results()  # Load intermediate result from child process
+                    if(test_process.is_alive()):
+                        logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
+                        self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
+                        test_process.terminate()
+                        test_process.join()
+                    if test_process.exitcode != 0:
+                        error_happened = True
+            pbar.finish()
+        if os.path.isfile(self.current_benchmark):
+            os.remove(self.current_benchmark)
+        logging.debug("End __run_tests.")
+
+        if error_happened:
+            return 1
+        return 0
+    ############################################################
+    # End __run_tests
+    ############################################################
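
The per-test timeout above uses the standard multiprocessing pattern: start the child, join with a timeout, and terminate it if it is still alive. A stripped-down sketch of just that pattern, with a placeholder worker and timeout:

    from multiprocessing import Process
    import time

    def run_one_test():
        # Stand-in for self.__run_test(test); deliberately outlives the timeout.
        time.sleep(120)

    if __name__ == '__main__':
        p = Process(target=run_one_test, name="Test Runner (example)")
        p.start()
        p.join(5)          # stand-in for run_test_timeout_seconds
        if p.is_alive():
            p.terminate()  # the runner exceeded its budget; kill it
            p.join()
        print p.exitcode   # a negative or non-zero exit code marks a failed run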
+
+    ############################################################
+    # __run_test
+    # 2013-10-02 ASB  Previously __run_tests.  This code now only
+    #                 processes a single test.
+    #
+    # Ensures that the system has all necessary software to run
+    # the tests. This does not include that software for the individual
+    # test, but covers software such as curl and weighttp that
+    # are needed.
+    ############################################################
+    def __run_test(self, test):
+
+        # Used to capture return values
+        def exit_with_code(code):
+            if self.os.lower() == 'windows':
+                return code
+            else:
+                sys.exit(code)
+
+        logDir = os.path.join(self.full_results_directory(), test.name.lower())
+        try:
+            os.makedirs(logDir)
+        except Exception:
+            pass
+        with open(os.path.join(logDir, 'out.txt'), 'w') as out:
+
+            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
+                out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
+                return exit_with_code(0)
+
+            # If the test is in the excludes list, we skip it
+            if self.exclude != None and test.name in self.exclude:
+                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
+                return exit_with_code(0)
+
+            out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
+            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
+            out.write("test.name: {name}\n".format(name=str(test.name)))
+            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
+            if self.results['frameworks'] != None and test.name in self.results['completed']:
+                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
+                print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
+                return exit_with_code(1)
+            out.flush()
+
+            out.write(header("Beginning %s" % test.name, top='='))
+            out.flush()
+
+            ##########################
+            # Start this test
+            ##########################
+            out.write(header("Starting %s" % test.name))
+            out.flush()
+            try:
+                if test.requires_database():
+                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=out, shell=True)
+                    p.communicate("""
             sudo restart mysql
             sudo restart mongod
             sudo service postgresql restart
             sudo service cassandra restart
             /opt/elasticsearch/elasticsearch restart
           """)
-          time.sleep(10)
-
-          st = verify_database_connections([
-            ("mysql", self.database_host, 3306),
-            ("mongodb", self.database_host, 27017),
-            ("postgresql", self.database_host, 5432),
-            ("cassandra", self.database_host, 9160),
-            ("elasticsearch", self.database_host, 9200)
-          ])
-          print "database connection test results:\n" + "\n".join(st[1])
-
-        self.__cleanup_leftover_processes_before_test();
-
-        if self.__is_port_bound(test.port):
-          # We gave it our all
-          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
-          out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
-          out.flush()
-          print "Error: Unable to recover port, cannot start test"
-          return exit_with_code(1)
-
-        result, process = test.start(out)
-        if result != 0:
-          self.__stop_test(out, process)
-          time.sleep(5)
-          out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
-          out.flush()
-          self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
-          return exit_with_code(1)
-
-        logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
-        time.sleep(self.sleep)
+                    time.sleep(10)
+
+                    st = verify_database_connections([
+                        ("mysql", self.database_host, 3306),
+                        ("mongodb", self.database_host, 27017),
+                        ("postgresql", self.database_host, 5432),
+                        ("cassandra", self.database_host, 9160),
+                        ("elasticsearch", self.database_host, 9200)
+                    ])
+                    print "database connection test results:\n" + "\n".join(st[1])
+
+                self.__cleanup_leftover_processes_before_test();
+
+                if self.__is_port_bound(test.port):
+                    # We gave it our all
+                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
+                    out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
+                    out.flush()
+                    print "Error: Unable to recover port, cannot start test"
+                    return exit_with_code(1)
+
+                result, process = test.start(out)
+                if result != 0:
+                    self.__stop_test(out, process)
+                    time.sleep(5)
+                    out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
+                    out.flush()
+                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
+                    return exit_with_code(1)
+
+                logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
+                time.sleep(self.sleep)
+
+                ##########################
+                # Verify URLs
+                ##########################
+                logging.info("Verifying framework URLs")
+                passed_verify = test.verify_urls(logDir)
+
+                ##########################
+                # Nuke /tmp
+                ##########################
+                try:
+                    subprocess.check_call('sudo rm -rf /tmp/*', shell=True, stderr=out, stdout=out)
+                except Exception:
+                    out.write(header("Error: Could not empty /tmp"))
+
+                ##########################
+                # Benchmark this test
+                ##########################
+                if self.mode == "benchmark":
+                    logging.info("Benchmarking")
+                    out.write(header("Benchmarking %s" % test.name))
+                    out.flush()
+                    test.benchmark(logDir)
+
+                ##########################
+                # Stop this test
+                ##########################
+                out.write(header("Stopping %s" % test.name))
+                out.flush()
+                self.__stop_test(out, process)
+                out.flush()
+                time.sleep(5)
+
+                if self.__is_port_bound(test.port):
+                    # This can happen sometimes - let's try again
+                    self.__stop_test(out, process)
+                    out.flush()
+                    time.sleep(5)
+                    if self.__is_port_bound(test.port):
+                        # We gave it our all
+                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
+                        out.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
+                        out.flush()
+                        return exit_with_code(1)
+
+                out.write(header("Stopped %s" % test.name))
+                out.flush()
+
+                ##########################################################
+                # Remove contents of  /tmp folder
+                ##########################################################
+                if self.clear_tmp:
+                    try:
+                        filelist = [ f for f in os.listdir("/tmp") ]
+                        for f in filelist:
+                            try:
+                                os.remove("/tmp/" + f)
+                            except OSError as err:
+                                print "Failed to remove " + str(f) + " from /tmp directory: " + str(err)
+                    except OSError:
+                        print "Failed to remove contents of /tmp directory."
+
+                ##########################################################
+                # Save results thus far into the latest results directory
+                ##########################################################
+
+                out.write(header("Saving results through %s" % test.name))
+                out.flush()
+                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
+
+                if self.mode == "verify" and not passed_verify:
+                    print "Failed verify!"
+                    return exit_with_code(1)
+            except (OSError, IOError, subprocess.CalledProcessError) as e:
+                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
+                out.write(header("Subprocess Error %s" % test.name))
+                traceback.print_exc(file=out)
+                out.flush()
+                try:
+                    self.__stop_test(out, process)
+                except (subprocess.CalledProcessError) as e:
+                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
+                    out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
+                    traceback.print_exc(file=out)
+                    out.flush()
+                out.close()
+                return exit_with_code(1)
+            # TODO - subprocess should not catch this exception!
+            # Parent process should catch it and cleanup/exit
+            except (KeyboardInterrupt) as e:
+                self.__stop_test(out, process)
+                out.write(header("Cleaning up..."))
+                out.flush()
+                self.__finish()
+                sys.exit(1)
+
+            out.close()
+            return exit_with_code(0)
+
+    ############################################################
+    # End __run_test
+    ############################################################
+
+    ############################################################
+    # __stop_test(benchmarker)
+    # Stops all running tests
+    ############################################################
+    def __stop_test(self, out, process):
+        if process is not None and process.poll() is None:
+            # Stop
+            pids = self.__find_child_processes(process.pid)
+            if pids is not None:
+                stop = ['kill', '-STOP'] + pids
+                subprocess.call(stop, stderr=out, stdout=out)
+            pids = self.__find_child_processes(process.pid)
+            if pids is not None:
+                term = ['kill', '-TERM'] + pids
+                subprocess.call(term, stderr=out, stdout=out)
+            # Okay, if there are any more PIDs, kill them harder
+            pids = self.__find_child_processes(process.pid)
+            if pids is not None:
+                kill = ['kill', '-KILL'] + pids
+                subprocess.call(kill, stderr=out, stdout=out)
+            process.terminate()
+    ############################################################
+    # End __stop_test
+    ############################################################
+
+    ############################################################
+    # __find_child_processes
+    # Recursively finds all child processes for the given PID.
+    ############################################################
+    def __find_child_processes(self, pid):
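+        # Illustrative example (hypothetical process tree): if setup.py spawned nginx,
+        # which forked two workers, this returns the nginx PID plus both worker PIDs,
+        # as strings taken straight from pgrep's output.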
+        toRet = []
+        try:
+            pids = subprocess.check_output(['pgrep','-P',str(pid)]).split()
+            toRet.extend(pids)
+            for aPid in pids:
+                toRet.extend(self.__find_child_processes(aPid))
+        except subprocess.CalledProcessError:
+            # pgrep will return a non-zero status code if there are no
+            # processes who have a PPID of PID.
+            pass
+
+        return toRet
+    ############################################################
+    # End __find_child_processes
+    ############################################################
+
+    def is_port_bound(self, port):
+        return self.__is_port_bound(port)
+
+    ############################################################
+    # __is_port_bound
+    # Checks whether the requested port is still bound. If it is,
+    # a previous test probably did not shut down properly.
+    ############################################################
+    def __is_port_bound(self, port):
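+        # e.g. __is_port_bound(8080) is True while a (hypothetical) test server still
+        # listens on port 8080 and False once the socket has been fully released.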
+        port = int(port)
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            # Try to bind this port on all interfaces
+            s.bind(("", port))
+            # If we get here, we were able to bind successfully,
+            # which means the port is free.
+        except socket.error:
+            # If we get an exception, it might be because the port is still bound
+            # which would be bad, or maybe it is a privileged port (<1024) and we
+            # are not running as root, or maybe the server is gone, but sockets are
+            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
+            # connect.
+            try:
+                s.connect(("127.0.0.1", port))
+                # If we get here, we were able to connect to something, which means
+                # that the port is still bound.
+                return True
+            except socket.error:
+                # An exception means that we couldn't connect, so a server probably
+                # isn't still running on the port.
+                pass
+        finally:
+            s.close()
+
+        return False
+
+    ############################################################
+    # End __is_port_bound
+    ############################################################
+
+    ############################################################
+    # __parse_results
+    # Collects per-framework metadata (commit counts and source line
+    # counts) and writes the aggregated results.json for this run.
+    ############################################################
+    def __parse_results(self, tests):
+        # Run the method to get the commit count of each framework.
+        self.__count_commits()
+        # Call the method which counts the sloc for each framework
+        self.__count_sloc()
+
+        # Time to create parsed files
+        # Aggregate JSON file
+        with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
+            f.write(json.dumps(self.results, indent=2))
+
+    ############################################################
+    # End __parse_results
+    ############################################################
+
+
+    #############################################################
+    # __count_sloc
+    #############################################################
+    def __count_sloc(self):
+        frameworks = gather_frameworks(include=self.test,
+                                       exclude=self.exclude, benchmarker=self)
+
+        jsonResult = {}
+        for framework, testlist in frameworks.iteritems():
+            if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
+                logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
+                continue
+
+            # Unfortunately the source_code files use lines like
+            # ./cpoll_cppsp/www/fortune_old instead of
+            # ./www/fortune_old
+            # so we have to back our working dir up one level
+            wd = os.path.dirname(testlist[0].directory)
+
+            try:
+                command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
+
+                if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
+                    command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
+                    logging.info("Using custom cloc definitions for %s", framework)
+
+                # Find the last instance of the word 'code' in the yaml output. This should
+                # be the line count for the sum of all listed files or just the line count
+                # for the last file in the case where there's only one file listed.
+                command = command + "| grep code | tail -1 | cut -d: -f 2"
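+                # Illustrative example (hypothetical framework directory): the assembled
+                # command resembles
+                #   cloc --list-file=./gemini/source_code --yaml | grep code | tail -1 | cut -d: -f 2
+                # and prints a bare integer that int() can parse below.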
+                logging.debug("Running \"%s\" (cwd=%s)", command, wd)
+                lineCount = subprocess.check_output(command, cwd=wd, shell=True)
+                jsonResult[framework] = int(lineCount)
+            except subprocess.CalledProcessError:
+                continue
+            except ValueError as ve:
+                logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
+        self.results['rawData']['slocCounts'] = jsonResult
+    ############################################################
+    # End __count_sloc
+    ############################################################
+
+    ############################################################
+    # __count_commits
+    #
+    ############################################################
+    def __count_commits(self):
+        frameworks = gather_frameworks(include=self.test,
+                                       exclude=self.exclude, benchmarker=self)
+
+        # framework is passed in explicitly so that each worker thread writes only its own key
+        def count_commit(directory, framework, jsonResult):
+            command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
+            try:
+                commitCount = subprocess.check_output(command, shell=True)
+                jsonResult[framework] = int(commitCount)
+            except subprocess.CalledProcessError:
+                pass
+
+        # Because git can be slow when run in large batches, this
+        # calls git up to 4 times in parallel. Normal improvement is ~3-4x
+        # in my trials, or ~100 seconds down to ~25
+        # This is safe to parallelize as long as each thread only
+        # accesses one key in the dictionary
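+        # Illustrative example (hypothetical directory): each worker ends up running
+        #   git rev-list HEAD -- frameworks/Java/gemini | sort -u | wc -l
+        # and storing the resulting count under that framework's name.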
+        threads = []
+        jsonResult = {}
+        t1 = datetime.now()
+        for framework, testlist in frameworks.iteritems():
+            directory = testlist[0].directory
+            t = threading.Thread(target=count_commit, args=(directory, framework, jsonResult))
+            t.start()
+            threads.append(t)
+            # Git has internal locks, full parallel will just cause contention
+            # and slowness, so we rate-limit a bit
+            if len(threads) >= 4:
+                threads[0].join()
+                threads.remove(threads[0])
+
+        # Wait for remaining threads
+        for t in threads:
+            t.join()
+        t2 = datetime.now()
+        # print "Took %s seconds " % (t2 - t1).seconds
+
+        self.results['rawData']['commitCounts'] = jsonResult
+        self.commits = jsonResult
+    ############################################################
+    # End __count_commits
+    ############################################################
+
+    ############################################################
+    # __write_intermediate_results
+    ############################################################
+    def __write_intermediate_results(self,test_name,status_message):
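+        # e.g. __write_intermediate_results('gemini', '20160101120000') records a status
+        # for the (hypothetical) gemini test and flushes results.json to disk.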
+        try:
+            self.results["completed"][test_name] = status_message
+            with open(os.path.join(self.full_results_directory(), 'results.json'), 'w') as f:
+                f.write(json.dumps(self.results, indent=2))
+        except (IOError):
+            logging.error("Error writing results.json")
 
-        ##########################
-        # Verify URLs
-        ##########################
-        logging.info("Verifying framework URLs")
-        passed_verify = test.verify_urls(logDir)
+    ############################################################
+    # End __write_intermediate_results
+    ############################################################
 
-        ##########################
-        # Nuke /tmp
-        ##########################
+    def __load_results(self):
         try:
-          subprocess.check_call('sudo rm -rf /tmp/*', shell=True, stderr=out, stdout=out)
-        except Exception:
-          out.write(header("Error: Could not empty /tmp"))
+            with open(os.path.join(self.full_results_directory(), 'results.json')) as f:
+                self.results = json.load(f)
+        except (ValueError, IOError):
+            pass
+
+    ############################################################
+    # __finish
+    ############################################################
+    def __finish(self):
+        if not self.list_tests and not self.parse:
+            tests = self.__gather_tests
+            # Normally you don't have to re-apply the color before each line, but
+            # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
+            # or stream flush, so we print the prefix with every line
+            prefix = Fore.CYAN
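+            # Illustrative output (test names and verdicts vary from run to run):
+            #   | Test: gemini
+            #   |       json        : PASS
+            #   |       plaintext   : WARN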
+            for line in header("Verification Summary", top='=', bottom='').split('\n'):
+                print prefix + line
+            for test in tests:
+                print prefix + "| Test: %s" % test.name
+                if test.name in self.results['verify'].keys():
+                    for test_type, result in self.results['verify'][test.name].iteritems():
+                        if result.upper() == "PASS":
+                            color = Fore.GREEN
+                        elif result.upper() == "WARN":
+                            color = Fore.YELLOW
+                        else:
+                            color = Fore.RED
+                        print prefix + "|       " + test_type.ljust(11) + ' : ' + color + result.upper()
+                else:
+                    print prefix + "|       " + Fore.RED + "NO RESULTS (Did framework launch?)"
+            print prefix + header('', top='', bottom='=') + Style.RESET_ALL
+
+        print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
+        print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
+
+    ############################################################
+    # End __finish
+    ############################################################
+
+    ##########################################################################################
+    # Constructor
+    ##########################################################################################
+
+    ############################################################
+    # Initialize the benchmarker. The args are the arguments
+    # parsed via argparse.
+    ############################################################
+    def __init__(self, args):
+
+        # Map type strings to their objects
+        types = dict()
+        types['json'] = JsonTestType()
+        types['db'] = DBTestType()
+        types['query'] = QueryTestType()
+        types['fortune'] = FortuneTestType()
+        types['update'] = UpdateTestType()
+        types['plaintext'] = PlaintextTestType()
+
+        # Turn type into a map instead of a string
+        if args['type'] == 'all':
+            args['types'] = types
+        else:
+            args['types'] = { args['type'] : types[args['type']] }
+        del args['type']
 
-        ##########################
-        # Benchmark this test
-        ##########################
-        if self.mode == "benchmark":
-          logging.info("Benchmarking")
-          out.write(header("Benchmarking %s" % test.name))
-          out.flush()
-          test.benchmark(logDir)
 
-        ##########################
-        # Stop this test
-        ##########################
-        out.write(header("Stopping %s" % test.name))
-        out.flush()
-        self.__stop_test(out, process)
-        out.flush()
-        time.sleep(5)
-
-        if self.__is_port_bound(test.port):
-          # This can happen sometimes - let's try again
-          self.__stop_test(out, process)
-          out.flush()
-          time.sleep(5)
-          if self.__is_port_bound(test.port):
-            # We gave it our all
-            self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
-            out.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
-            out.flush()
-            return exit_with_code(1)
-
-        out.write(header("Stopped %s" % test.name))
-        out.flush()
-
-        ##########################################################
-        # Remove contents of  /tmp folder
-        ##########################################################
-        if self.clear_tmp:
-          try:
-            filelist = [ f for f in os.listdir("/tmp") ]
-            for f in filelist:
-              try:
-                os.remove("/tmp/" + f)
-              except OSError as err:
-                print "Failed to remove " + str(f) + " from /tmp directory: " + str(err)
-          except OSError:
-            print "Failed to remove contents of /tmp directory."
-
-        ##########################################################
-        # Save results thus far into the latest results directory
-        ##########################################################
-
-        out.write(header("Saving results through %s" % test.name))
-        out.flush()
-        self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
-
-        if self.mode == "verify" and not passed_verify:
-          print "Failed verify!"
-          return exit_with_code(1)
-      except (OSError, IOError, subprocess.CalledProcessError) as e:
-        self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
-        out.write(header("Subprocess Error %s" % test.name))
-        traceback.print_exc(file=out)
-        out.flush()
+        args['max_threads'] = args['threads']
+        args['max_concurrency'] = max(args['concurrency_levels'])
+
+        self.__dict__.update(args)
+        # pprint(self.__dict__)
+
+        self.start_time = time.time()
+        self.run_test_timeout_seconds = 7200
+
+        # setup logging
+        logging.basicConfig(stream=sys.stderr, level=logging.INFO)
+
+        # setup some additional variables
+        if self.database_user == None: self.database_user = self.client_user
+        if self.database_host == None: self.database_host = self.client_host
+        if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
+
+        # Remember root directory
+        self.fwroot = setup_util.get_fwroot()
+
+        # setup current_benchmark.txt location
+        self.current_benchmark = "/tmp/current_benchmark.txt"
+
+        if hasattr(self, 'parse') and self.parse != None:
+            self.timestamp = self.parse
+        else:
+            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
+
+        # set up the results directory (removed first if --clean or --clean-all was given)
+        self.result_directory = os.path.join(self.fwroot, "results")
+        if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
+            shutil.rmtree(os.path.join(self.fwroot, "results"))
+
+        # remove installs directories if --clean-all provided
+        self.install_root = "%s/%s" % (self.fwroot, "installs")
+        if args['clean_all']:
+            os.system("sudo rm -rf " + self.install_root)
+            os.mkdir(self.install_root)
+
+        self.results = None
         try:
-          self.__stop_test(out, process)
-        except (subprocess.CalledProcessError) as e:
-          self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
-          out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
-          traceback.print_exc(file=out)
-          out.flush()
-        out.close()
-        return exit_with_code(1)
-      # TODO - subprocess should not catch this exception!
-      # Parent process should catch it and cleanup/exit
-      except (KeyboardInterrupt) as e:
-        self.__stop_test(out, process)
-        out.write(header("Cleaning up..."))
-        out.flush()
-        self.__finish()
-        sys.exit(1)
-
-      out.close()
-      return exit_with_code(0)
-
-  ############################################################
-  # End __run_tests
-  ############################################################
-
-  ############################################################
-  # __stop_test(benchmarker)
-  # Stops all running tests
-  ############################################################
-  def __stop_test(self, out, process):
-    if process is not None and process.poll() is None:
-      # Stop 
-      pids = self.__find_child_processes(process.pid)
-      if pids is not None:
-        stop = ['kill', '-STOP'] + pids
-        subprocess.call(stop, stderr=out, stdout=out)
-      pids = self.__find_child_processes(process.pid)
-      if pids is not None:
-        term = ['kill', '-TERM'] + pids
-        subprocess.call(term, stderr=out, stdout=out)
-      # Okay, if there are any more PIDs, kill them harder
-      pids = self.__find_child_processes(process.pid)
-      if pids is not None:
-        kill = ['kill', '-KILL'] + pids
-        subprocess.call(kill, stderr=out, stdout=out)
-      process.terminate()
-  ############################################################
-  # End __stop_test
-  ############################################################
-
-  ############################################################
-  # __find_child_processes
-  # Recursively finds all child processes for the given PID.
-  ############################################################
-  def __find_child_processes(self, pid):
-    toRet = []
-    try:
-      pids = subprocess.check_output(['pgrep','-P',str(pid)]).split()
-      toRet.extend(pids)
-      for aPid in pids:
-        toRet.extend(self.__find_child_processes(aPid))
-    except:
-      # pgrep will return a non-zero status code if there are no
-      # processes who have a PPID of PID.
-      pass
-
-    return toRet
-  ############################################################
-  # End __find_child_processes
-  ############################################################
-
-  def is_port_bound(self, port):
-    return self.__is_port_bound(port)
-
-  ############################################################
-  # __is_port_bound
-  # Check if the requested port is available. If it
-  # isn't available, then a previous test probably didn't
-  # shutdown properly.
-  ############################################################
-  def __is_port_bound(self, port):
-    port = int(port)
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    try:
-      # Try to bind to all IP addresses, this port
-      s.bind(("", port))
-      # If we get here, we were able to bind successfully,
-      # which means the port is free.
-    except socket.error:
-      # If we get an exception, it might be because the port is still bound
-      # which would be bad, or maybe it is a privileged port (<1024) and we
-      # are not running as root, or maybe the server is gone, but sockets are
-      # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
-      # connect.
-      try:
-        s.connect(("127.0.0.1", port))
-        # If we get here, we were able to connect to something, which means
-        # that the port is still bound.
-        return True
-      except socket.error:
-        # An exception means that we couldn't connect, so a server probably
-        # isn't still running on the port.
-        pass
-    finally:
-      s.close()
-
-    return False
-
-  ############################################################
-  # End __is_port_bound
-  ############################################################
-
-  ############################################################
-  # __parse_results
-  # Ensures that the system has all necessary software to run
-  # the tests. This does not include that software for the individual
-  # test, but covers software such as curl and weighttp that
-  # are needed.
-  ############################################################
-  def __parse_results(self, tests):
-    # Run the method to get the commmit count of each framework.
-    self.__count_commits()
-    # Call the method which counts the sloc for each framework
-    self.__count_sloc()
-
-    # Time to create parsed files
-    # Aggregate JSON file
-    with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
-      f.write(json.dumps(self.results, indent=2))
-
-  ############################################################
-  # End __parse_results
-  ############################################################
-
-
-  #############################################################
-  # __count_sloc
-  #############################################################
-  def __count_sloc(self):
-    frameworks = gather_frameworks(include=self.test,
-      exclude=self.exclude, benchmarker=self)
-
-    jsonResult = {}
-    for framework, testlist in frameworks.iteritems():
-      if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
-        logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
-        continue
-
-      # Unfortunately the source_code files use lines like
-      # ./cpoll_cppsp/www/fortune_old instead of
-      # ./www/fortune_old
-      # so we have to back our working dir up one level
-      wd = os.path.dirname(testlist[0].directory)
-
-      try:
-        command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
-
-        if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
-          command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
-          logging.info("Using custom cloc definitions for %s", framework)
-
-        # Find the last instance of the word 'code' in the yaml output. This should
-        # be the line count for the sum of all listed files or just the line count
-        # for the last file in the case where there's only one file listed.
-        command = command + "| grep code | tail -1 | cut -d: -f 2"
-        logging.debug("Running \"%s\" (cwd=%s)", command, wd)
-        lineCount = subprocess.check_output(command, cwd=wd, shell=True)
-        jsonResult[framework] = int(lineCount)
-      except subprocess.CalledProcessError:
-        continue
-      except ValueError as ve:
-        logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
-    self.results['rawData']['slocCounts'] = jsonResult
-  ############################################################
-  # End __count_sloc
-  ############################################################
-
-  ############################################################
-  # __count_commits
-  #
-  ############################################################
-  def __count_commits(self):
-    frameworks = gather_frameworks(include=self.test,
-      exclude=self.exclude, benchmarker=self)
-
-    def count_commit(directory, jsonResult):
-      command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
-      try:
-        commitCount = subprocess.check_output(command, shell=True)
-        jsonResult[framework] = int(commitCount)
-      except subprocess.CalledProcessError:
-        pass
-
-    # Because git can be slow when run in large batches, this
-    # calls git up to 4 times in parallel. Normal improvement is ~3-4x
-    # in my trials, or ~100 seconds down to ~25
-    # This is safe to parallelize as long as each thread only
-    # accesses one key in the dictionary
-    threads = []
-    jsonResult = {}
-    t1 = datetime.now()
-    for framework, testlist in frameworks.iteritems():
-      directory = testlist[0].directory
-      t = threading.Thread(target=count_commit, args=(directory,jsonResult))
-      t.start()
-      threads.append(t)
-      # Git has internal locks, full parallel will just cause contention
-      # and slowness, so we rate-limit a bit
-      if len(threads) >= 4:
-        threads[0].join()
-        threads.remove(threads[0])
-
-    # Wait for remaining threads
-    for t in threads:
-      t.join()
-    t2 = datetime.now()
-    # print "Took %s seconds " % (t2 - t1).seconds
-
-    self.results['rawData']['commitCounts'] = jsonResult
-    self.commits = jsonResult
-  ############################################################
-  # End __count_commits
-  ############################################################
-
-  ############################################################
-  # __write_intermediate_results
-  ############################################################
-  def __write_intermediate_results(self,test_name,status_message):
-    try:
-      self.results["completed"][test_name] = status_message
-      with open(os.path.join(self.full_results_directory(), 'results.json'), 'w') as f:
-        f.write(json.dumps(self.results, indent=2))
-    except (IOError):
-      logging.error("Error writing results.json")
-
-  ############################################################
-  # End __write_intermediate_results
-  ############################################################
-
-  def __load_results(self):
-    try:
-      with open(os.path.join(self.full_results_directory(), 'results.json')) as f:
-        self.results = json.load(f)
-    except (ValueError, IOError):
-      pass
-
-  ############################################################
-  # __finish
-  ############################################################
-  def __finish(self):
-    if not self.list_tests and not self.list_test_metadata and not self.parse:
-      tests = self.__gather_tests
-      # Normally you don't have to use Fore.BLUE before each line, but
-      # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
-      # or stream flush, so we have to ensure that the color code is printed repeatedly
-      prefix = Fore.CYAN
-      for line in header("Verification Summary", top='=', bottom='').split('\n'):
-        print prefix + line
-      for test in tests:
-        print prefix + "| Test: %s" % test.name
-        if test.name in self.results['verify'].keys():
-          for test_type, result in self.results['verify'][test.name].iteritems():
-            if result.upper() == "PASS":
-              color = Fore.GREEN
-            elif result.upper() == "WARN":
-              color = Fore.YELLOW
-            else:
-              color = Fore.RED
-            print prefix + "|       " + test_type.ljust(11) + ' : ' + color + result.upper()
+            with open(os.path.join(self.full_results_directory(), 'results.json'), 'r') as f:
+                #Load json file into results object
+                self.results = json.load(f)
+        except IOError:
+            logging.warn("results.json for test not found.")
+
+        if self.results == None:
+            self.results = dict()
+            self.results['concurrencyLevels'] = self.concurrency_levels
+            self.results['queryIntervals'] = self.query_levels
+            self.results['frameworks'] = [t.name for t in self.__gather_tests]
+            self.results['duration'] = self.duration
+            self.results['rawData'] = dict()
+            self.results['rawData']['json'] = dict()
+            self.results['rawData']['db'] = dict()
+            self.results['rawData']['query'] = dict()
+            self.results['rawData']['fortune'] = dict()
+            self.results['rawData']['update'] = dict()
+            self.results['rawData']['plaintext'] = dict()
+            self.results['completed'] = dict()
+            self.results['succeeded'] = dict()
+            self.results['succeeded']['json'] = []
+            self.results['succeeded']['db'] = []
+            self.results['succeeded']['query'] = []
+            self.results['succeeded']['fortune'] = []
+            self.results['succeeded']['update'] = []
+            self.results['succeeded']['plaintext'] = []
+            self.results['failed'] = dict()
+            self.results['failed']['json'] = []
+            self.results['failed']['db'] = []
+            self.results['failed']['query'] = []
+            self.results['failed']['fortune'] = []
+            self.results['failed']['update'] = []
+            self.results['failed']['plaintext'] = []
+            self.results['verify'] = dict()
         else:
-          print prefix + "|      " + Fore.RED + "NO RESULTS (Did framework launch?)"
-      print prefix + header('', top='', bottom='=') + Style.RESET_ALL
-
-    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
-    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
-
-  ############################################################
-  # End __finish
-  ############################################################
-
-  ##########################################################################################
-  # Constructor
-  ##########################################################################################
-
-  ############################################################
-  # Initialize the benchmarker. The args are the arguments
-  # parsed via argparser.
-  ############################################################
-  def __init__(self, args):
-
-    # Map type strings to their objects
-    types = dict()
-    types['json'] = JsonTestType()
-    types['db'] = DBTestType()
-    types['query'] = QueryTestType()
-    types['fortune'] = FortuneTestType()
-    types['update'] = UpdateTestType()
-    types['plaintext'] = PlaintextTestType()
-
-    # Turn type into a map instead of a string
-    if args['type'] == 'all':
-        args['types'] = types
-    else:
-        args['types'] = { args['type'] : types[args['type']] }
-    del args['type']
-
-
-    args['max_threads'] = args['threads']
-    args['max_concurrency'] = max(args['concurrency_levels'])
-
-    self.__dict__.update(args)
-    # pprint(self.__dict__)
-
-    self.start_time = time.time()
-    self.run_test_timeout_seconds = 7200
-
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-
-    # setup some additional variables
-    if self.database_user == None: self.database_user = self.client_user
-    if self.database_host == None: self.database_host = self.client_host
-    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
-
-    # Remember root directory
-    self.fwroot = setup_util.get_fwroot()
-
-    # setup current_benchmark.txt location
-    self.current_benchmark = "/tmp/current_benchmark.txt"
-
-    if hasattr(self, 'parse') and self.parse != None:
-      self.timestamp = self.parse
-    else:
-      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
-
-    # setup results and latest_results directories
-    self.result_directory = os.path.join(self.fwroot, "results")
-    if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
-        shutil.rmtree(os.path.join(self.fwroot, "results"))
-
-    # remove installs directories if --clean-all provided
-    self.install_root = "%s/%s" % (self.fwroot, "installs")
-    if args['clean_all']:
-        os.system("sudo rm -rf " + self.install_root)
-        os.mkdir(self.install_root)
-
-    self.results = None
-    try:
-      with open(os.path.join(self.full_results_directory(), 'results.json'), 'r') as f:
-        #Load json file into results object
-        self.results = json.load(f)
-    except IOError:
-      logging.warn("results.json for test not found.")
-
-    if self.results == None:
-      self.results = dict()
-      self.results['concurrencyLevels'] = self.concurrency_levels
-      self.results['queryIntervals'] = self.query_levels
-      self.results['frameworks'] = [t.name for t in self.__gather_tests]
-      self.results['duration'] = self.duration
-      self.results['rawData'] = dict()
-      self.results['rawData']['json'] = dict()
-      self.results['rawData']['db'] = dict()
-      self.results['rawData']['query'] = dict()
-      self.results['rawData']['fortune'] = dict()
-      self.results['rawData']['update'] = dict()
-      self.results['rawData']['plaintext'] = dict()
-      self.results['completed'] = dict()
-      self.results['succeeded'] = dict()
-      self.results['succeeded']['json'] = []
-      self.results['succeeded']['db'] = []
-      self.results['succeeded']['query'] = []
-      self.results['succeeded']['fortune'] = []
-      self.results['succeeded']['update'] = []
-      self.results['succeeded']['plaintext'] = []
-      self.results['failed'] = dict()
-      self.results['failed']['json'] = []
-      self.results['failed']['db'] = []
-      self.results['failed']['query'] = []
-      self.results['failed']['fortune'] = []
-      self.results['failed']['update'] = []
-      self.results['failed']['plaintext'] = []
-      self.results['verify'] = dict()
-    else:
-      #for x in self.__gather_tests():
-      #  if x.name not in self.results['frameworks']:
-      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
-      # Always overwrite framework list
-      self.results['frameworks'] = [t.name for t in self.__gather_tests]
-
-    # Setup the ssh command string
-    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
-    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
-    if self.database_identity_file != None:
-      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
-    if self.client_identity_file != None:
-      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
-
-  ############################################################
-  # End __init__
-  ############################################################
+            #for x in self.__gather_tests():
+            #  if x.name not in self.results['frameworks']:
+            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
+            # Always overwrite framework list
+            self.results['frameworks'] = [t.name for t in self.__gather_tests]
+
+        # Setup the ssh command string
+        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
+        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
+        if self.database_identity_file != None:
+            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
+        if self.client_identity_file != None:
+            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
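+        # The assembled commands take the shape (values come from the config / CLI):
+        #   ssh -T -o StrictHostKeyChecking=no <database_user>@<database_host> -i <database_identity_file>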
+
+    ############################################################
+    # End __init__
+    ############################################################

+ 149 - 149
toolset/benchmark/utils.py

@@ -7,125 +7,125 @@ import socket
 from ast import literal_eval
 
 def gather_tests(include = [], exclude=[], benchmarker=None):
-  '''
-  Given test names as strings, returns a list of FrameworkTest objects. 
-  For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
-  variables for checking the test directory, the test database os, and 
-  other useful items. 
-
-  With no arguments, every test in this framework will be returned.  
-  With include, only tests with this exact name will be returned. 
-  With exclude, all tests but those excluded will be returned. 
-
-  A benchmarker is needed to construct full FrameworkTest objects. If
-  one is not provided, a default Benchmarker will be created. 
-  '''
-
-  # Avoid setting up a circular import
-  from benchmark import framework_test
-  from benchmark.benchmarker import Benchmarker
-  from setup.linux import setup_util
-
-  # Help callers out a bit
-  if include is None:
-    include = []
-  if exclude is None:
-    exclude = []
-
-  # Old, hacky method to exclude all tests was to 
-  # request a test known to not exist, such as ''. 
-  # If test '' was requested, short-circuit and return 
-  # nothing immediately
-  if len(include) == 1 and '' in include:
-    return []
-    
-  # Setup default Benchmarker using example configuration
-  if benchmarker is None:
-    print "Creating Benchmarker from benchmark.cfg.example"
-    default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
-    config = ConfigParser.SafeConfigParser()
-    config.readfp(open(default_config))
-    defaults = dict(config.items("Defaults"))
-    
-    # Convert strings into proper python types
-    for k,v in defaults.iteritems():
-      try:
-        defaults[k] = literal_eval(v)
-      except Exception:
-        pass
-
-    # Ensure we only run the __init__ method of Benchmarker
-    defaults['install'] = None
-    
-    benchmarker = Benchmarker(defaults)
-
-  
-  # Search for configuration files
-  fwroot = setup_util.get_fwroot()
-  config_files = []
-  if benchmarker.test_dir:
-    for test_dir in benchmarker.test_dir:
-        dir_config_files = glob.glob("{!s}/frameworks/{!s}/benchmark_config.json".format(fwroot, test_dir))
-        if len(dir_config_files):
-            config_files.extend(dir_config_files)
-        else:
-            raise Exception("Unable to locate tests in test-dir: {!s}".format(test_dir))
-  else:
-    config_files.extend(glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))
-  
-  tests = []
-  for config_file_name in config_files:
-    config = None
-    with open(config_file_name, 'r') as config_file:
-      try:
-        config = json.load(config_file)
-      except ValueError:
-        # User-friendly errors
-        print("Error loading '%s'." % config_file_name)
-        raise
-
-    # Find all tests in the config file
-    config_tests = framework_test.parse_config(config, 
-      os.path.dirname(config_file_name), benchmarker)
-        
-    # Filter
-    for test in config_tests:
-      if len(include) is 0 and len(exclude) is 0:
-        # No filters, we are running everything
-        tests.append(test)
-      elif test.name in exclude:
-        continue
-      elif test.name in include:
-        tests.append(test)
-      else: 
-        # An include list exists, but this test is 
-        # not listed there, so we ignore it
-        pass
-
-  # Ensure we were able to locate everything that was 
-  # explicitly included 
-  if 0 != len(include):
-    names = {test.name for test in tests}
-    if 0 != len(set(include) - set(names)):
-      missing = list(set(include) - set(names))
-      raise Exception("Unable to locate tests %s" % missing)
-
-  tests.sort(key=lambda x: x.name)
-  return tests
+    '''
+    Given test names as strings, returns a list of FrameworkTest objects.
+    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
+    variables for checking the test directory, the test database os, and
+    other useful items.
+
+    With no arguments, every test in this framework will be returned.
+    With include, only tests with this exact name will be returned.
+    With exclude, all tests but those excluded will be returned.
+
+    A benchmarker is needed to construct full FrameworkTest objects. If
+    one is not provided, a default Benchmarker will be created.
+    '''
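+    # e.g. gather_tests(include=['aspnet-mysql-raw']) returns only that FrameworkTest,
+    # while gather_tests() with no filters returns every test discovered under
+    # frameworks/*/*/benchmark_config.json.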
+
+    # Avoid setting up a circular import
+    from benchmark import framework_test
+    from benchmark.benchmarker import Benchmarker
+    from setup.linux import setup_util
+
+    # Help callers out a bit
+    if include is None:
+        include = []
+    if exclude is None:
+        exclude = []
+
+    # Old, hacky method to exclude all tests was to
+    # request a test known to not exist, such as ''.
+    # If test '' was requested, short-circuit and return
+    # nothing immediately
+    if len(include) == 1 and '' in include:
+        return []
+
+    # Setup default Benchmarker using example configuration
+    if benchmarker is None:
+        print "Creating Benchmarker from benchmark.cfg"
+        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
+        config = ConfigParser.SafeConfigParser()
+        config.readfp(open(default_config))
+        defaults = dict(config.items("Defaults"))
+
+        # Convert strings into proper python types
+        for k,v in defaults.iteritems():
+            try:
+                defaults[k] = literal_eval(v)
+            except Exception:
+                pass
+
+        # Ensure we only run the __init__ method of Benchmarker
+        defaults['install'] = None
+
+        benchmarker = Benchmarker(defaults)
+
+
+    # Search for configuration files
+    fwroot = setup_util.get_fwroot()
+    config_files = []
+    if benchmarker.test_dir:
+        for test_dir in benchmarker.test_dir:
+            dir_config_files = glob.glob("{!s}/frameworks/{!s}/benchmark_config.json".format(fwroot, test_dir))
+            if len(dir_config_files):
+                config_files.extend(dir_config_files)
+            else:
+                raise Exception("Unable to locate tests in test-dir: {!s}".format(test_dir))
+    else:
+        config_files.extend(glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))
+
+    tests = []
+    for config_file_name in config_files:
+        config = None
+        with open(config_file_name, 'r') as config_file:
+            try:
+                config = json.load(config_file)
+            except ValueError:
+                # User-friendly errors
+                print("Error loading '%s'." % config_file_name)
+                raise
+
+        # Find all tests in the config file
+        config_tests = framework_test.parse_config(config,
+                                                   os.path.dirname(config_file_name), benchmarker)
+
+        # Filter
+        for test in config_tests:
+            if len(include) == 0 and len(exclude) == 0:
+                # No filters, we are running everything
+                tests.append(test)
+            elif test.name in exclude:
+                continue
+            elif test.name in include:
+                tests.append(test)
+            else:
+                # An include list exists, but this test is
+                # not listed there, so we ignore it
+                pass
+
+    # Ensure we were able to locate everything that was
+    # explicitly included
+    if 0 != len(include):
+        names = {test.name for test in tests}
+        if 0 != len(set(include) - set(names)):
+            missing = list(set(include) - set(names))
+            raise Exception("Unable to locate tests %s" % missing)
+
+    tests.sort(key=lambda x: x.name)
+    return tests
 
 def gather_frameworks(include = [], exclude=[], benchmarker=None):
-  '''Return a dictionary mapping frameworks->[test1,test2,test3]
-  for quickly grabbing all tests in a grouped manner. 
-  Args have the same meaning as gather_tests'''
+    '''Return a dictionary mapping frameworks->[test1,test2,test3]
+    for quickly grabbing all tests in a grouped manner.
+    Args have the same meaning as gather_tests'''
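+    # e.g. two (hypothetical) tests named aspnet-mysql-raw and aspnet-postgresql-raw
+    # would be grouped under a single 'aspnet' key, each entry being a FrameworkTest.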
 
-  tests = gather_tests(include, exclude, benchmarker)
-  frameworks = dict()
+    tests = gather_tests(include, exclude, benchmarker)
+    frameworks = dict()
 
-  for test in tests:
-    if test.framework not in frameworks: 
-      frameworks[test.framework] = []
-    frameworks[test.framework].append(test)
-  return frameworks
+    for test in tests:
+        if test.framework not in frameworks:
+            frameworks[test.framework] = []
+        frameworks[test.framework].append(test)
+    return frameworks
 
 def header(message, top='-', bottom='-'):
     '''
@@ -135,45 +135,45 @@ def header(message, top='-', bottom='-'):
     bottomheader = (bottom * 80)[:80]
     result = ""
     if topheader != "":
-      result += "%s" % topheader
+        result += "%s" % topheader
     if message != "":
-      if result == "":
-        result = "  %s" % message
-      else:
-        result += "\n  %s" % message
+        if result == "":
+            result = "  %s" % message
+        else:
+            result += "\n  %s" % message
     if bottomheader != "":
-      if result == "":
-        result = "%s" % bottomheader
-      else:
-        result += "\n%s" % bottomheader
+        if result == "":
+            result = "%s" % bottomheader
+        else:
+            result += "\n%s" % bottomheader
     return result + '\n'
 
 def check_services(services):
 
-  def check_service(address, port):
-    try:
-      s = socket.socket()
-      s.settimeout(20)
-      s.connect((address, port))
-      return (True, "")
-    except Exception as ex:
-      return (False, ex)
-    finally:
-      s.close
-
-  res = []
-  for s in services:
-    r = check_service(s[1], s[2])
-    res.append((s[0], r[0], str(r[1])))
-  return res
+    def check_service(address, port):
+        try:
+            s = socket.socket()
+            s.settimeout(20)
+            s.connect((address, port))
+            return (True, "")
+        except Exception as ex:
+            return (False, ex)
+        finally:
+            s.close()
+
+    res = []
+    for s in services:
+        r = check_service(s[1], s[2])
+        res.append((s[0], r[0], str(r[1])))
+    return res
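+    # e.g. check_services([("mysql", "TFB-database", 3306)]) might return
+    # [("mysql", True, "")] if the (illustrative) database host accepts connections on
+    # that port, or ("mysql", False, "<error text>") if it does not.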
 
 def verify_database_connections(services):
-  allGo = True
-  messages = []
-  for r in check_services(services):
-    if r[1]:
-      messages.append(r[0] + ": is GO!")
-    else:
-      messages.append(r[0] + ": is _NO_ GO!: ERROR: " + r[2])
-      allGo = False
-  return (allGo, messages)
+    allGo = True
+    messages = []
+    for r in check_services(services):
+        if r[1]:
+            messages.append(r[0] + ": is GO!")
+        else:
+            messages.append(r[0] + ": is _NO_ GO!: ERROR: " + r[2])
+            allGo = False
+    return (allGo, messages)

+ 56 - 83
toolset/run-tests.py

@@ -5,10 +5,6 @@ import sys
 import os
 import platform
 import multiprocessing
-import itertools
-import copy
-import subprocess
-from pprint import pprint 
 from benchmark.benchmarker import Benchmarker
 from setup.linux.unbuffered import Unbuffered
 from setup.linux import setup_util
@@ -19,31 +15,31 @@ from colorama import init
 init()
 
 class StoreSeqAction(argparse.Action):
-  '''Helper class for parsing a sequence from the command line'''
-  def __init__(self, option_strings, dest, nargs=None, **kwargs):
-     super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs)
-  def __call__(self, parser, namespace, values, option_string=None):
-    setattr(namespace, self.dest, self.parse_seq(values))
-  def parse_seq(self, argument):
-    result = argument.split(',')
-    sequences = [x for x in result if ":" in x]
-    for sequence in sequences:
-      try:
-        (start,step,end) = sequence.split(':')
-      except ValueError: 
-        print "  Invalid: %s" % sequence
-        print "  Requires start:step:end, e.g. 1:2:10"
-        raise
-      result.remove(sequence)
-      result = result + range(int(start), int(end), int(step))
-    return [abs(int(item)) for item in result]
+    '''Helper class for parsing a sequence from the command line'''
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs)
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.parse_seq(values))
+    def parse_seq(self, argument):
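+        # e.g. parse_seq("1,3,6") -> [1, 3, 6] and parse_seq("1:3:15") -> [1, 4, 7, 10, 13],
+        # mirroring the int-sequence semantics described in the --help epilog below.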
+        result = argument.split(',')
+        sequences = [x for x in result if ":" in x]
+        for sequence in sequences:
+            try:
+                (start,step,end) = sequence.split(':')
+            except ValueError:
+                print("  Invalid: {!s}".format(sequence))
+                print("  Requires start:step:end, e.g. 1:2:10")
+                raise
+            result.remove(sequence)
+            result = result + range(int(start), int(end), int(step))
+        return [abs(int(item)) for item in result]
 
 
 ###################################################################################################
 # Main
 ###################################################################################################
 def main(argv=None):
-    ''' Runs the program. There are three ways to pass arguments 
+    ''' Runs the program. There are three ways to pass arguments
     1) environment variables TFB_*
     2) configuration file benchmark.cfg
     3) command line flags
@@ -69,16 +65,21 @@ def main(argv=None):
     # 'Ubuntu', '14.04', 'trusty' respectively
     os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
 
-    print "FWROOT is %s"%os.environ['FWROOT']
+    print("FWROOT is {!s}.".format(os.environ['FWROOT']))
 
     conf_parser = argparse.ArgumentParser(
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter,
         add_help=False)
-    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
+    conf_parser.add_argument(
+        '--conf_file', default='benchmark.cfg', metavar='FILE',
+        help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
     args, remaining_argv = conf_parser.parse_known_args()
 
     try:
+        if not os.path.exists(args.conf_file) and not os.path.exists('benchmark.cfg'):
+            print("No config file found. Falling back to benchmark.cfg.example defaults.")
+            args.conf_file = 'benchmark.cfg.example'
         with open (args.conf_file):
             config = ConfigParser.SafeConfigParser()
             config.read([os.getcwd() + '/' + args.conf_file])
@@ -90,21 +91,27 @@ def main(argv=None):
                 except Exception:
                     pass
     except IOError:
-        if args.conf_file != 'benchmark.cfg':
-            print 'Configuration file not found!'
-        defaults = { "client-host":"localhost"}
+        print("Configuration file not found!")
+        exit(1)
 
     ##########################################################
     # Set up default values
-    ##########################################################        
-    serverHost = os.environ.get('TFB_SERVER_HOST')
-    clientHost = os.environ.get('TFB_CLIENT_HOST')
-    clientUser = os.environ.get('TFB_CLIENT_USER')
-    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
-    runnerUser = os.environ.get('TFB_RUNNER_USER')
-    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
-    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
-    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
+    ##########################################################
+
+    # Verify and massage options
+    if defaults.get('client_user') is None or defaults.get('client_host') is None:
+        print("client_user and client_host are required!")
+        print("Please check your configuration file.")
+        print("Aborting!")
+        exit(1)
+
+    if defaults.get('database_user') is None:
+        defaults['database_user'] = defaults['client_user']
+    if defaults.get('database_host') is None:
+        defaults['database_host'] = defaults['client_host']
+    if defaults.get('server_host') is None:
+        defaults['server_host'] = defaults['client_host']
+
     maxThreads = 8
     try:
         maxThreads = multiprocessing.cpu_count()
@@ -115,47 +122,32 @@ def main(argv=None):
     # Set up argument parser
     ##########################################################
     parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
-        parents=[conf_parser],
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
-        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
-        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
-        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
+                                     parents=[conf_parser],
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+                                     epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
+        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
+        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
+        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
         0:1:5 creates [0, 1, 2, 3, 4]
         ''')
 
-    # SSH options
-    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
-    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
-    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
-    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
-                        help='The key to use for SSH to the client instance.')
-    parser.add_argument('-d', '--database-host', default=databaHost,
-                        help='The database server.  If not provided, defaults to the value of --client-host.')
-    parser.add_argument('--database-user', default=databaUser,
-                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
-    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
-                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.') 
-    
     # Install options
     parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
     parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
 
     # Test options
     parser.add_argument('--test', nargs='+', help='names of tests to run')
-    parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
     parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
     parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
     parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
     parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
-    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
     parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application/framework server (the one running' +
-                        'this binary')
+                                                                                    ' this binary).')
     parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
 
     # Benchmark options
     parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
-    parser.add_argument('--query-levels', default=[1, 5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction) 
+    parser.add_argument('--query-levels', default=[1, 5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
     parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
     parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
     parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
@@ -167,36 +159,17 @@ def main(argv=None):
     parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
     args = parser.parse_args(remaining_argv)
 
-    # Verify and massage options
-    if args.client_user is None:
-      print 'Usernames (e.g. --client-user, and --database-user) are required!'
-      print 'The system will SSH into the client and the database for the install stage'
-      print 'Aborting'
-      exit(1)    
-
-    if args.database_user is None:
-      args.database_user = args.client_user
-
-    if args.database_host is None:
-      args.database_host = args.client_host
-
-    if args.verbose:
-        print 'Configuration options: '
-        pprint(vars(args))
-
     benchmarker = Benchmarker(vars(args))
 
     # Run the benchmarker in the specified mode
-    #   Do not use benchmarker variables for these checks, 
+    #   Do not use benchmarker variables for these checks,
     #   they are either str or bool based on the python version
     if args.list_tests:
-      benchmarker.run_list_tests()
-    elif args.list_test_metadata:
-      benchmarker.run_list_test_metadata()
+        benchmarker.run_list_tests()
     elif args.parse != None:
-      benchmarker.parse_timestamp()
+        benchmarker.parse_timestamp()
     else:
-      return benchmarker.run()
+        return benchmarker.run()
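
The comment above matters because some of the benchmarker's copies of these options can arrive as strings rather than booleans, and any non-empty string is truthy in Python. A standalone illustration of the pitfall, using a hypothetical value:

    list_tests = "False"   # hypothetical: the option as it might arrive as a string
    if list_tests:
        print("this branch runs even though the value reads 'False'")
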
 
 if __name__ == "__main__":
     sys.exit(main())

+ 1 - 9
toolset/travis/travis_setup.sh

@@ -13,15 +13,7 @@ echo "NoHostAuthenticationForLocalhost yes" | tee -a /home/travis/.ssh/config
 chmod 600 ~/.ssh/config
 
 # Set up the benchmark.cfg for travis user
-echo "[Defaults]"                                       > benchmark.cfg
-echo "client_identity_file=/home/travis/.ssh/id_rsa"   >> benchmark.cfg
-echo "database_identity_file=/home/travis/.ssh/id_rsa" >> benchmark.cfg
-echo "client_host=127.0.0.1"                           >> benchmark.cfg
-echo "database_host=127.0.0.1"                         >> benchmark.cfg
-echo "server_host=127.0.0.1"                           >> benchmark.cfg
-echo "client_user=travis"                              >> benchmark.cfg
-echo "database_user=travis"                            >> benchmark.cfg
-echo "runner_user=travis"                              >> benchmark.cfg
+sed -i s/techempower/travis/g ./benchmark.cfg
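
The sed one-liner above swaps every occurrence of the string techempower in benchmark.cfg for travis. A rough Python equivalent, shown only to illustrate the substitution:

    # Illustration only: roughly what the sed command above does to benchmark.cfg.
    with open('benchmark.cfg') as f:
        cfg = f.read()
    with open('benchmark.cfg', 'w') as f:
        f.write(cfg.replace('techempower', 'travis'))
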
 
 echo "travis ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers