
Ensure certain messages are always printed to stdout

- Adds a default StreamHandler inside __run_test that can
be used to send unformatted output to stdout. This
emulates 'print' while still allowing the messages to be
logged to the FileHandler (see the sketch below)
- Docs at top of FrameworkTest
- Removes some extra calls to basicConfig
- Refines WrapLogger to support all log levels
- Adds utils.py file to store useful utilities
Hamilton Turner · 11 years ago · commit f75720d5d2
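
A minimal sketch of the handler arrangement the first bullet describes, assuming a per-test log file; the logger name and 'out.txt' path are illustrative, not the commit's exact code:

  import logging
  import sys

  log = logging.getLogger('framework_test')
  log.setLevel(logging.DEBUG)

  # Everything (DEBUG and up) is archived to the per-test file
  fileHandler = logging.FileHandler('out.txt')
  fileHandler.setLevel(logging.DEBUG)
  log.addHandler(fileHandler)

  # INFO and up is also echoed, unformatted, to stdout -- emulating 'print'
  streamHandler = logging.StreamHandler(sys.stdout)
  streamHandler.setFormatter(logging.Formatter("%(message)s"))
  streamHandler.setLevel(logging.INFO)
  log.addHandler(streamHandler)

  log.info("shown on stdout and written to the log file")
  log.debug("written to the log file only")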

+ 54 - 34
toolset/benchmark/benchmarker.py

@@ -2,6 +2,7 @@ from setup.linux.installer import Installer
 from setup.linux import setup_util
 
 from benchmark import framework_test
+from utils import WrapLogger
 
 import os
 import json
@@ -278,7 +279,7 @@ class Benchmarker:
   # report_results
   ############################################################
   def report_results(self, framework, test, results):
-    log.info("report_results: %s - %s" % (framework.name, test))
+    log.debug("report_results: %s - %s" % (framework.name, test))
     log.debug("report_results: %s" % results)
 
     if test not in self.results['rawData'].keys():
@@ -419,14 +420,18 @@ class Benchmarker:
     try:
       if os.name == 'nt':
         return True
-      subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
-      subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
-      subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
-      subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
-      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
-      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
-      subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
-      subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
+      (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
+      def check(command):
+        subprocess.check_call(command, stdout=out, stderr=err)
+
+      check(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
+      check("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
+      check("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
+      check("sudo -s ulimit -n 65535".rsplit(" "))
+      check("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
+      check("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
+      check("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
+      check("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
     except subprocess.CalledProcessError:
       return False
   ############################################################
@@ -441,7 +446,8 @@ class Benchmarker:
   ############################################################
   def __setup_database(self):
     log.info("__setup_database")
-    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
+    (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
+    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
     p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
       sudo sysctl -w net.core.somaxconn=65535
@@ -463,7 +469,8 @@ class Benchmarker:
   ############################################################
   def __setup_client(self):
     log.info("__setup_client")
-    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
+    (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
+    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
     p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
       sudo sysctl -w net.core.somaxconn=65535
@@ -514,6 +521,7 @@ class Benchmarker:
             log.warn("Child process for {name} is still alive. Terminating.".format(name=test.name))
             self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
             test_process.terminate()
+          log.handlers = []  # Clean up handlers left by __run_test
     log.info("End __run_tests")
 
   ############################################################
@@ -542,6 +550,26 @@ class Benchmarker:
     logHandler.setLevel(logging.DEBUG)
     log.addHandler(logHandler)
 
+    # Ensure messages of level INFO or above are sent to stdout.
+    # This is a bit tricky: because we pass our logger to
+    # framework_test, anything logged to it at INFO or greater
+    # is always shown on stdout. This is good for some things
+    # (e.g. validate) and bad for others (e.g. "in function foo").
+    # Ensure you use 'logger' vs 'log' properly in framework_test
+    rootStreamHandler = logging.getLogger().handlers[0]
+    if rootStreamHandler.level > logging.INFO:
+      streamHandler = logging.StreamHandler(sys.stdout)
+      streamHandler.setFormatter(logging.Formatter("%(message)s"))
+      streamHandler.setLevel(logging.INFO)
+      log.addHandler(streamHandler)
+
     if hasattr(test, 'skip') and test.skip.lower() == "true":
       log.info("Skipping %s: benchmark_config specifies to skip this test", test.name)
       return
@@ -570,19 +598,18 @@ class Benchmarker:
     log.info(textwrap.dedent("""
       =====================================================
         Beginning {name}
-      -----------------------------------------------------
-      """.format(name=test.name)))
+      -----------------------------------------------------""".format(name=test.name)))
 
     # Start this test
     log.info(textwrap.dedent("""
       -----------------------------------------------------
         Starting {name}
-      -----------------------------------------------------
-      """.format(name=test.name)))
+      -----------------------------------------------------""".format(name=test.name)))
 
     try:
       if test.requires_database():
-        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
+        (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
+        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
         log.debug("Restarting database")
         p.communicate("""
           sudo restart mysql
@@ -596,8 +623,7 @@ class Benchmarker:
         log.error( textwrap.dedent("""
           ---------------------------------------------------------
             Error: Port {port} is not available, cannot start {name}
-          ---------------------------------------------------------
-          """.format(name=test.name, port=str(test.port))) )
+          ---------------------------------------------------------""".format(name=test.name, port=str(test.port))) )
         return
 
       result = test.start(log)
@@ -608,12 +634,11 @@ class Benchmarker:
         log.error(textwrap.dedent("""
           -----------------------------------------------------
             Stopped {name}
-          -----------------------------------------------------
-          """.format(name=test.name)) )
+          -----------------------------------------------------""".format(name=test.name)) )
         self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
         return
       
-      log.debug("Sleeping for %s" % self.sleep)
+      log.info("Sleeping for %s", self.sleep)
       time.sleep(self.sleep)
 
       # Verify URLs
@@ -624,16 +649,14 @@ class Benchmarker:
         log.info( textwrap.dedent("""
           -----------------------------------------------------
             Benchmarking {name} ...
-          -----------------------------------------------------
-          """.format(name=test.name)) )
+          -----------------------------------------------------""".format(name=test.name)) )
         test.benchmark(log)
 
       # Stop this test
       log.info( textwrap.dedent("""
         -----------------------------------------------------
           Stopping {name}
-        -----------------------------------------------------
-        """.format(name=test.name)) )
+        -----------------------------------------------------""".format(name=test.name)) )
       test.stop(log)
       time.sleep(5)
 
@@ -642,15 +665,14 @@ class Benchmarker:
         log.error( textwrap.dedent("""
           -----------------------------------------------------
             Error: Port {port} was not released by stop {name}
-          -----------------------------------------------------
-          """.format(name=test.name, port=str(test.port))) )
+          -----------------------------------------------------""".format(name=test.name, port=str(test.port))) )
+        log.handlers = []
         return
 
       log.info( textwrap.dedent("""
         -----------------------------------------------------
           Stopped {name}
-        -----------------------------------------------------
-        """.format(name=test.name)) )
+        -----------------------------------------------------""".format(name=test.name)) )
       time.sleep(5)
 
       ##########################################################
@@ -660,8 +682,7 @@ class Benchmarker:
       log.info( textwrap.dedent("""
         ----------------------------------------------------
         Saving results through {name}
-        ----------------------------------------------------
-        """.format(name=test.name)) )
+        ----------------------------------------------------""".format(name=test.name)) )
       self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
     except (OSError, IOError, subprocess.CalledProcessError) as e:
       self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
@@ -687,8 +708,7 @@ class Benchmarker:
       log.info( """
         -----------------------------------------------------
           Cleaning up....
-        -----------------------------------------------------
-        """)
+        -----------------------------------------------------""")
       self.__finish()
       sys.exit()
   ############################################################
@@ -814,7 +834,7 @@ class Benchmarker:
   # __write_intermediate_results
   ############################################################
   def __write_intermediate_results(self,test_name,status_message):
-    log.info("__write_intermediate_results: %s reports %s" % (test_name, status_message))
+    log.debug("__write_intermediate_results: %s reports %s" % (test_name, status_message))
     try:
       self.results["completed"][test_name] = status_message
       latest_results = os.path.join(self.latest_results_directory, 'results.json')
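
For reference, one alternative way to get a child process's output into a logger is to capture it over a pipe and re-emit it explicitly. This standalone sketch shows that idea; it is not the approach the commit takes (the commit passes WrapLogger objects as stdout/stderr):

  import logging
  import subprocess

  log = logging.getLogger('benchmarker')

  def check(command):
    # Run the command, capture combined stdout/stderr, re-emit it through
    # the logger at DEBUG, and raise on a non-zero exit code.
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    output, _ = p.communicate()
    for line in output.splitlines():
      log.debug(line)
    if p.returncode != 0:
      raise subprocess.CalledProcessError(p.returncode, command)

  check("sudo sysctl -w net.core.somaxconn=65535".split(" "))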

+ 122 - 155
toolset/benchmark/framework_test.py

@@ -15,9 +15,22 @@ import logging
 log = logging.getLogger('framework_test')
 
 class FrameworkTest:
-  ##########################################################################################
-  # Class variables
-  ##########################################################################################
+  """
+  Represents a framework test, including all types (JSON, plaintext, DB, etc)
+  defined in that test. Used by Benchmarker to start, verify, benchmark, and 
+  stop tests. Calls into the test's setup.py as needed. 
+
+  Note: Any method in this class called from Benchmarker#__run_test is run 
+        inside a thread
+  Note: Many methods have a parameter 'logger' passed in from Benchmarker. 
+        This uses Python's logging module to support writing output to both a 
+        file and stdout concurrently. If you wish to print something to stdout,
+        regardless of the current global log level, use logger.info("Something"). 
+        If you wish to respect the current global log level, use the logger 
+        defined for this class e.g. log.info("Something else"). If you would 
+        like to use this 'logger' with subprocess, see class WrapLogger
+  """
+
   headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
   headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
  
@@ -137,7 +150,7 @@ class FrameworkTest:
   # and a "randomNumber" key, and that both keys map to 
   # integers.
   ############################################################
-  def validateDb(self, jsonString, out, err):
+  def validateDb(self, jsonString, logger=log):
     try:
       obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
 
@@ -156,7 +169,7 @@ class FrameworkTest:
       pass
     return False
 
-  def validateDbStrict(self, jsonString, out, err):
+  def validateDbStrict(self, jsonString, logger=log):
     try:
       obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
 
@@ -177,7 +190,7 @@ class FrameworkTest:
   # each object has an "id" and a "randomNumber" key, and that
   # both keys map to integers.
   ############################################################
-  def validateQuery(self, jsonString, out, err):
+  def validateQuery(self, jsonString, logger=log):
     try:
       arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
 
@@ -196,7 +209,7 @@ class FrameworkTest:
   # each object has an "id" and a "randomNumber" key, and that
   # both keys map to integers.
   ############################################################
-  def validateQueryOneOrLess(self, jsonString, out, err):
+  def validateQueryOneOrLess(self, jsonString, logger=log):
     try:
       arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
 
@@ -221,7 +234,7 @@ class FrameworkTest:
   # each object has an "id" and a "randomNumber" key, and that
   # both keys map to integers.
   ############################################################
-  def validateQueryFiveHundredOrMore(self, jsonString, out, err):
+  def validateQueryFiveHundredOrMore(self, jsonString, logger=log):
     try:
       arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
 
@@ -244,7 +257,7 @@ class FrameworkTest:
   # Parses the given HTML string and asks a FortuneHTMLParser
   # whether the parsed string is a valid fortune return.
   ############################################################
-  def validateFortune(self, htmlString, out, err):
+  def validateFortune(self, htmlString, logger=log):
     try:
       parser = FortuneHTMLParser()
       parser.feed(htmlString)
@@ -260,7 +273,7 @@ class FrameworkTest:
   # each object has an "id" and a "randomNumber" key, and that
   # both keys map to integers.
   ############################################################
-  def validateUpdate(self, jsonString, out, err):
+  def validateUpdate(self, jsonString, logger=log):
     try:
       arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
 
@@ -276,7 +289,7 @@ class FrameworkTest:
   ############################################################
   #
   ############################################################
-  def validatePlaintext(self, jsonString, out, err):
+  def validatePlaintext(self, jsonString, logger=log):
     try:
       return jsonString.lower().strip() == "hello, world!"
     except:
@@ -293,13 +306,13 @@ class FrameworkTest:
     # Load profile for this installation
     profile="%s/bash_profile.sh" % self.directory
     if not os.path.exists(profile):
-      logging.warning("Framework %s does not have a bash_profile" % self.name)
+      logger.warning("Framework %s does not have a bash_profile", self.name)
       profile="$FWROOT/config/benchmark_profile"
     
     set_iroot="export IROOT=%s" % self.install_root
     setup_util.replace_environ(config=profile, command=set_iroot)
 
-    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
+    (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
     return self.setup_module.start(self.benchmarker, out, err)
   ############################################################
   # End start
@@ -310,8 +323,8 @@ class FrameworkTest:
   # Stops the test using it's setup file
   ############################################################
   def stop(self, logger=log):
-    logger.info("stop")
-    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
+    log.info("stop")
+    (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
     return self.setup_module.stop(out, err)
   ############################################################
   # End stop
@@ -325,178 +338,155 @@ class FrameworkTest:
   # or not it passed
   ############################################################
   def verify_urls(self, logger=log):
-    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
 
     # JSON
     if self.runTests[self.JSON]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING JSON ({url})
-        -----------------------------------------------------
-        """.format(url = self.json_url)))
-      out.flush()
+        -----------------------------------------------------""".format(url = self.json_url)))
 
       url = self.benchmarker.generate_url(self.json_url, self.port)
-      output = self.__curl_url(url, self.JSON, out, err)
-      out.write("VALIDATING JSON ... ")
-      if self.validateJson(output, out, err):
+      output = self.__curl_url(url, self.JSON, logger)
+      logger.info("VALIDATING JSON ... ")
+      if self.validateJson(output, logger):
         self.json_url_passed = True
-        out.write("PASS\n\n")
+        logger.info("PASS")
       else:
         self.json_url_passed = False
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
     # DB
     if self.runTests[self.DB]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING DB ({url})
-        -----------------------------------------------------
-        """.format(url = self.db_url)))
-      out.flush()
+        -----------------------------------------------------""".format(url = self.db_url)))
 
       url = self.benchmarker.generate_url(self.db_url, self.port)
-      output = self.__curl_url(url, self.DB, out, err)
-      if self.validateDb(output, out, err):
+      output = self.__curl_url(url, self.DB, logger)
+      if self.validateDb(output, logger):
         self.db_url_passed = True
       else:
         self.db_url_passed = False
-      if self.validateDbStrict(output, out, err):
+      if self.validateDbStrict(output, logger):
         self.db_url_warn = False
       else:
         self.db_url_warn = True
 
-      out.write("VALIDATING DB ... ")
+      logger.info("VALIDATING DB ... ")
       if self.db_url_passed:
-        out.write("PASS")
         if self.db_url_warn:
-          out.write(" (with warnings)")
-        out.write("\n\n")
+          logger.info("PASS (with warnings)")
+        else:
+          logger.info("PASS")
       else:
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
     # Query
     if self.runTests[self.QUERY]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING QUERY ({url})
-        -----------------------------------------------------
-        """.format(url=self.query_url+"2")))
-      out.flush()
+        -----------------------------------------------------""".format(url=self.query_url+"2")))
 
       url = self.benchmarker.generate_url(self.query_url + "2", self.port)
-      output = self.__curl_url(url, self.QUERY, out, err)
-      if self.validateQuery(output, out, err):
+      output = self.__curl_url(url, self.QUERY, logger)
+      if self.validateQuery(output, logger):
         self.query_url_passed = True
-        out.write(self.query_url + "2 - PASS\n\n")
+        logger.info(self.query_url + "2 - PASS")
       else:
         self.query_url_passed = False
-        out.write(self.query_url + "2 - FAIL\n\n")
-      out.write("-----------------------------------------------------\n\n")
-      out.flush()
-
+        logger.info(self.query_url + "2 - FAIL")
+      logger.info("-----------------------------------------------------")
+
       self.query_url_warn = False
       url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
-      output2 = self.__curl_url(url2, self.QUERY, out, err)
-      if not self.validateQueryOneOrLess(output2, out, err):
+      output2 = self.__curl_url(url2, self.QUERY, logger)
+      if not self.validateQueryOneOrLess(output2, logger):
         self.query_url_warn = True
-        out.write(self.query_url + "0 - WARNING\n\n")
+        logger.info(self.query_url + "0 - WARNING")
       else:
-        out.write(self.query_url + "0 - PASS\n\n")
-      out.write("-----------------------------------------------------\n\n")
-      out.flush()
+        logger.info(self.query_url + "0 - PASS")
+      logger.info("-----------------------------------------------------")
 
       url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
-      output3 = self.__curl_url(url3, self.QUERY, out, err)
-      if not self.validateQueryOneOrLess(output3, out, err):
+      output3 = self.__curl_url(url3, self.QUERY, logger)
+      if not self.validateQueryOneOrLess(output3, logger):
         self.query_url_warn = True
-        out.write(self.query_url + "foo - WARNING\n\n")
+        logger.info(self.query_url + "foo - WARNING")
       else:
-        out.write(self.query_url + "foo - PASS\n\n")
-      out.write("-----------------------------------------------------\n\n")
-      out.flush()
+        logger.info(self.query_url + "foo - PASS")
+      logger.info("-----------------------------------------------------")  
 
       url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
-      output4 = self.__curl_url(url4, self.QUERY, out, err)
-      if not self.validateQueryFiveHundredOrMore(output4, out, err):
+      output4 = self.__curl_url(url4, self.QUERY, logger)
+      if not self.validateQueryFiveHundredOrMore(output4, logger):
         self.query_url_warn = True
-        out.write(self.query_url + "501 - WARNING\n\n")
+        logger.info(self.query_url + "501 - WARNING")
       else:
-        out.write(self.query_url + "501 - PASS\n\n")
-      out.write("-----------------------------------------------------\n\n\n")
-      out.flush()
-
-      out.write("VALIDATING QUERY ... ")
-      if self.query_url_passed:
-        out.write("PASS")
-        if self.query_url_warn:
-          out.write(" (with warnings)")
-        out.write("\n\n")
+        logger.info(self.query_url + "501 - PASS")
+      logger.info("-----------------------------------------------------")
+
+      logger.info("VALIDATING QUERY ... ")
+      if self.query_url_passed and self.query_url_warn:
+        logger.info("PASS (with warnings)")
+      elif self.query_url_passed:
+        logger.info("PASS")
       else:
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
     # Fortune
     if self.runTests[self.FORTUNE]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING FORTUNE ({url})
-        -----------------------------------------------------
-        """.format(url = self.fortune_url)))
-      out.flush()
+        -----------------------------------------------------""".format(url = self.fortune_url)))
 
       url = self.benchmarker.generate_url(self.fortune_url, self.port)
-      output = self.__curl_url(url, self.FORTUNE, out, err)
-      out.write("VALIDATING FORTUNE ... ")
-      if self.validateFortune(output, out, err):
+      output = self.__curl_url(url, self.FORTUNE, logger)
+      logger.info("VALIDATING FORTUNE ... ")
+      if self.validateFortune(output, logger):
         self.fortune_url_passed = True
-        out.write("PASS\n\n")
+        logger.info("PASS")
       else:
         self.fortune_url_passed = False
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
     # Update
     if self.runTests[self.UPDATE]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING UPDATE ({url})
         -----------------------------------------------------
         """.format(url = self.update_url)))
-      out.flush()
 
       url = self.benchmarker.generate_url(self.update_url + "2", self.port)
-      output = self.__curl_url(url, self.UPDATE, out, err)
-      out.write("VALIDATING UPDATE ... ")
-      if self.validateUpdate(output, out, err):
+      output = self.__curl_url(url, self.UPDATE, logger)
+      logger.info("VALIDATING UPDATE ... ")
+      if self.validateUpdate(output, logger):
         self.update_url_passed = True
-        out.write("PASS\n\n")
+        logger.info("PASS")
       else:
         self.update_url_passed = False
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
     # plaintext
     if self.runTests[self.PLAINTEXT]:
-      out.write(textwrap.dedent("""
+      logger.info(textwrap.dedent("""
         -----------------------------------------------------
           VERIFYING PLAINTEXT ({url})
-        -----------------------------------------------------
-        """.format(url = self.plaintext_url)))
-      out.flush()
+        -----------------------------------------------------""".format(url = self.plaintext_url)))
 
       url = self.benchmarker.generate_url(self.plaintext_url, self.port)
-      output = self.__curl_url(url, self.PLAINTEXT, out, err)
-      out.write("VALIDATING PLAINTEXT ... ")
-      if self.validatePlaintext(output, out, err):
+      output = self.__curl_url(url, self.PLAINTEXT, logger)
+      logger.info("VALIDATING PLAINTEXT ... ")
+      if self.validatePlaintext(output, logger):
         self.plaintext_url_passed = True
-        out.write("PASS\n\n")
+        logger.info("PASS")
       else:
         self.plaintext_url_passed = False
-        out.write("FAIL\n\n")
-      out.flush
+        logger.info("FAIL")
 
   ############################################################
   # End verify_urls
@@ -535,13 +525,11 @@ class FrameworkTest:
   # JSON/DB/Query.
   ############################################################
   def benchmark(self, logger=log):
-    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
     
     # JSON
     if self.runTests[self.JSON]:
       try:
-        out.write("BENCHMARKING JSON ... ") 
-        out.flush()
+        logger.info("BENCHMARKING JSON ... ") 
         results = None
         output_file = self.benchmarker.output_file(self.name, self.JSON)
         if not os.path.exists(output_file):
@@ -550,19 +538,17 @@ class FrameworkTest:
             pass
         if self.json_url_passed:
           remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.JSON)
         self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
+        logger.info("Complete")
       except AttributeError:
         pass
 
     # DB
     if self.runTests[self.DB]:
       try:
-        out.write("BENCHMARKING DB ... ") 
-        out.flush()
+        logger.info("BENCHMARKING DB ... ") 
         results = None
         output_file = self.benchmarker.output_file(self.name, self.DB)
         warning_file = self.benchmarker.warning_file(self.name, self.DB)
@@ -575,18 +561,17 @@ class FrameworkTest:
             pass
         if self.db_url_passed:
           remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.DB)
         self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
-        out.write( "Complete\n" )
+        logger.info("Complete")
       except AttributeError:
         pass
 
     # Query
     if self.runTests[self.QUERY]:
       try:
-        out.write("BENCHMARKING Query ... ")
-        out.flush()
+        logger.info("BENCHMARKING Query ...")
         results = None
         output_file = self.benchmarker.output_file(self.name, self.QUERY)
         warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
@@ -599,19 +584,17 @@ class FrameworkTest:
             pass
         if self.query_url_passed:
           remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.QUERY)
         self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
+        logger.info("Complete")
       except AttributeError:
         pass
 
     # fortune
     if self.runTests[self.FORTUNE]:
       try:
-        out.write("BENCHMARKING Fortune ... ") 
-        out.flush()
+        logger.info("BENCHMARKING Fortune ... ") 
         results = None
         output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
         if not os.path.exists(output_file):
@@ -620,19 +603,17 @@ class FrameworkTest:
             pass
         if self.fortune_url_passed:
           remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.FORTUNE)
         self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
+        logger.info("Complete")
       except AttributeError:
         pass
 
     # update
     if self.runTests[self.UPDATE]:
       try:
-        out.write("BENCHMARKING Update ... ") 
-        out.flush()
+        logger.info("BENCHMARKING Update ... ") 
         results = None
         output_file = self.benchmarker.output_file(self.name, self.UPDATE)
         if not os.path.exists(output_file):
@@ -641,19 +622,17 @@ class FrameworkTest:
             pass
         if self.update_url_passed:
           remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.UPDATE)
         self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
+        logger.info( "Complete" )
       except AttributeError:
         pass
 
     # plaintext
     if self.runTests[self.PLAINTEXT]:
       try:
-        out.write("BENCHMARKING Plaintext ... ")
-        out.flush()
+        logger.info("BENCHMARKING Plaintext ... ")
         results = None
         output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
         if not os.path.exists(output_file):
@@ -665,10 +644,9 @@ class FrameworkTest:
-          self.__run_benchmark(remote_script, output_file, err)
+          self.__run_benchmark(remote_script, output_file, logger)
         results = self.__parse_test(self.PLAINTEXT)
         self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
+        logger.info( "Complete" )
       except AttributeError:
-        traceback.print_exc()
+        logger.exception("Error Running Benchmark for %s", self.name)
         pass
 
   ############################################################
@@ -819,12 +797,11 @@ class FrameworkTest:
   # template that uses weighttp to run the test. All the results
   # outputed to the output_file.
   ############################################################
-  def __run_benchmark(self, script, output_file, err):
+  def __run_benchmark(self, script, output_file, logger):
+    err = WrapLogger(logger, logging.ERROR)
     with open(output_file, 'w') as raw_file:
-	  
       p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
       p.communicate(script)
-      err.flush()
   ############################################################
   # End __run_benchmark
   ############################################################
@@ -880,7 +857,10 @@ class FrameworkTest:
   # Dump HTTP response and headers. Throw exception if there
   # is an HTTP error.
   ############################################################
-  def __curl_url(self, url, testType, out, err):
+  def __curl_url(self, url, testType, logger=log):
+    # Send output to our benchmark's logger for archival to file,
+    # but only show err on stdout by default
+    (out, err) = WrapLogger(logger, logging.DEBUG), WrapLogger(logger, logging.ERROR)
     output = None
     try:
       # Use -m 15 to make curl stop trying after 15sec.
@@ -890,10 +870,6 @@ class FrameworkTest:
       # error output for sure in stdout.
       # Use -sS to hide progress bar, but show errors.
       subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
-      # HTTP output may not end in a newline, so add that here.
-      out.write( "\n\n" )
-      out.flush()
-      err.flush()
 
       # We need to get the respond body from the curl and return it.
       p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
@@ -924,10 +900,7 @@ class FrameworkTest:
     self.benchmarker = benchmarker
     self.runTests = runTests
     self.fwroot = benchmarker.fwroot
-    
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
-    
+
     self.install_root="%s/%s" % (self.fwroot, "installs")
     if benchmarker.install_strategy is 'pertest':
       self.install_root="%s/pertest/%s" % (self.install_root, name)
@@ -993,14 +966,8 @@ class WrapLogger():
     self.level = level
     self.file = tempfile.TemporaryFile()
 
-  def write(self, str):
-    if self.level == "out":
-      self.logger.info(str)
-    elif self.level == "err":
-      self.logger.error(str)
-    else:
-      self.logger.error("Unknown level %s" % self.level)
-      self.logger.error(str)
+  def write(self, message):
+    self.logger.log(self.level, message)
 
   def __getattr__(self, name):
     return getattr(self.file, name)
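
The framework_test.py changes above give each validator a 'logger=log' default in place of the old out/err streams, so they can be called with or without the per-test logger. A contrived sketch of that pattern (not the actual validator bodies):

  import logging

  log = logging.getLogger('framework_test')  # module-level fallback

  def validatePlaintext(body, logger=log):
    # If the caller supplies the per-test logger, messages go to it;
    # otherwise they fall back to the module-level log.
    ok = body.lower().strip() == "hello, world!"
    logger.debug("plaintext valid: %s", ok)
    return ok

  perTestLogger = logging.getLogger('some_framework_test')
  validatePlaintext("Hello, World!", perTestLogger)  # uses the per-test logger
  validatePlaintext("Hello, World!")                 # falls back to the module log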

+ 25 - 0
toolset/benchmark/utils.py

@@ -0,0 +1,25 @@
+import tempfile
+
+
+class WrapLogger():
+  """
+  Wraps a Logger in a file-like interface, making it easy to pass a
+  Logger to subprocess, which expects file objects for its stdout
+  and stderr parameters.
+
+  Use: 
+    (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
+    subprocess.Popen(command, stdout=out, stderr=err)
+  """
+  def __init__(self, logger, level):
+    self.logger = logger
+    self.level = level
+    self.file = tempfile.TemporaryFile()
+
+  def write(self, message):
+    self.logger.log(self.level, message)
+
+  def __getattr__(self, name):
+    return getattr(self.file, name)
+
+
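
A small standalone example of WrapLogger's write() path, assuming toolset/benchmark is on sys.path; any Python-level caller that writes to a file object, such as traceback.print_exc, has its output re-emitted at the chosen log level:

  import logging
  import traceback

  from utils import WrapLogger  # assumes toolset/benchmark is on sys.path

  logging.basicConfig(level=logging.DEBUG)
  logger = logging.getLogger('example')

  out = WrapLogger(logger, logging.INFO)
  err = WrapLogger(logger, logging.ERROR)

  out.write("routed through logger.log(INFO, ...)")
  try:
    raise RuntimeError("boom")
  except RuntimeError:
    traceback.print_exc(file=err)  # each write() becomes an ERROR record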

+ 0 - 3
toolset/setup/linux/installer.py

@@ -389,9 +389,6 @@ EOF
     self.fwroot = benchmarker.fwroot
     self.strategy = install_strategy
     
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
-
     try:
       os.mkdir(self.install_dir)
     except OSError: