Enable real-time output from __run_test (e.g. simulate Linux tee)

Instead of opening file handles directly, just use the logging framework.
In addition to the normal StreamHandler, register a FileHandler to
save a log file from each test run.
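
A minimal sketch of the idea, assuming a logger named 'benchmark' and a
placeholder log path (not the toolset's actual values):

    import logging

    log = logging.getLogger('benchmark')
    log.setLevel(logging.DEBUG)
    log.addHandler(logging.StreamHandler())   # real-time console output, as before

    # Tee the same records into a per-test file
    fileHandler = logging.FileHandler('output.log', mode='w')
    fileHandler.setFormatter(logging.Formatter(
        "%(asctime)s: %(name)-12s: %(levelname)-8s %(message)s"))
    fileHandler.setLevel(logging.DEBUG)
    log.addHandler(fileHandler)

    log.info("appears on the console and in output.log")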

The setup.py files depend on out and err file objects, so we fake
them with a new class, WrapLogger. This lets us see real-time output
from those streams as well. It also suits the setup.py files, which
use subprocess heavily and pass errfile and outfile as parameters.
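
The adapter idea as a hypothetical sketch (FileLikeLogger stands in for
WrapLogger; the real class is in the framework_test.py diff below):

    import logging
    import tempfile

    logging.basicConfig(level=logging.INFO)

    class FileLikeLogger(object):
        def __init__(self, logger, level):
            self.logger = logger
            self.level = level                    # 'out' or 'err'
            self.file = tempfile.TemporaryFile()  # supplies flush(), fileno(), ...

        def write(self, text):
            # Writes reach the logger immediately instead of a buffered file
            if self.level == 'err':
                self.logger.error(text)
            else:
                self.logger.info(text)

        def __getattr__(self, name):
            # Any other file-object attribute is served by the temp file
            return getattr(self.file, name)

    out = FileLikeLogger(logging.getLogger('benchmark'), 'out')
    out.write("streamed through the logger in real time")
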
Hamilton Turner committed 11 years ago
commit e14a63290f

2 changed files with 155 additions and 155 deletions
  1. +122 -149  toolset/benchmark/benchmarker.py
  2. +33  -6    toolset/benchmark/framework_test.py

+122 -149  toolset/benchmark/benchmarker.py

@@ -532,196 +532,169 @@ class Benchmarker:
   ############################################################
   def __run_test(self, test):
     log.info("__run_test")
+
+    logfile = os.path.join(self.latest_results_directory, 'logs', test.name, 'output.log')
     try:
-      os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
+      os.makedirs(os.path.dirname(logfile))
     except:
       pass
-    with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
-         open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
-      if hasattr(test, 'skip'):
-        if test.skip.lower() == "true":
-          out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
-          return
-
-      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
-        # the operating system requirements of this test for the
-        # application server or the database server don't match
-        # our current environment
-        out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
-        return 
-      
-      # If the test is in the excludes list, we skip it
-      if self.exclude != None and test.name in self.exclude:
-        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
-        return
-      
-      # If the test does not contain an implementation of the current test-type, skip it
-      if self.type != 'all' and not test.contains_type(self.type):
-        out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
-        return
 
-      out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
-      out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
-      out.write("test.name: {name}\n".format(name=str(test.name)))
-      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
-      if self.results['frameworks'] != None and test.name in self.results['completed']:
-        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
-        return
+    # Create handler for file logging
+    logHandler = logging.FileHandler(logfile, mode='w')
+    f = logging.Formatter("%(asctime)s: %(name)-12s: %(levelname)-8s %(message)s")
+    logHandler.setFormatter(f)
+    logHandler.setLevel(logging.DEBUG)
+    log.addHandler(logHandler)
+
+    if hasattr(test, 'skip') and test.skip.lower() == "true":
+      log.info("Skipping %s: benchmark_config specifies to skip this test", test.name)
+      return
+
+    if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
+      log.info("Skipping %s: OS or Database OS specified in benchmark_config does not match the current environment", test.name)
+      return 
+    
+    if self.exclude != None and test.name in self.exclude:
+      log.info("Excluding %s: Added to the excludes list", test.name)
+      return
+    
+    if self.type != 'all' and not test.contains_type(self.type):
+      log.info("Skipping %s: Does not contain test for %s", test.name, self.type)
+      return
+
+    log.debug("test.os.lower() = %s  test.database_os.lower() = %s", test.os.lower(), test.database_os.lower())
+    log.debug("self.results['frameworks'] != None: %s", self.results['frameworks'] != None)
+    log.debug("test.name: %s", test.name)
+    log.debug("self.results['completed']: %s", self.results['completed'])
 
-      out.flush()
+    #if self.results['frameworks'] != None and test.name in self.results['completed']:
+    #  log.debug("Skipping %s: Found in latest saved data", test.name)
+    #  return
 
-      out.write( textwrap.dedent("""
+    log.info(textwrap.dedent("""
       =====================================================
         Beginning {name}
       -----------------------------------------------------
-      """.format(name=test.name)) )
-      out.flush()
+      """.format(name=test.name)))
 
-      ##########################
-      # Start this test
-      ##########################  
-      out.write( textwrap.dedent("""
+    # Start this test
+    log.info(textwrap.dedent("""
       -----------------------------------------------------
         Starting {name}
       -----------------------------------------------------
-      """.format(name=test.name)) )
-      out.flush()
-      try:
-        if test.requires_database():
-          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
-          p.communicate("""
-            sudo restart mysql
-            sudo restart mongodb
-            sudo /etc/init.d/postgresql restart
-          """)
-          time.sleep(10)
-
-        if self.__is_port_bound(test.port):
-          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
-          err.write( textwrap.dedent("""
-            ---------------------------------------------------------
-              Error: Port {port} is not available before start {name}
-            ---------------------------------------------------------
-            """.format(name=test.name, port=str(test.port))) )
-          err.flush()
-          return
-
-        result = test.start(out, err)
-        if result != 0: 
-          test.stop(out, err)
-          time.sleep(5)
-          err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
-          err.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Stopped {name}
-            -----------------------------------------------------
-            """.format(name=test.name)) )
-          err.flush()
-          self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
-          return
-        
-        time.sleep(self.sleep)
-
-        ##########################
-        # Verify URLs
-        ##########################
-        test.verify_urls(out, err)
-        out.flush()
-        err.flush()
-
-        ##########################
-        # Benchmark this test
-        ##########################
-        if self.mode == "benchmark":
-          out.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Benchmarking {name} ...
-            -----------------------------------------------------
-            """.format(name=test.name)) )
-          out.flush()
-          test.benchmark(out, err)
-          out.flush()
-          err.flush()
-
-        ##########################
-        # Stop this test
-        ##########################
-        out.write( textwrap.dedent("""
+      """.format(name=test.name)))
+
+    try:
+      if test.requires_database():
+        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
+        log.debug("Restarting database")
+        p.communicate("""
+          sudo restart mysql
+          sudo restart mongodb
+          sudo /etc/init.d/postgresql restart
+        """)
+        time.sleep(10)
+
+      if self.__is_port_bound(test.port):
+        self.__write_intermediate_results(test.name, "port %s is not available before start" % test.port)
+        log.error( textwrap.dedent("""
+          ---------------------------------------------------------
+            Error: Port {port} is not available, cannot start {name}
+          ---------------------------------------------------------
+          """.format(name=test.name, port=str(test.port))) )
+        return
+
+      result = test.start(log)
+      if result != 0: 
+        test.stop(log)
+        time.sleep(5)
+        log.error("ERROR: Problem starting %s", test.name)
+        log.error(textwrap.dedent("""
+          -----------------------------------------------------
+            Stopped {name}
+          -----------------------------------------------------
+          """.format(name=test.name)) )
+        self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
+        return
+      
+      log.debug("Sleeping for %s", self.sleep)
+      time.sleep(self.sleep)
+
+      # Verify URLs
+      test.verify_urls(log)
+
+      # Benchmark
+      if self.mode == "benchmark":
+        log.info( textwrap.dedent("""
+          -----------------------------------------------------
+            Benchmarking {name} ...
+          -----------------------------------------------------
+          """.format(name=test.name)) )
+        test.benchmark(log)
+
+      # Stop this test
+      log.info( textwrap.dedent("""
         -----------------------------------------------------
           Stopping {name}
         -----------------------------------------------------
         """.format(name=test.name)) )
-        out.flush()
-        test.stop(out, err)
-        out.flush()
-        err.flush()
-        time.sleep(5)
+      test.stop(log)
+      time.sleep(5)
 
-        if self.__is_port_bound(test.port):
-          self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
-          err.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Error: Port {port} was not released by stop {name}
-            -----------------------------------------------------
-            """.format(name=test.name, port=str(test.port))) )
-          err.flush()
-          return
+      if self.__is_port_bound(test.port):
+        self.__write_intermediate_results(test.name, "port %s was not released by stop" % test.port)
+        log.error( textwrap.dedent("""
+          -----------------------------------------------------
+            Error: Port {port} was not released by stop {name}
+          -----------------------------------------------------
+          """.format(name=test.name, port=str(test.port))) )
+        return
 
-        out.write( textwrap.dedent("""
+      log.info( textwrap.dedent("""
         -----------------------------------------------------
           Stopped {name}
         -----------------------------------------------------
         """.format(name=test.name)) )
-        out.flush()
-        time.sleep(5)
+      time.sleep(5)
 
-        ##########################################################
-        # Save results thus far into toolset/benchmark/latest.json
-        ##########################################################
+      ##########################################################
+      # Save results thus far into toolset/benchmark/latest.json
+      ##########################################################
 
-        out.write( textwrap.dedent("""
+      log.info( textwrap.dedent("""
         ----------------------------------------------------
         Saving results through {name}
         ----------------------------------------------------
         """.format(name=test.name)) )
-        out.flush()
-        self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
-      except (OSError, IOError, subprocess.CalledProcessError) as e:
-        self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
-        err.write( textwrap.dedent("""
+      self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
+    except (OSError, IOError, subprocess.CalledProcessError) as e:
+      self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
+      log.error( textwrap.dedent("""
         -----------------------------------------------------
           Subprocess Error {name}
         -----------------------------------------------------
         {err}
-        {trace}
-        """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
-        err.flush()
-        try:
-          test.stop(out, err)
-        except (subprocess.CalledProcessError) as e:
-          self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
-          err.write( textwrap.dedent("""
+        {trace}""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+      log.debug("Subprocess Error Details", exc_info=True)
+      try:
+        test.stop(log)
+      except (subprocess.CalledProcessError) as e:
+        self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
+        log.error( textwrap.dedent("""
           -----------------------------------------------------
             Subprocess Error: Test .stop() raised exception {name}
           -----------------------------------------------------
           {err}
-          {trace}
-          """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
-          err.flush()
-      except (KeyboardInterrupt, SystemExit) as e:
-        test.stop(out)
-        out.write( """
+          {trace}""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+    except (KeyboardInterrupt, SystemExit) as e:
+      test.stop(log)
+      log.info( """
         -----------------------------------------------------
           Cleaning up....
         -----------------------------------------------------
         """)
-        out.flush()
-        self.__finish()
-        sys.exit()
-
-      out.close()
-      err.close()
-
+      self.__finish()
+      sys.exit()
   ############################################################
  # End __run_test
   ############################################################

+33 -6  toolset/benchmark/framework_test.py

@@ -287,7 +287,7 @@ class FrameworkTest:
   # start(benchmarker)
  # Start the test using its setup file
   ############################################################
-  def start(self, out, err):
+  def start(self, logger=log):
     log.info("start")
 
     # Load profile for this installation
@@ -299,6 +299,7 @@ class FrameworkTest:
     set_iroot="export IROOT=%s" % self.install_root
     setup_util.replace_environ(config=profile, command=set_iroot)
 
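+    # Fake out/err file objects whose writes flow through the logger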
+    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
     return self.setup_module.start(self.benchmarker, out, err)
   ############################################################
   # End start
@@ -308,8 +309,9 @@ class FrameworkTest:
   # stop(benchmarker)
  # Stops the test using its setup file
   ############################################################
-  def stop(self, out, err):
-    log.info("stop")
+  def stop(self, logger=log):
+    logger.info("stop")
+    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
     return self.setup_module.stop(out, err)
   ############################################################
   # End stop
@@ -322,7 +324,9 @@ class FrameworkTest:
   # For each url, a flag will be set on this object for whether
   # or not it passed
   ############################################################
-  def verify_urls(self, out, err):
+  def verify_urls(self, logger=log):
+    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
+
     # JSON
     if self.runTests[self.JSON]:
       out.write(textwrap.dedent("""
@@ -530,7 +534,9 @@ class FrameworkTest:
   # Runs the benchmark for each type of test that it implements
   # JSON/DB/Query.
   ############################################################
-  def benchmark(self, out, err):
+  def benchmark(self, logger=log):
+    (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
+    
     # JSON
     if self.runTests[self.JSON]:
       try:
@@ -913,7 +919,6 @@ class FrameworkTest:
   # Constructor
   ##########################################################################################  
   def __init__(self, name, directory, benchmarker, runTests, args):
-    log.debug("__init__: %s in %s" % (name, directory))
     self.name = name
     self.directory = directory
     self.benchmarker = benchmarker
@@ -980,3 +985,25 @@ def parse_config(config, directory, benchmarker):
 ##############################################################
 # End parse_config
 ##############################################################
+
+import tempfile
+class WrapLogger():
+  def __init__(self, logger, level):
+    self.logger = logger
+    self.level = level
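+    # The temp file provides real file behavior (fileno, flush, seek) via __getattr__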
+    self.file = tempfile.TemporaryFile()
+
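+  # write() forwards to the logger so output appears in real time (tee-like)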
+  def write(self, str):
+    if self.level == "out":
+      self.logger.info(str)
+    elif self.level == "err":
+      self.logger.error(str)
+    else:
+      self.logger.error("Unknown level %s" % self.level)
+      self.logger.error(str)
+
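+  # Delegate any other file-object attribute to the backing temp file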
+  def __getattr__(self, name):
+    return getattr(self.file, name)
+
+
+