
Use python logging framework consistently

Hamilton Turner, 11 years ago
commit 16fa1e4f6b
3 changed files with 61 additions and 23 deletions
  1. toolset/benchmark/benchmarker.py (+40, -14)
  2. toolset/benchmark/framework_test.py (+7, -0)
  3. toolset/run-tests.py (+14, -9)
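
The pattern applied across all three files is the standard per-module logger: each module asks logging.getLogger for a named logger at import time, entry points are traced at INFO, details at DEBUG, and errors go through log.error instead of print. A minimal sketch of that pattern, assuming a stand-in do_work() helper and illustrative messages rather than the exact calls from the diff:

    import logging

    # One named logger per module, so records can be filtered or formatted
    # by origin (the diff uses 'benchmarker', 'framework_test', 'run-tests').
    log = logging.getLogger('benchmarker')

    def do_work():
        """Hypothetical stand-in for the real benchmark steps."""
        pass

    def run():
        log.info("run")              # major entry points logged at INFO
        log.debug("run internals")   # finer-grained tracing at DEBUG
        try:
            do_work()
        except IOError:
            log.error("Error writing results.json")  # errors replace bare print

    if __name__ == '__main__':
        # The caller (run-tests.py in this commit) configures the level once.
        logging.basicConfig(level=logging.INFO)
        run()
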

toolset/benchmark/benchmarker.py (+40, -14)

@@ -12,6 +12,7 @@ import pprint
 import csv
 import sys
 import logging
+log = logging.getLogger('benchmarker')
 import socket
 import glob
 from multiprocessing import Process
@@ -74,6 +75,7 @@ class Benchmarker:
   # Re-parses the raw data for a given timestamp
   ############################################################
   def parse_timestamp(self):
+    log.info("parse_timestamp")
     all_tests = self.__gather_tests
 
     for test in all_tests:
@@ -95,6 +97,8 @@ class Benchmarker:
   # running benchmarks against them.
   ############################################################
   def run(self):
+    log.info("run")
+
     ##########################
     # Get a list of all known
     # tests that we can run.
@@ -192,6 +196,7 @@ class Benchmarker:
   # test_type timestamp/test_type/test_name/raw 
   ############################################################
   def get_output_file(self, test_name, test_type):
+    log.debug("get_output_file")
     return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
   ############################################################
   # End get_output_file
@@ -203,6 +208,7 @@ class Benchmarker:
   # timestamp/test_type/test_name/raw 
   ############################################################
   def output_file(self, test_name, test_type):
+    log.debug("output_file")
     path = self.get_output_file(test_name, test_type)
     try:
       os.makedirs(os.path.dirname(path))
@@ -244,6 +250,7 @@ class Benchmarker:
   # full_results_directory
   ############################################################
   def full_results_directory(self):
+    log.debug("full_results_directory")
     path = os.path.join(self.result_directory, self.timestamp)
     try:
       os.makedirs(path)
@@ -259,6 +266,7 @@ class Benchmarker:
   ############################################################
 
   def latest_results_directory(self):
+    log.debug("latest_results_directory")
     path = os.path.join(self.result_directory,"latest")
     try:
       os.makedirs(path)
@@ -270,6 +278,9 @@ class Benchmarker:
   # report_results
   ############################################################
   def report_results(self, framework, test, results):
+    log.info("report_results: %s - %s" % (framework.name, test))
+    log.debug("report_results: %s" % results)
+
     if test not in self.results['rawData'].keys():
       self.results['rawData'][test] = dict()
 
@@ -301,6 +312,7 @@ class Benchmarker:
   ############################################################
   @property
   def __gather_tests(self):
+    log.info("__gather_tests")
     tests = []
 
     # Assume we are running from FrameworkBenchmarks
@@ -332,7 +344,7 @@ class Benchmarker:
         try:
           config = json.load(config_file)
         except:
-          print("Error loading '%s'." % config_file_name)
+          log.error("Error loading '%s'" % config_file_name)
           raise
 
       if config is None:
@@ -358,6 +370,8 @@ class Benchmarker:
   # Gathers all the frameworks
   ############################################################
   def __gather_frameworks(self):
+    log.info("__gather_frameworks")
+
     frameworks = []
     # Loop through each directory (we assume we're being run from the benchmarking root)
     for dirname, dirnames, filenames in os.walk('.'):
@@ -401,6 +415,7 @@ class Benchmarker:
   # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
   ############################################################
   def __setup_server(self):
+    log.info("__setup_server")
     try:
       if os.name == 'nt':
         return True
@@ -425,6 +440,7 @@ class Benchmarker:
   # changes.
   ############################################################
   def __setup_database(self):
+    log.info("__setup_database")
     p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
     p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
@@ -446,6 +462,7 @@ class Benchmarker:
   # changes.
   ############################################################
   def __setup_client(self):
+    log.info("__setup_client")
     p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
     p.communicate("""
       sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
@@ -472,15 +489,15 @@ class Benchmarker:
   ############################################################
 
   def __run_tests(self, tests):
-    logging.debug("Start __run_tests.")
-    logging.debug("__name__ = %s",__name__)
+    log.info("__run_tests")
+    log.debug("__run_tests with __name__ = %s",__name__)
 
     if self.os.lower() == 'windows':
-      logging.debug("Executing __run_tests on Windows")
+      log.info("Executing __run_tests on Windows")
       for test in tests:
         self.__run_test(test)
     else:
-      logging.debug("Executing __run_tests on Linux")
+      log.info("Executing __run_tests on Linux")
       # These features do not work on Windows
       for test in tests:
         if __name__ == 'benchmark.benchmarker':
@@ -494,10 +511,10 @@ class Benchmarker:
           test_process.join(self.run_test_timeout_seconds)
           self.__load_results()  # Load intermediate result from child process
           if(test_process.is_alive()):
-            logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
+            log.warn("Child process for {name} is still alive. Terminating.".format(name=test.name))
             self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
             test_process.terminate()
-    logging.debug("End __run_tests.")
+    log.info("End __run_tests")
 
   ############################################################
   # End __run_tests
@@ -514,6 +531,7 @@ class Benchmarker:
   # are needed.
   ############################################################
   def __run_test(self, test):
+    log.info("__run_test")
     try:
       os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
     except:
@@ -753,6 +771,8 @@ class Benchmarker:
   # are needed.
   ############################################################
   def __parse_results(self, tests):
+    log.info("__parse_results")
+    
    # Run the method to get the commit count of each framework.
     self.__count_commits()
    # Call the method which counts the sloc for each framework
@@ -760,7 +780,10 @@ class Benchmarker:
 
     # Time to create parsed files
     # Aggregate JSON file
-    with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
+    results_file=os.path.join(self.full_results_directory(), "results.json")
+    log.debug("Writing results to %s"%results_file)
+    log.debug("Results: %s" % json.dumps(self.results))
+    with open(results_file, "w") as f:
       f.write(json.dumps(self.results))
 
   ############################################################
@@ -773,6 +796,7 @@ class Benchmarker:
   # This is assumed to be run from the benchmark root directory
   #############################################################
   def __count_sloc(self):
+    log.info("__count_sloc")
     all_frameworks = self.__gather_frameworks()
     jsonResult = {}
 
@@ -798,6 +822,7 @@ class Benchmarker:
   # __count_commits
   ############################################################
   def __count_commits(self):
+    log.info("__count_commits")
     all_frameworks = self.__gather_frameworks()
 
     jsonResult = {}
@@ -820,12 +845,15 @@ class Benchmarker:
   # __write_intermediate_results
   ############################################################
   def __write_intermediate_results(self,test_name,status_message):
+    log.info("__write_intermediate_results: %s reports %s" % (test_name, status_message))
     try:
       self.results["completed"][test_name] = status_message
-      with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
+      latest_results = os.path.join(self.latest_results_directory, 'results.json')
+      log.debug("Dumping to %s: %s" % (latest_results, json.dumps(self.results)))
+      with open(latest_results, 'w') as f:
         f.write(json.dumps(self.results))
     except (IOError):
-      logging.error("Error writing results.json")
+      log.error("Error writing results.json")
 
   ############################################################
   # End __write_intermediate_results
@@ -842,6 +870,7 @@ class Benchmarker:
   # __finish
   ############################################################
   def __finish(self):
+    log.info("__finish")
     print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
     print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
 
@@ -862,9 +891,6 @@ class Benchmarker:
     self.__dict__.update(args)
     self.start_time = time.time()
     self.run_test_timeout_seconds = 3600
-
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
     
     # setup some additional variables
     if self.database_user == None: self.database_user = self.client_user
@@ -928,7 +954,7 @@ class Benchmarker:
         #Load json file into results object
         self.results = json.load(f)
     except IOError:
-      logging.warn("results.json for test %s not found.",self.name) 
+      log.warn("results.json for test %s not found.",self.name) 
     
     if self.results == None:
       self.results = dict()
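
The __run_tests hunk above also upgrades the hung-child-process path from debug prints to a WARN-level message before terminating. A condensed standalone sketch of that timeout pattern, with the test body and the timeout value as placeholder assumptions:

    import logging
    from multiprocessing import Process

    log = logging.getLogger('benchmarker')

    def run_single_test():
        """Hypothetical stand-in for Benchmarker.__run_test."""
        pass

    def run_with_timeout(name, timeout_seconds=3600):
        test_process = Process(target=run_single_test)
        test_process.start()
        test_process.join(timeout_seconds)
        if test_process.is_alive():
            # A hung child is surfaced as a warning, not a debug detail.
            log.warn("Child process for {name} is still alive. Terminating.".format(name=name))
            test_process.terminate()

    if __name__ == '__main__':
        logging.basicConfig(level=logging.WARNING)
        run_with_timeout('example-test')
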

toolset/benchmark/framework_test.py (+7, -0)

@@ -12,6 +12,7 @@ import traceback
 import json
 import textwrap
 import logging
+log = logging.getLogger('framework_test')
 
 class FrameworkTest:
   ##########################################################################################
@@ -287,6 +288,8 @@ class FrameworkTest:
  # Start the test using its setup file
   ############################################################
   def start(self, out, err):
+    log.info("start")
+
     # Load profile for this installation
     profile="%s/bash_profile.sh" % self.directory
     if not os.path.exists(profile):
@@ -306,6 +309,7 @@ class FrameworkTest:
  # Stops the test using its setup file
   ############################################################
   def stop(self, out, err):
+    log.info("stop")
     return self.setup_module.stop(out, err)
   ############################################################
   # End stop
@@ -670,6 +674,7 @@ class FrameworkTest:
   # Method meant to be run for a given timestamp
   ############################################################
   def parse_all(self):
+    log.info("parse_all")
     # JSON
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
       results = self.__parse_test(self.JSON)
@@ -707,6 +712,7 @@ class FrameworkTest:
   # __parse_test(test_type)
   ############################################################
   def __parse_test(self, test_type):
+    log.info("__parse_test") 
     try:
       results = dict()
       results['results'] = []
@@ -907,6 +913,7 @@ class FrameworkTest:
   # Constructor
   ##########################################################################################  
   def __init__(self, name, directory, benchmarker, runTests, args):
+    log.debug("__init__: %s in %s" % (name, directory))
     self.name = name
     self.directory = directory
     self.benchmarker = benchmarker

toolset/run-tests.py (+14, -9)

@@ -4,8 +4,9 @@ import ConfigParser
 import sys
 import os
 import multiprocessing
+import logging
+log = logging.getLogger('run-tests')
 import subprocess
-from pprint import pprint 
 from benchmark.benchmarker import Benchmarker
 from setup.linux.unbuffered import Unbuffered
 from setup.linux import setup_util
@@ -55,7 +56,7 @@ def main(argv=None):
             defaults = dict(config.items("Defaults"))
     except IOError:
         if args.conf_file != 'benchmark.cfg':
-            print 'Configuration file not found!'
+            log.warn('Configuration file not found!')
         defaults = { "client-host":"localhost"}
 
     ##########################################################
@@ -127,15 +128,21 @@ def main(argv=None):
 
     # Misc Options
     parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
-    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
+    parser.add_argument('--log', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='ERROR', help='Set the logging level')
     parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
     args = parser.parse_args(remaining_argv)
 
+    # Set up our initial logging level
+    numeric_level = getattr(logging, args.log.upper(), logging.ERROR)
+    if not isinstance(numeric_level, int):
+        raise ValueError('Invalid log level: %s' % args.log)
+    logging.basicConfig(level=numeric_level)
+
     # Verify and massage options
     if args.client_user is None:
-      print 'Usernames (e.g. --client-user and --database-user) are required!'
-      print 'The system will SSH into the client and the database for the install stage'
-      print 'Aborting'
+      log.critical('Usernames (e.g. --client-user and --database-user) are required!')
+      log.critical('The system will SSH into the client and the database for the install stage')
+      log.critical('Aborting')
       exit(1)
 
     if args.database_user is None:
@@ -144,9 +151,7 @@ def main(argv=None):
     if args.database_host is None:
       args.database_host = args.client_host
 
-    if args.verbose:
-        print 'Configuration options: '
-        pprint(args)
+    log.info("Configuration: %s" % str(args))
 
     benchmarker = Benchmarker(vars(args))
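
run-tests.py replaces the old --verbose flag with a --log level choice and configures the root logger from it before any further work. A minimal standalone sketch of that wiring (argument name and choices taken from the diff; the ConfigParser defaults handling is omitted):

    import argparse
    import logging

    log = logging.getLogger('run-tests')

    parser = argparse.ArgumentParser()
    parser.add_argument('--log', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='ERROR', help='Set the logging level')
    args = parser.parse_args()

    # argparse's choices already restrict the value, so getattr's ERROR
    # fallback and the isinstance check are belt-and-braces.
    numeric_level = getattr(logging, args.log.upper(), logging.ERROR)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
    logging.basicConfig(level=numeric_level)

    log.info("Configuration: %s" % str(args))

Running the script with --log DEBUG would then surface the per-step log.debug calls added across benchmarker.py and framework_test.py, while the default of ERROR keeps normal runs quiet.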