Browse Source

Replace manual text wrap with helper class

Our usage of textwrap was causing extra newlines
at the prefix and suffix of each message. This fixes
that issue and cleans up the codebase a bit.
Hamilton Turner 11 years ago
parent
commit
b8f151272f
3 changed files with 46 additions and 106 deletions
  1. 22 74
      toolset/benchmark/benchmarker.py
  2. 8 32
      toolset/benchmark/framework_test.py
  3. 16 0
      toolset/benchmark/utils.py

+ 22 - 74
toolset/benchmark/benchmarker.py

@@ -3,12 +3,12 @@ from setup.linux import setup_util
 
 
 from benchmark import framework_test
 from benchmark import framework_test
 from utils import WrapLogger
 from utils import WrapLogger
+from utils import Header
 
 
 import os
 import os
 import json
 import json
 import subprocess
 import subprocess
 import time
 import time
-import textwrap
 import pprint
 import pprint
 import csv
 import csv
 import sys
 import sys
@@ -109,11 +109,7 @@ class Benchmarker:
     ##########################
     ##########################
     # Setup client/server
     # Setup client/server
     ##########################
     ##########################
-    print textwrap.dedent("""
-      =====================================================
-        Preparing Server, Database, and Client ...
-      =====================================================
-      """)
+    print Header("Preparing Server, Database, and Client ...", top='=', bottom='=')
     self.__setup_server()
     self.__setup_server()
     self.__setup_database()
     self.__setup_database()
     self.__setup_client()
     self.__setup_client()
@@ -125,22 +121,14 @@ class Benchmarker:
     ##########################
     ##########################
     # Run tests
     # Run tests
     ##########################
     ##########################
-    print textwrap.dedent("""
-      =====================================================
-        Running Tests ...
-      =====================================================
-      """)
+    print Header("Running Tests...", top='=', bottom='=')
     self.__run_tests(all_tests)
     self.__run_tests(all_tests)
 
 
     ##########################
     ##########################
     # Parse results
     # Parse results
     ##########################  
     ##########################  
     if self.mode == "benchmark":
     if self.mode == "benchmark":
-      print textwrap.dedent("""
-      =====================================================
-        Parsing Results ...
-      =====================================================
-      """)
+      print Header("Parsing Results ...", top='=', bottom='=')
       self.__parse_results(all_tests)
       self.__parse_results(all_tests)
 
 
     self.__finish()
     self.__finish()
@@ -508,11 +496,7 @@ class Benchmarker:
       # These features do not work on Windows
       # These features do not work on Windows
       for test in tests:
       for test in tests:
         if __name__ == 'benchmark.benchmarker':
         if __name__ == 'benchmark.benchmarker':
-          print textwrap.dedent("""
-            -----------------------------------------------------
-              Running Test: {name} ...
-            -----------------------------------------------------
-            """.format(name=test.name))
+          print Header("Running Test: %s" % test.name)
           test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
           test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
           test_process.start()
           test_process.start()
           test_process.join(self.run_test_timeout_seconds)
           test_process.join(self.run_test_timeout_seconds)
@@ -595,16 +579,10 @@ class Benchmarker:
     #  log.debug("Skipping %s: Found in latest saved data", test.name)
     #  log.debug("Skipping %s: Found in latest saved data", test.name)
     #  return
     #  return
 
 
-    log.info(textwrap.dedent("""
-      =====================================================
-        Beginning {name}
-      -----------------------------------------------------""".format(name=test.name)))
+    log.info(Header("Beginning %s" % test.name, top='='))
 
 
     # Start this test
     # Start this test
-    log.info(textwrap.dedent("""
-      -----------------------------------------------------
-        Starting {name}
-      -----------------------------------------------------""".format(name=test.name)))
+    log.info(Header("Starting %s" % test.name))
 
 
     try:
     try:
       if test.requires_database():
       if test.requires_database():
@@ -620,10 +598,7 @@ class Benchmarker:
 
 
       if self.__is_port_bound(test.port):
       if self.__is_port_bound(test.port):
         self.__write_intermediate_results(test.name, "port %s is not available before start" % test.port)
         self.__write_intermediate_results(test.name, "port %s is not available before start" % test.port)
-        log.error( textwrap.dedent("""
-          ---------------------------------------------------------
-            Error: Port {port} is not available, cannot start {name}
-          ---------------------------------------------------------""".format(name=test.name, port=str(test.port))) )
+        log.error(Header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
         return
         return
 
 
       result = test.start(log)
       result = test.start(log)
@@ -631,10 +606,7 @@ class Benchmarker:
         test.stop(log)
         test.stop(log)
         time.sleep(5)
         time.sleep(5)
         log.error("ERROR: Problem starting %s", test.name)
         log.error("ERROR: Problem starting %s", test.name)
-        log.error(textwrap.dedent("""
-          -----------------------------------------------------
-            Stopped {name}
-          -----------------------------------------------------""".format(name=test.name)) )
+        log.error(Header("Stopped %s" % test.name))
         self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
         self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
         return
         return
       
       
@@ -646,69 +618,45 @@ class Benchmarker:
 
 
       # Benchmark
       # Benchmark
       if self.mode == "benchmark":
       if self.mode == "benchmark":
-        log.info( textwrap.dedent("""
-          -----------------------------------------------------
-            Benchmarking {name} ...
-          -----------------------------------------------------""".format(name=test.name)) )
+        log.info(Header("Benchmarking %s" % test.name))
         test.benchmark(log)
         test.benchmark(log)
 
 
       # Stop this test
       # Stop this test
-      log.info( textwrap.dedent("""
-        -----------------------------------------------------
-          Stopping {name}
-        -----------------------------------------------------""".format(name=test.name)) )
+      log.info(Header("Stopping %s" % test.name))
       test.stop(log)
       test.stop(log)
       time.sleep(5)
       time.sleep(5)
 
 
       if self.__is_port_bound(test.port):
       if self.__is_port_bound(test.port):
         self.__write_intermediate_results(test.name, "port %s was not released by stop" % test.port)
         self.__write_intermediate_results(test.name, "port %s was not released by stop" % test.port)
-        log.error( textwrap.dedent("""
-          -----------------------------------------------------
-            Error: Port {port} was not released by stop {name}
-          -----------------------------------------------------""".format(name=test.name, port=str(test.port))) )
-        log.handlers = []
+        log.error(Header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
         return
         return
 
 
-      log.info( textwrap.dedent("""
-        -----------------------------------------------------
-          Stopped {name}
-        -----------------------------------------------------""".format(name=test.name)) )
+      log.info(Header("Stopped %s" % test.name))
       time.sleep(5)
       time.sleep(5)
 
 
       ##########################################################
       ##########################################################
       # Save results thus far into toolset/benchmark/latest.json
       # Save results thus far into toolset/benchmark/latest.json
       ##########################################################
       ##########################################################
 
 
-      log.info( textwrap.dedent("""
-        ----------------------------------------------------
-        Saving results through {name}
-        ----------------------------------------------------""".format(name=test.name)) )
+      log.info(Header("Saving results through %s" % test.name))
       self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
       self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
     except (OSError, IOError, subprocess.CalledProcessError) as e:
     except (OSError, IOError, subprocess.CalledProcessError) as e:
       self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
       self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
-      log.error( textwrap.dedent("""
-        -----------------------------------------------------
-          Subprocess Error {name}
-        -----------------------------------------------------
-        {err}
-        {trace}""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+      log.error(Header("Subprocess Error %s" % test.name))
+      log.error("%s" % e)
+      log.error("%s" % sys.exc_info()[:2])
       log.debug("Subprocess Error Details", exc_info=True)
       log.debug("Subprocess Error Details", exc_info=True)
       try:
       try:
         test.stop(log)
         test.stop(log)
       except (subprocess.CalledProcessError) as e:
       except (subprocess.CalledProcessError) as e:
         self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
         self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
-        log.error( textwrap.dedent("""
-          -----------------------------------------------------
-            Subprocess Error: Test .stop() raised exception {name}
-          -----------------------------------------------------
-          {err}
-          {trace}""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+        log.error(Header("Subprocess Error: Test .stop() raised exception %s" % test.name))
+        log.error("%s" % e)
+        log.error("%s" % sys.exc_info()[:2])
+        log.debug("Subprocess Error Details", exc_info=True)
     except (KeyboardInterrupt, SystemExit) as e:
     except (KeyboardInterrupt, SystemExit) as e:
       test.stop(log)
       test.stop(log)
-      log.info( """
-        -----------------------------------------------------
-          Cleaning up....
-        -----------------------------------------------------""")
+      log.info(Header("Cleaning up..."))
       self.__finish()
       self.__finish()
       sys.exit()
       sys.exit()
   ############################################################
   ############################################################

+ 8 - 32
toolset/benchmark/framework_test.py

@@ -10,11 +10,12 @@ import pprint
 import sys
 import sys
 import traceback
 import traceback
 import json
 import json
-import textwrap
 import logging
 import logging
 log = logging.getLogger('framework_test')
 log = logging.getLogger('framework_test')
 
 
 from utils import WrapLogger
 from utils import WrapLogger
+from utils import Header
+
 class FrameworkTest:
 class FrameworkTest:
   """
   """
   Represents a framework test, including all types (JSON, plaintext, DB, etc)
   Represents a framework test, including all types (JSON, plaintext, DB, etc)
@@ -360,11 +361,7 @@ class FrameworkTest:
 
 
     # JSON
     # JSON
     if self.runTests[self.JSON]:
     if self.runTests[self.JSON]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING JSON ({url})
-        -----------------------------------------------------""".format(url = self.json_url)))
-
+      logger.info(Header("VERIFYING JSON (%s)" % self.json_url))
       url = self.benchmarker.generate_url(self.json_url, self.port)
       url = self.benchmarker.generate_url(self.json_url, self.port)
       output = self.__curl_url(url, self.JSON, logger)
       output = self.__curl_url(url, self.JSON, logger)
       logger.info("VALIDATING JSON ... ")
       logger.info("VALIDATING JSON ... ")
@@ -377,11 +374,7 @@ class FrameworkTest:
 
 
     # DB
     # DB
     if self.runTests[self.DB]:
     if self.runTests[self.DB]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING DB ({url})
-        -----------------------------------------------------""".format(url = self.db_url)))
-
+      logger.info(Header("VERIFYING DB (%s)" % self.db_url))
       url = self.benchmarker.generate_url(self.db_url, self.port)
       url = self.benchmarker.generate_url(self.db_url, self.port)
       output = self.__curl_url(url, self.DB, logger)
       output = self.__curl_url(url, self.DB, logger)
       if self.validateDb(output, logger):
       if self.validateDb(output, logger):
@@ -404,11 +397,7 @@ class FrameworkTest:
 
 
     # Query
     # Query
     if self.runTests[self.QUERY]:
     if self.runTests[self.QUERY]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING QUERY ({url})
-        -----------------------------------------------------""".format(url=self.query_url+"2")))
-
+      logger.info(Header("VERIFYING QUERY (%s)" % (self.query_url + "2")))
       url = self.benchmarker.generate_url(self.query_url + "2", self.port)
       url = self.benchmarker.generate_url(self.query_url + "2", self.port)
       output = self.__curl_url(url, self.QUERY, logger)
       output = self.__curl_url(url, self.QUERY, logger)
       if self.validateQuery(output, logger):
       if self.validateQuery(output, logger):
@@ -457,11 +446,7 @@ class FrameworkTest:
 
 
     # Fortune
     # Fortune
     if self.runTests[self.FORTUNE]:
     if self.runTests[self.FORTUNE]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING FORTUNE ({url})
-        -----------------------------------------------------""".format(url = self.fortune_url)))
-
+      logger.info(Header("VERIFYING FORTUNE (%s)" % self.fortune_url))
       url = self.benchmarker.generate_url(self.fortune_url, self.port)
       url = self.benchmarker.generate_url(self.fortune_url, self.port)
       output = self.__curl_url(url, self.FORTUNE, logger)
       output = self.__curl_url(url, self.FORTUNE, logger)
       logger.info("VALIDATING FORTUNE ... ")
       logger.info("VALIDATING FORTUNE ... ")
@@ -474,12 +459,7 @@ class FrameworkTest:
 
 
     # Update
     # Update
     if self.runTests[self.UPDATE]:
     if self.runTests[self.UPDATE]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING UPDATE ({url})
-        -----------------------------------------------------
-        """.format(url = self.update_url)))
-
+      logger.info(Header("VERIFYING UPDATE (%s)" % self.update_url))
       url = self.benchmarker.generate_url(self.update_url + "2", self.port)
       url = self.benchmarker.generate_url(self.update_url + "2", self.port)
       output = self.__curl_url(url, self.UPDATE, logger)
       output = self.__curl_url(url, self.UPDATE, logger)
       logger.info("VALIDATING UPDATE ... ")
       logger.info("VALIDATING UPDATE ... ")
@@ -492,11 +472,7 @@ class FrameworkTest:
 
 
     # plaintext
     # plaintext
     if self.runTests[self.PLAINTEXT]:
     if self.runTests[self.PLAINTEXT]:
-      logger.info(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING PLAINTEXT ({url})
-        -----------------------------------------------------""".format(url = self.plaintext_url)))
-
+      logger.info(Header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
       url = self.benchmarker.generate_url(self.plaintext_url, self.port)
       url = self.benchmarker.generate_url(self.plaintext_url, self.port)
       output = self.__curl_url(url, self.PLAINTEXT, logger)
       output = self.__curl_url(url, self.PLAINTEXT, logger)
       logger.info("VALIDATING PLAINTEXT ... ")
       logger.info("VALIDATING PLAINTEXT ... ")

+ 16 - 0
toolset/benchmark/utils.py

@@ -23,3 +23,19 @@ class WrapLogger():
     return getattr(self.file, name)
     return getattr(self.file, name)
 
 
 
 
class Header():
  """
  Renders a message as a banner framed by 80-column horizontal rules,
  used to mark sections in console/log output.

  Rendering is deferred to __str__, so instances can be passed directly
  to print statements or logging calls (both stringify their argument).
  """

  def __init__(self, message, top='-', bottom='-'):
    """
    message -- text displayed between the rules (indented two spaces)
    top     -- string repeated/truncated to form the 80-column top rule
    bottom  -- string repeated/truncated to form the 80-column bottom rule
    """
    self.message = message
    self.top = top
    self.bottom = bottom

  def __str__(self):
    # Repeat then truncate so multi-character separator strings still
    # yield exactly 80 columns (an empty separator yields an empty rule).
    topheader = (self.top * 80)[:80]
    bottomheader = (self.bottom * 80)[:80]
    return "%s\n  %s\n%s" % (topheader, self.message, bottomheader)