Browse Source

Adding Header utility class

Hamilton Turner 11 years ago
parent
commit
85f978003c
3 changed files with 49 additions and 118 deletions
  1. 23 87
      toolset/benchmark/benchmarker.py
  2. 8 31
      toolset/benchmark/framework_test.py
  3. 18 0
      toolset/benchmark/utils.py

+ 23 - 87
toolset/benchmark/benchmarker.py

@@ -2,12 +2,13 @@ from setup.linux.installer import Installer
 from setup.linux import setup_util
 
 from benchmark import framework_test
+from utils import Header
 
 import os
 import json
 import subprocess
+import traceback
 import time
-import textwrap
 import pprint
 import csv
 import sys
@@ -104,11 +105,7 @@ class Benchmarker:
     ##########################
     # Setup client/server
     ##########################
-    print textwrap.dedent("""
-      =====================================================
-        Preparing Server, Database, and Client ...
-      =====================================================
-      """)
+    print Header("Preparing Server, Database, and Client ...", top='=', bottom='=')
     self.__setup_server()
     self.__setup_database()
     self.__setup_client()
@@ -120,22 +117,14 @@ class Benchmarker:
     ##########################
     # Run tests
     ##########################
-    print textwrap.dedent("""
-      =====================================================
-        Running Tests ...
-      =====================================================
-      """)
+    print Header("Running Tests...", top='=', bottom='=')
     result = self.__run_tests(all_tests)
 
     ##########################
     # Parse results
     ##########################  
     if self.mode == "benchmark":
-      print textwrap.dedent("""
-      =====================================================
-        Parsing Results ...
-      =====================================================
-      """)
+      print Header("Parsing Results ...", top='=', bottom='=')
       self.__parse_results(all_tests)
 
     self.__finish()
@@ -528,14 +517,10 @@ class Benchmarker:
       # These features do not work on Windows
       for test in tests:
         if __name__ == 'benchmark.benchmarker':
-          print textwrap.dedent("""
-            -----------------------------------------------------
-              Running Test: {name} ...
-            -----------------------------------------------------
-            """.format(name=test.name))
+          print Header("Running Test: %s" % test.name)
           with open('current_benchmark.txt', 'w') as benchmark_resume_file:
             benchmark_resume_file.write(test.name)
-          test_process = Process(target=self.__run_test, args=(test,))
+          test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
           test_process.start()
           test_process.join(self.run_test_timeout_seconds)
           self.__load_results()  # Load intermediate result from child process
@@ -580,6 +565,7 @@ class Benchmarker:
       os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
     except:
     pass
     with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
          open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
       if hasattr(test, 'skip'):
@@ -611,24 +597,15 @@ class Benchmarker:
       if self.results['frameworks'] != None and test.name in self.results['completed']:
         out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
         return exit_with_code(1)
-
       out.flush()
 
-      out.write( textwrap.dedent("""
-      =====================================================
-        Beginning {name}
-      -----------------------------------------------------
-      """.format(name=test.name)) )
+      out.write(Header("Beginning %s" % test.name, top='='))
       out.flush()
 
       ##########################
       # Start this test
       ##########################  
-      out.write( textwrap.dedent("""
-      -----------------------------------------------------
-        Starting {name}
-      -----------------------------------------------------
-      """.format(name=test.name)) )
+      out.write(Header("Starting %s" % test.name))
       out.flush()
       try:
         if test.requires_database():
@@ -643,11 +620,7 @@ class Benchmarker:
 
         if self.__is_port_bound(test.port):
           self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
-          err.write( textwrap.dedent("""
-            ---------------------------------------------------------
-              Error: Port {port} is not available before start {name}
-            ---------------------------------------------------------
-            """.format(name=test.name, port=str(test.port))) )
+          err.write(Header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
           err.flush()
           return exit_with_code(1)
 
@@ -656,11 +629,7 @@ class Benchmarker:
           test.stop(out, err)
           time.sleep(5)
           err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
-          err.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Stopped {name}
-            -----------------------------------------------------
-            """.format(name=test.name)) )
+          err.write(Header("Stopped %s" % test.name))
           err.flush()
           self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
           return exit_with_code(1)
@@ -678,11 +647,7 @@ class Benchmarker:
         # Benchmark this test
         ##########################
         if self.mode == "benchmark":
-          out.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Benchmarking {name} ...
-            -----------------------------------------------------
-            """.format(name=test.name)) )
+          out.write(Header("Benchmarking %s" % test.name))
           out.flush()
           test.benchmark(out, err)
           out.flush()
@@ -691,11 +656,7 @@ class Benchmarker:
         ##########################
         # Stop this test
         ##########################
-        out.write( textwrap.dedent("""
-        -----------------------------------------------------
-          Stopping {name}
-        -----------------------------------------------------
-        """.format(name=test.name)) )
+        out.write(Header("Stopping %s" % test.name))
         out.flush()
         test.stop(out, err)
         out.flush()
@@ -704,19 +665,11 @@ class Benchmarker:
 
         if self.__is_port_bound(test.port):
           self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
-          err.write( textwrap.dedent("""
-            -----------------------------------------------------
-              Error: Port {port} was not released by stop {name}
-            -----------------------------------------------------
-            """.format(name=test.name, port=str(test.port))) )
+          err.write(Header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
           err.flush()
           return exit_with_code(1)
 
-        out.write( textwrap.dedent("""
-        -----------------------------------------------------
-          Stopped {name}
-        -----------------------------------------------------
-        """.format(name=test.name)) )
+        out.write(Header("Stopped %s" % test.name))
         out.flush()
         time.sleep(5)
 
@@ -724,11 +677,8 @@ class Benchmarker:
         # Save results thus far into toolset/benchmark/latest.json
         ##########################################################
 
-        out.write( textwrap.dedent("""
-        ----------------------------------------------------
-        Saving results through {name}
-        ----------------------------------------------------
-        """.format(name=test.name)) )
+<<<<<<< HEAD
+        out.write(Header("Saving results through %s" % test.name))
         out.flush()
         self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
 
@@ -737,25 +687,15 @@ class Benchmarker:
           return exit_with_code(1)
       except (OSError, IOError, subprocess.CalledProcessError) as e:
         self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
-        err.write( textwrap.dedent("""
-        -----------------------------------------------------
-          Subprocess Error {name}
-        -----------------------------------------------------
-        {err}
-        {trace}
-        """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+        err.write(Header("Subprocess Error %s" % test.name))
+        traceback.print_exc(file=err)
         err.flush()
         try:
           test.stop(out, err)
         except (subprocess.CalledProcessError) as e:
           self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
-          err.write( textwrap.dedent("""
-          -----------------------------------------------------
-            Subprocess Error: Test .stop() raised exception {name}
-          -----------------------------------------------------
-          {err}
-          {trace}
-          """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
+          err.write(Header("Subprocess Error: Test .stop() raised exception %s" % test.name))
+          traceback.print_exc(file=err)
           err.flush()
         out.close()
         err.close()
@@ -764,11 +704,7 @@ class Benchmarker:
       # Parent process should catch it and cleanup/exit
       except (KeyboardInterrupt) as e:
         test.stop(out, err)
-        out.write( """
-        -----------------------------------------------------
-          Cleaning up....
-        -----------------------------------------------------
-        """)
+        out.write(Header("Cleaning up..."))
         out.flush()
         self.__finish()
         sys.exit(1)

+ 8 - 31
toolset/benchmark/framework_test.py

@@ -10,12 +10,13 @@ import pprint
 import sys
 import traceback
 import json
-import textwrap
 import logging
 import csv
 import shlex
 import math
 
+from utils import Header
+
 class FrameworkTest:
   ##########################################################################################
   # Class variables
@@ -489,11 +490,7 @@ class FrameworkTest:
 
     # JSON
     if self.runTests[self.JSON]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING JSON ({url})
-        -----------------------------------------------------
-        """.format(url = self.json_url)))
+      out.write(Header("VERIFYING JSON (%s)" % self.json_url))
       out.flush()
 
       url = self.benchmarker.generate_url(self.json_url, self.port)
@@ -511,11 +508,7 @@ class FrameworkTest:
 
     # DB
     if self.runTests[self.DB]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING DB ({url})
-        -----------------------------------------------------
-        """.format(url = self.db_url)))
+      out.write(Header("VERIFYING DB (%s)" % self.db_url))
       out.flush()
 
       url = self.benchmarker.generate_url(self.db_url, self.port)
@@ -544,11 +537,7 @@ class FrameworkTest:
 
     # Query
     if self.runTests[self.QUERY]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING QUERY ({url})
-        -----------------------------------------------------
-        """.format(url=self.query_url+"2")))
+      out.write(Header("VERIFYING QUERY (%s)" % self.query_url+"2"))
       out.flush()
 
       url = self.benchmarker.generate_url(self.query_url + "2", self.port)
@@ -610,11 +599,7 @@ class FrameworkTest:
 
     # Fortune
     if self.runTests[self.FORTUNE]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING FORTUNE ({url})
-        -----------------------------------------------------
-        """.format(url = self.fortune_url)))
+      out.write(Header("VERIFYING FORTUNE (%s)" % self.fortune_url))
       out.flush()
 
       url = self.benchmarker.generate_url(self.fortune_url, self.port)
@@ -631,11 +616,7 @@ class FrameworkTest:
 
     # Update
     if self.runTests[self.UPDATE]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING UPDATE ({url})
-        -----------------------------------------------------
-        """.format(url = self.update_url)))
+      out.write(Header("VERIFYING UPDATE (%s)" % self.update_url))
       out.flush()
 
       url = self.benchmarker.generate_url(self.update_url + "2", self.port)
@@ -653,11 +634,7 @@ class FrameworkTest:
 
     # plaintext
     if self.runTests[self.PLAINTEXT]:
-      out.write(textwrap.dedent("""
-        -----------------------------------------------------
-          VERIFYING PLAINTEXT ({url})
-        -----------------------------------------------------
-        """.format(url = self.plaintext_url)))
+      out.write(Header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
       out.flush()
 
       url = self.benchmarker.generate_url(self.plaintext_url, self.port)

+ 18 - 0
toolset/benchmark/utils.py

@@ -0,0 +1,18 @@
+
+class Header():
+  '''
+  Generates a clean header
+  '''
+
+  def __init__(self, message, top='-', bottom='-'):
+    self.message = message
+    self.top = top
+    self.bottom = bottom
+
+  def __str__(self):
+    topheader = self.top * 80
+    topheader = topheader[:80]
+    bottomheader = self.bottom * 80
+    bottomheader = bottomheader[:80]
+    return "%s\n  %s\n%s" % (topheader, self.message, bottomheader)
+