
General Toolset Improvements (WIP) (#3210)

* remove unused test directory

* move toolset print statements to functions

* include changes from #2536

* During verify updates, check for `world` or `World` table updates (postgres)

* make sure postgres is checking both world tables
Nate 7 years ago
parent
commit
31cee04403
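The gist of the new update verification, as a rough illustrative sketch only (the helper names `snapshot_world_table` and `run_update_request` are placeholders, not functions from this commit; the real logic lives in `verifications.py` and `framework_test_type.py` below): snapshot the World table, drive the /updates endpoint, snapshot it again, and flag the test when too few rows actually changed.

    # Hedged sketch of the verification flow introduced by this commit.
    # snapshot_world_table() and run_update_request() are hypothetical stand-ins.
    def verify_update_endpoint(snapshot_world_table, run_update_request, expected_updates):
        before = snapshot_world_table()                # e.g. {"1": 4321, "2": 873, ...}
        run_update_request(queries=expected_updates)   # hit the framework's /updates URL
        after = snapshot_world_table()
        changed = sum(1 for k in before if k in after and before[k] != after[k])
        if changed == 0:
            return [("fail", "No items were updated in the database.", "/updates")]
        if changed <= expected_updates * 0.95:
            return [("warn", "Fewer rows changed than expected.", "/updates")]
        return []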

+ 15 - 15
toolset/benchmark/benchmarker.py

@@ -50,7 +50,7 @@ class Benchmarker:
         all_tests = self.__gather_tests

         for test in all_tests:
-            print test.name
+            print(test.name)

         self.__finish()
     ############################################################
@@ -124,7 +124,7 @@ class Benchmarker:
         ##########################
         # Setup client/server
         ##########################
-        print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
+        print(header("Preparing Server, Database, and Client ...", top='=', bottom='='))
         with self.quiet_out.enable():
             self.__setup_server()
             self.__setup_database()
@@ -137,14 +137,14 @@ class Benchmarker:
         ##########################
         # Run tests
         ##########################
-        print header("Running Tests...", top='=', bottom='=')
+        print(header("Running Tests...", top='=', bottom='='))
         result = self.__run_tests(all_tests)

         ##########################
         # Parse results
         ##########################
         if self.mode == "benchmark":
-            print header("Parsing Results ...", top='=', bottom='=')
+            print(header("Parsing Results ...", top='=', bottom='='))
             self.__parse_results(all_tests)


@@ -471,7 +471,7 @@ class Benchmarker:
                 pbar.update(pbar_test)
                 pbar_test = pbar_test + 1
                 if __name__ == 'benchmark.benchmarker':
-                    print header("Running Test: %s" % test.name)
+                    print(header("Running Test: %s" % test.name))
                     with open(self.current_benchmark, 'w') as benchmark_resume_file:
                         benchmark_resume_file.write(test.name)
                     with self.quiet_out.enable():
@@ -540,7 +540,7 @@ class Benchmarker:
             out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
             out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
             if self.results['frameworks'] != None and test.name in self.results['completed']:
             if self.results['frameworks'] != None and test.name in self.results['completed']:
                 out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                 out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
-                print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
+                print('WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name)))
                 return exit_with_code(1)
                 return exit_with_code(1)
             out.flush()
             out.flush()
 
 
@@ -563,7 +563,7 @@ class Benchmarker:
                     self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                     out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                     out.flush()
-                    print "Error: Unable to recover port, cannot start test"
+                    print("Error: Unable to recover port, cannot start test")
                     return exit_with_code(1)

                 result, process = test.start(out)
@@ -636,7 +636,7 @@ class Benchmarker:
                 self.__upload_results()

                 if self.mode == "verify" and not passed_verify:
-                    print "Failed verify!"
+                    print("Failed verify!")
                     return exit_with_code(1)
             except KeyboardInterrupt:
                 self.__stop_test(test, out)
@@ -880,9 +880,9 @@ class Benchmarker:
             # or stream flush, so we have to ensure that the color code is printed repeatedly
             prefix = Fore.CYAN
             for line in header("Verification Summary", top='=', bottom='').split('\n'):
-                print prefix + line
+                print(prefix + line)
             for test in tests:
-                print prefix + "| Test: %s" % test.name
+                print(prefix + "| Test: {!s}".format(test.name))
                 if test.name in self.results['verify'].keys():
                     for test_type, result in self.results['verify'][test.name].iteritems():
                         if result.upper() == "PASS":
@@ -891,13 +891,13 @@ class Benchmarker:
                             color = Fore.YELLOW
                         else:
                             color = Fore.RED
-                        print prefix + "|       " + test_type.ljust(13) + ' : ' + color + result.upper()
+                        print(prefix + "|       " + test_type.ljust(13) + ' : ' + color + result.upper())
                 else:
-                    print prefix + "|      " + Fore.RED + "NO RESULTS (Did framework launch?)"
-            print prefix + header('', top='', bottom='=') + Style.RESET_ALL
+                    print(prefix + "|      " + Fore.RED + "NO RESULTS (Did framework launch?)")
+            print(prefix + header('', top='', bottom='=') + Style.RESET_ALL)

-        print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
-        print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
+        print("Time to complete: " + str(int(time.time() - self.start_time)) + " seconds")
+        print("Results are saved in " + os.path.join(self.result_directory, self.timestamp))

     ############################################################
     # End __finish

+ 7 - 7
toolset/benchmark/framework_test.py

@@ -198,7 +198,7 @@ class FrameworkTest:
       # TODO: hax; should dynamically know where this file is
       with open (self.fwroot + "/toolset/setup/linux/client.sh", "r") as myfile:
         remote_script=myfile.read()
-        print("\nINSTALL: %s" % self.benchmarker.client_ssh_string)
+        print("\nINSTALL: {!s}".format(self.benchmarker.client_ssh_string))
         p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" ") + ["bash"], stdin=subprocess.PIPE)
         p.communicate(remote_script)
         returncode = p.returncode
@@ -423,14 +423,14 @@ class FrameworkTest:
             color = Fore.RED

           verification.write(("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
-          print ("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)
+          print("   {!s}{!s}{!s} for {!s}\n".format(color, result.upper(), Style.RESET_ALL, url))
           if reason is not None and len(reason) != 0:
             for line in reason.splitlines():
               verification.write("     " + line + '\n')
-              print "     " + line
+              print("     " + line)
             if not test.passed:
               verification.write("     See %s\n" % specific_rules_url)
-              print "     See %s\n" % specific_rules_url
+              print("     See {!s}\n".format(specific_rules_url))

         [output_result(r1,r2,url) for (r1, r2, url) in results]

@@ -503,7 +503,7 @@ class FrameworkTest:
           self.__end_logging()

         results = self.__parse_test(test_type)
-        print "Benchmark results:"
+        print("Benchmark results:")
         pprint(results)

         self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
@@ -755,8 +755,8 @@ class FrameworkTest:
     try:
       x = getattr(self.benchmarker, name)
     except AttributeError:
-      print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
-      print "This is probably a bug"
+      print("AttributeError: {!s} not a member of FrameworkTest or Benchmarker".format(name))
+      print("This is probably a bug")
       raise
     return x


+ 71 - 2
toolset/benchmark/test_types/framework_test_type.py

@@ -1,8 +1,13 @@
 import copy
 import sys
+import os
+import json
 import subprocess
 from subprocess import PIPE
 import requests
+import MySQLdb
+import psycopg2
+import pymongo
 
 
 # Requests is built ontop of urllib3,
 # here we prevent general request logging
 # here we prevent general request logging
@@ -78,7 +83,7 @@ class FrameworkTestType:
         Downloads a URL and returns the HTTP response headers
         and body content as a tuple
         '''
-        print "Accessing URL %s:" % url
+        print("Accessing URL {!s}:".format(url))
         self.out.write("Accessing URL %s \n" % url)

         headers = {'Accept': self.accept_header}
@@ -89,7 +94,7 @@ class FrameworkTestType:
         self.out.write(str(headers))
         self.out.write(body)
         b = 40
-        print "  Response (trimmed to %d bytes): \"%s\"" % (b, body.strip()[:b])
+        print("  Response (trimmed to {:d} bytes): \"{!s}\"".format(b, body.strip()[:b]))
         return headers, body

     def verify(self, base_url):
@@ -125,3 +130,67 @@ class FrameworkTestType:
         Use before calling parse
         '''
         return copy.copy(self)
+
+    def get_current_world_table(self):
+        '''
+        Return a JSON object containing all 10,000 World items as they currently
+        exist in the database. This is used for verifying that entries in the
+        database have actually changed during an Update verification test.
+        '''
+        database_name = ""
+        results_json = []
+        try:
+            database_name = self.database.lower()
+        except AttributeError:
+            pass
+
+        if database_name == "mysql":
+            try:
+                db = MySQLdb.connect(os.environ.get("DBHOST"), "benchmarkdbuser", "benchmarkdbpass", "hello_world")
+                cursor = db.cursor()
+                cursor.execute("SELECT * FROM World")
+                results = cursor.fetchall()
+                results_json.append(json.loads(json.dumps(dict(results))))
+                db.close()
+            except Exception as e:
+                print("ERROR: Unable to load current MySQL World table.")
+                print(e)
+        elif database_name == "postgres":
+            try:
+                db = psycopg2.connect(host=os.environ.get("DBHOST"),
+                                      port="5432",
+                                      user="benchmarkdbuser",
+                                      password="benchmarkdbpass",
+                                      database="hello_world")
+                cursor = db.cursor()
+                cursor.execute("SELECT * FROM \"World\"")
+                results = cursor.fetchall()
+                results_json.append(json.loads(json.dumps(dict(results))))
+                cursor = db.cursor()
+                cursor.execute("SELECT * FROM \"world\"")
+                results = cursor.fetchall()
+                results_json.append(json.loads(json.dumps(dict(results))))
+                db.close()
+            except Exception as e:
+                print("ERROR: Unable to load current Postgres World table.")
+                print(e)
+        elif database_name == "mongodb":
+            try:
+                worlds_json = {}
+                connection = pymongo.MongoClient(host=os.environ.get("DBHOST"))
+                db = connection.hello_world
+                for world in db.world.find():
+                    if "randomNumber" in world:
+                        if "id" in world:
+                            worlds_json[str(int(world["id"]))] = int(world["randomNumber"])
+                        elif "_id" in world:
+                            worlds_json[str(int(world["_id"]))] = int(world["randomNumber"])
+                results_json.append(worlds_json)
+                connection.close()
+            except Exception as e:
+                print("ERROR: Unable to load current MongoDB World table.")
+                print(e)
+        else:
+            raise ValueError("Database: {!s} does not exist".format(database_name))
+
+        return results_json

+ 2 - 2
toolset/benchmark/test_types/update_type.py

@@ -9,7 +9,7 @@ class UpdateTestType(FrameworkTestType):
             'name': 'update',
             'accept_header': self.accept('json'),
             'requires_db': True,
-            'args': ['update_url']
+            'args': ['update_url', 'database']
         }
         FrameworkTestType.__init__(self, **kwargs)

@@ -32,7 +32,7 @@ class UpdateTestType(FrameworkTestType):
             ('501', 'warn'),
             ('',    'fail')
         ]
-        problems = verify_query_cases(self, cases, url)
+        problems = verify_query_cases(self, cases, url, True)

         if len(problems) == 0:
             return [('pass', '', url + case) for (case, _) in cases]

+ 63 - 9
toolset/benchmark/test_types/verifications.py

@@ -1,5 +1,6 @@
 import json
 import re
+import math


 def basic_body_verification(body, url, is_json_check=True):
@@ -10,7 +11,6 @@ def basic_body_verification(body, url, is_json_check=True):
     problems encountered, always as a list. If len(problems) > 0,
     then the response body does not have to be examined further and the caller
     should handle the failing problem(s).
-
     Plaintext and Fortunes set `is_json_check` to False
     '''

@@ -134,8 +134,8 @@ def verify_helloworld_object(json_object, url):

 def verify_randomnumber_object(db_object, url, max_infraction='fail'):
     '''
-    Ensures that `db_object` is a JSON object with 
-    keys 'id' and 'randomNumber' that both map to ints. 
+    Ensures that `db_object` is a JSON object with
+    keys 'id' and 'randomNumber' that both map to ints.
     Should closely resemble:
     { "id": 2354, "randomNumber": 8952 }
     '''
@@ -201,11 +201,11 @@ def verify_randomnumber_object(db_object, url, max_infraction='fail'):

 def verify_randomnumber_list(expected_len, headers, body, url, max_infraction='fail'):
     '''
-    Validates that the object is a list containing a number of 
+    Validates that the object is a list containing a number of
     randomnumber object. Should closely resemble:
     [{ "id": 2354, "randomNumber": 8952 }, { "id": 4421, "randomNumber": 32 }, ... ]
     '''
-    
+
     response, problems = basic_body_verification(body, url)

     if len(problems) > 0:
@@ -249,16 +249,58 @@ def verify_randomnumber_list(expected_len, headers, body, url, max_infraction='f

     return problems

+def verify_updates(old_worlds, new_worlds, updates_expected, url):
+    '''
+    Validates that the /updates requests actually updated values in the database and didn't
+    just return a JSON list of the correct number of World items.
+
+    old_worlds  a JSON object containing the state of the Worlds table BEFORE the /updates requests
+    new_worlds  a JSON object containing the state of the Worlds table AFTER the /updates requests
+    If no items were updated, this validation test returns a "fail."
+
+    If only some items were updated (within a 5% margin of error), this test returns a "warn".
+    This is to account for the unlikely, but possible situation where an entry in the World
+    table is updated to the same value it was previously set as.
+    '''
+    successful_updates = 0
+    problems = []
+
+    n = 0
+    while n < len(old_worlds) and successful_updates == 0:
+        print(old_worlds[n]['1'])
+        for i in range(1, 10001):
+            try:
+                entry_id = str(i)
+                if entry_id in old_worlds[n] and entry_id  in new_worlds[n]:
+                    if old_worlds[n][entry_id] != new_worlds[n][entry_id]:
+                        successful_updates += 1
+            except Exception as e:
+                print(e)
+        n += 1
+
+    if successful_updates == 0:
+        problems.append(
+            ("fail", "No items were updated in the database.", url))
+    elif successful_updates <= (updates_expected * 0.90):
+        problems.append(
+            ("fail", "Only %s items were updated in the database out of roughly %s expected." % (successful_updates, updates_expected), url))
+    elif successful_updates <= (updates_expected * 0.95):
+        problems.append(
+            ("warn",
+             "There may have been an error updating the database. Only %s items were updated in the database out of the roughly %s expected." % (
+                 successful_updates, updates_expected),
+             url))
+
+    return problems

-def verify_query_cases(self, cases, url):
+def verify_query_cases(self, cases, url, check_updates=False):
     '''
-    The the /updates and /queries tests accept a `queries` parameter
+    The /updates and /queries tests accept a `queries` parameter
     that is expected to be between 1-500.
     This method execises a framework with different `queries` parameter values
     then verifies that the framework responds appropriately.
     The `cases` parameter should be a list of 2-tuples containing the query case
     and the consequence level should the cases fail its verifications, e.g.:
-
     cases = [
         ('2',   'fail'),
         ('0',   'fail'),
@@ -266,7 +308,6 @@ def verify_query_cases(self, cases, url):
         ('501', 'warn'),
         ('',    'fail')
     ]
-
     The reason for using 'warn' is generally for a case that will be allowed in the
     current run but that may/will be a failing case in future rounds. The cases above
     suggest that not sanitizing the `queries` parameter against non-int input, or failing
@@ -277,6 +318,11 @@ def verify_query_cases(self, cases, url):
     MAX = 500
     MIN = 1

+    # Only load in the World table if we are doing an Update verification
+    world_db_before = {}
+    if check_updates:
+        world_db_before = self.get_current_world_table()
+
     for q, max_infraction in cases:
     for q, max_infraction in cases:
         case_url = url + q
         case_url = url + q
         headers, body = self.request_headers_and_body(case_url)
         headers, body = self.request_headers_and_body(case_url)
@@ -295,6 +341,14 @@ def verify_query_cases(self, cases, url):
                 expected_len, headers, body, case_url, max_infraction)
             problems += verify_headers(headers, case_url)

+            # Only check update changes if we are doing an Update verification and if we're testing
+            # the highest number of queries, to ensure that we don't accidentally FAIL for a query
+            # that only updates 1 item and happens to set its randomNumber to the same value it
+            # previously held
+            if check_updates and queries >= MAX:
+                world_db_after = self.get_current_world_table()
+                problems += verify_updates(world_db_before, world_db_after, MAX, case_url)
+
         except ValueError:
             warning = (
                 '%s given for stringy `queries` parameter %s\n'

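For reference, a worked reading of the thresholds used by `verify_updates` above (the numbers come straight from the committed code; the snippet itself is only illustrative):

    # With the MAX=500 case that verify_query_cases feeds into verify_updates:
    updates_expected = 500
    fail_cutoff = updates_expected * 0.90   # 450: 1..450 changed rows -> "fail"
    warn_cutoff = updates_expected * 0.95   # 475: 451..475 changed rows -> "warn"
    # 476 or more changed rows adds no problem entry; 0 changed rows is always a "fail".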
+ 1 - 1
toolset/benchmark/utils.py

@@ -99,7 +99,7 @@ def gather_tests(include = [], exclude=[], benchmarker=None):
                 config = json.load(config_file)
             except ValueError:
                 # User-friendly errors
-                print("Error loading '%s'." % config_file_name)
+                print("Error loading '{!s}'.".format(config_file_name))
                 raise

         # Find all tests in the config file

+ 1 - 2
toolset/setup/linux/prerequisites.sh

@@ -33,8 +33,7 @@ sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options:
 sudo pip install colorama==0.3.1
 # Version 2.3 has a nice Counter() and other features
 # but it requires --allow-external and --allow-unverified
-sudo pip install progressbar==2.2
-sudo pip install requests
+sudo pip install progressbar==2.2 requests MySQL-python psycopg2 pymongo

 # Get the ulimit from the benchmark config
 if [ -f benchmark.cfg ]; then

+ 0 - 35
toolset/test/test-run-tests.py

@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import ConfigParser
-import sys
-import os
-import multiprocessing
-import itertools
-import copy
-import subprocess
-from pprint import pprint 
-from benchmark.benchmarker import Benchmarker
-from setup.linux.unbuffered import Unbuffered
-from setup.linux import setup_util
-
-from run-tests import StoreSeqAction
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--foo', action=StoreSeqAction)
-tests = ["1", "1,", "0.23",                       # Single numbers
-        "1,5,7", "1,2,-3", "1,1000,12,1,1,1,1,1", # Lists
-        "1:2:10", "1:2", "10:-2:0",               # Sequences
-        "1,2:1:5"                                 # Complex
-]
-for test in tests:
-  try:
-    t = "--foo %s" % test
-    print "Testing %s" % test
-    print "  %s" % parser.parse_args(t.split())
-    print "  Success"
-  except Exception as e: 
-    print "  Exception! %s" % e
-    continue
-
-# vim: sw=2