@@ -326,7 +326,7 @@ def verify_updates(old_worlds, new_worlds, updates_expected, url):
     return problems


-def verify_query_cases(self, cases, url, check_updates=False):
+def verify_query_cases(test_instance, cases, url, check_updates=False):
     '''
     The /updates and /queries tests accept a `queries` parameter
     that is expected to be between 1-500.
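Aside: `verify_query_cases` is a module-level function, not a method, so the old `self` parameter was misleading; callers always had to pass the test object explicitly. A minimal sketch of the call style after the rename (the case tuples and URL are illustrative assumptions, not the project's actual test data):

```python
# Each case is a (raw `queries` value, max infraction level) pair; the
# loop below appends the value to the URL and verifies the response.
cases = [
    ('2', 'fail'),    # well-formed value inside 1-500
    ('0', 'fail'),    # below MIN; the server is expected to clamp to 1
    ('foo', 'warn'),  # non-numeric; int(q) below raises ValueError
    ('501', 'warn'),  # above MAX; the server is expected to clamp to 500
]
problems = verify_query_cases(test_instance, cases,
                              'http://tfb-server:8080/queries?queries=')
```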
@@ -352,19 +352,19 @@ def verify_query_cases(self, cases, url, check_updates=False):
     MIN = 1

     # Initialization for query counting
     repetitions = 1
-    concurrency = max(self.config.concurrency_levels)
+    concurrency = max(test_instance.config.concurrency_levels)
     expected_queries = 20 * repetitions * concurrency
     expected_rows = expected_queries

     # Only load in the World table if we are doing an Update verification
     world_db_before = {}
     if check_updates:
-        world_db_before = databases[self.database.lower()].get_current_world_table(self.config)
+        world_db_before = databases[test_instance.database.lower()].get_current_world_table(test_instance.config)
         expected_queries = expected_queries + concurrency * repetitions # eventually bulk updates!

     for q, max_infraction in cases:
         case_url = url + q
-        headers, body, status = self.request_headers_and_body_and_status(case_url)
+        headers, body, status = test_instance.request_headers_and_body_and_status(case_url)

         try:
             queries = int(q) # drops down for 'foo' and ''
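To make the expected-count bookkeeping above concrete, here is the arithmetic under one plausible configuration (assuming the highest configured concurrency level is 512, which is also the default `verify_queries_count` uses below):

```python
repetitions = 1
concurrency = 512                                   # max(concurrency_levels), assumed
expected_queries = 20 * repetitions * concurrency   # 10240 reads at ?queries=20
expected_rows = expected_queries                    # one World row per read
# An update verification issues one UPDATE per request on top of the reads:
expected_queries += concurrency * repetitions       # 10752 queries in total
```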
@@ -378,14 +378,14 @@ def verify_query_cases(self, cases, url, check_updates=False):

             problems += verify_randomnumber_list(expected_len, headers, body,
                                                  case_url, max_infraction)
-            problems += verify_headers(self.request_headers_and_body_and_status, headers, case_url)
+            problems += verify_headers(test_instance.request_headers_and_body_and_status, headers, case_url)

             # Only check update changes if we are doing an Update verification and if we're testing
             # the highest number of queries, to ensure that we don't accidentally FAIL for a query
             # that only updates 1 item and happens to set its randomNumber to the same value it
             # previously held
             if check_updates and queries >= MAX:
-                world_db_after = databases[self.database.lower()].get_current_world_table(self.config)
+                world_db_after = databases[test_instance.database.lower()].get_current_world_table(test_instance.config)
                 problems += verify_updates(world_db_before, world_db_after,
                                            MAX, case_url)

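The snapshot pair explains the `queries >= MAX` guard: comparing the World table before and after is only conclusive when many rows were updated. A sketch of the comparison idea (an assumption; `verify_updates` is defined earlier in this file and its exact tolerance is outside this diff), treating each snapshot as a mapping from row id to randomNumber:

```python
# Sketch only: count rows whose randomNumber changed between snapshots.
changed = sum(1 for row_id, before in world_db_before.items()
              if world_db_after.get(row_id) != before)
# At MAX queries and high concurrency, far more than one row should
# differ, so a single same-value collision cannot cause a spurious FAIL.
```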
@@ -408,16 +408,16 @@ def verify_query_cases(self, cases, url, check_updates=False):
             # parameter input
             problems += verify_randomnumber_list(
                 expected_len, headers, body, case_url, max_infraction)
-            problems += verify_headers(self.request_headers_and_body_and_status, headers, case_url)
+            problems += verify_headers(test_instance.request_headers_and_body_and_status, headers, case_url)

-    if hasattr(self, 'database'):
+    if hasattr(test_instance, 'database'):
         # verify the number of queries and rows read for 20 queries, with a concurrency level of 512, with 2 repetitions
-        problems += verify_queries_count(self, "world", url + "20", concurrency, repetitions, expected_queries,
+        problems += verify_queries_count(test_instance, "world", url + "20", concurrency, repetitions, expected_queries,
                                          expected_rows, check_updates)
     return problems


-def verify_queries_count(self, tbl_name, url, concurrency=512, count=2, expected_queries=1024, expected_rows=1024,
+def verify_queries_count(test_instance, tbl_name, url, concurrency=512, count=2, expected_queries=1024, expected_rows=1024,
                          check_updates=False):
     '''
     Checks that the number of executed queries, at the given concurrency level,
@@ -431,20 +431,20 @@ def verify_queries_count(self, tbl_name, url, concurrency=512, count=2, expected

     problems = []

-    queries, rows, rows_updated, margin, trans_failures = databases[self.database.lower()].verify_queries(self.config,
-                                                                                                          tbl_name, url,
-                                                                                                          concurrency,
-                                                                                                          count,
-                                                                                                          check_updates)
+    queries, rows, rows_updated, margin, trans_failures = databases[test_instance.database.lower()].verify_queries(test_instance.config,
+                                                                                                                   tbl_name, url,
+                                                                                                                   concurrency,
+                                                                                                                   count,
+                                                                                                                   check_updates)

     isBulk = check_updates and (queries < 1.001 * expected_queries) and (queries > 0.999 * expected_queries)

     if check_updates and not isBulk: # Restore the normal queries number if bulk queries are not used
         expected_queries = (expected_queries - count * concurrency) * 2

-    # Add a margin based on the number of cpu cores
-    queries_margin = 1.015 # For a run on Travis
-    if multiprocessing.cpu_count() > 2:
+    # Add a margin based on whether we're running in a CI environment
+    queries_margin = 1.015 # For a run in CI environment
+    if not test_instance.config.is_ci:
         queries_margin = 1 # real run (Citrine or Azure) -> no margin on queries
     # Check for transactions failures (socket errors...)
     if trans_failures > 0: