
Make the regression test runner include "expected failure, ok" cases in its statistics. This previously caused a discrepancy between the runner and the test generator.

Alexander Gessler 10 years ago
parent commit c07cddff9e

+ 9 - 2
test/regression/run.py

@@ -65,7 +65,7 @@ import utils
 # -------------------------------------------------------------------------------
 EXPECTED_FAILURE_NOT_MET, DATABASE_LENGTH_MISMATCH, \
 DATABASE_VALUE_MISMATCH, IMPORT_FAILURE, \
-FILE_NOT_READABLE, COMPARE_SUCCESS = range(6)
+FILE_NOT_READABLE, COMPARE_SUCCESS, EXPECTED_FAILURE = range(7)
 
 messages = collections.defaultdict(lambda: "<unknown", {
         EXPECTED_FAILURE_NOT_MET:
@@ -88,7 +88,10 @@ messages = collections.defaultdict(lambda: "<unknown", {
 
         COMPARE_SUCCESS:
 """Results match archived reference dump in database\n\
-\tNumber of bytes compared: {0}"""
+\tNumber of bytes compared: {0}""",
+
+        EXPECTED_FAILURE:
+"""Expected failure was met.""",
 })
 
 outfilename_output = "run_regression_suite_output.txt"
@@ -211,6 +214,10 @@ def process_dir(d, outfile_results, zipin, result):
                 result.fail(fullpath, outfile_expect, pppreset, EXPECTED_FAILURE_NOT_MET)
                 outfile_results.write("Expected import to fail\n")
                 continue
+            elif failure and r:
+                result.ok(fullpath, pppreset, EXPECTED_FAILURE) 
+                outfile_results.write("Failed as expected, skipping.\n")
+                continue
             
             with open(outfile_expect, "wb") as s:
                 s.write(input_expected)