
Add "tags" support (#5257)

* Add tag support

* Add --list-tag

* Filter out broken tests
Nate, 5 years ago
commit beb84102dc

+ 2 - 1
frameworks/C/libreactor/benchmark_config.json

@@ -19,7 +19,8 @@
         "database_os": "Linux",
         "display_name": "libreactor",
         "notes": "",
-        "versus": "None"
+        "versus": "None",
+        "tags": ["broken"]
       }
     }
   ]

+ 2 - 1
frameworks/C/onion/benchmark_config.json

@@ -21,7 +21,8 @@
       "database_os": "Linux",
       "display_name": "onion",
       "notes": "",
-      "versus": "onion"
+      "versus": "onion",
+      "tags": ["broken"]
     }
   }]
 }
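
Both configs gain the new "tags" array, and per the metadata.py change further down, tests tagged "broken" are filtered out of the default run. A minimal sketch of reading the field, assuming the benchmark_config.json layout shown above (the iteration over named test entries is inferred from these snippets):

    import json

    # Load one framework's config; the path is relative to its directory.
    with open('benchmark_config.json') as f:
        config = json.load(f)

    for test in config.get('tests', []):
        for name, attrs in test.items():
            tags = attrs.get('tags', [])  # a missing "tags" key means untagged
            if 'broken' in tags:
                print('skip (broken):', name)
            else:
                print('run:', name)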

+ 17 - 1
toolset/run-tests.py

@@ -123,6 +123,11 @@ def main(argv=None):
         nargs='+',
         dest='test_lang',
         help='name of language directory containing all tests to run')
+    parser.add_argument(
+        '--tag',
+        nargs='+',
+        dest='tag',
+        help='tests to be run by tag name')
     parser.add_argument(
         '--exclude', default=None, nargs='+', help='names of tests to exclude')
     parser.add_argument(
@@ -147,7 +152,11 @@ def main(argv=None):
         action='store_true',
         default=False,
         help='lists all the known tests that can run')
-
+    parser.add_argument(
+        '--list-tag',
+        dest='list_tag',
+        default=False,
+        help='lists all the known tests with a specific tag')
     # Benchmark options
     parser.add_argument(
         '--duration',
@@ -216,6 +225,13 @@ def main(argv=None):
             for test in all_tests:
                 log(test.name)
 
+        elif config.list_tag:
+            all_tests = benchmarker.metadata.gather_tests()
+
+            for test in all_tests:
+                if hasattr(test, "tags") and config.list_tag in test.tags:
+                    log(test.name)
+
         elif config.parse:
             all_tests = benchmarker.metadata.gather_tests()
 

+ 2 - 0
toolset/utils/benchmark_config.py

@@ -38,6 +38,7 @@ class BenchmarkConfig:
         self.clean = args.clean
         self.mode = args.mode
         self.list_tests = args.list_tests
+        self.list_tag = args.list_tag
         self.max_concurrency = max(args.concurrency_levels)
         self.concurrency_levels = args.concurrency_levels
         self.cached_query_levels = args.cached_query_levels
@@ -50,6 +51,7 @@ class BenchmarkConfig:
         self.test = args.test
         self.test_dir = args.test_dir
         self.test_lang = args.test_lang
+        self.tag = args.tag
         self.network_mode = args.network_mode
         self.server_docker_host = None
         self.database_docker_host = None

+ 12 - 3
toolset/utils/metadata.py

@@ -105,10 +105,18 @@ class Metadata:
 
             # Filter
             for test in config_tests:
+                if hasattr(test, "tags"):
+                    if "broken" in test.tags:
+                        continue
+                    if self.benchmarker.config.tag:
+                        for t in self.benchmarker.config.tag:
+                            if t in test.tags and test.name not in exclude:
+                                tests.append(test)
+                                break
                 if len(include) > 0:
                     if test.name in include:
                         tests.append(test)
-                elif test.name not in exclude:
+                elif test.name not in exclude and not self.benchmarker.config.tag:
                     tests.append(test)
 
         # Ensure we were able to locate everything that was
@@ -121,7 +129,7 @@ class Metadata:
 
         tests.sort(key=lambda x: x.name)
 
-        return tests
+        return list(set(tests))
 
     def tests_to_run(self):
         '''
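
Condensed, the new filter applies these rules: tests tagged "broken" never run; when --tag is given, only tag matches (minus excludes) are picked and the default everything-not-excluded branch is suppressed; explicit includes still apply, so a test can be picked twice, which the final list(set(tests)) dedups (note this also discards the sort applied on the line just above it). A standalone sketch of the same rules over plain names; select is a made-up helper, not toolset API, and the framework names are illustrative:

    def select(tests, include=(), exclude=(), tags=()):
        """tests: dict mapping test name -> list of tags."""
        picked = []
        for name, test_tags in tests.items():
            if 'broken' in test_tags:  # broken tests are always skipped
                continue
            if tags and any(t in test_tags for t in tags) and name not in exclude:
                picked.append(name)      # selected by --tag
            if include:
                if name in include:
                    picked.append(name)  # selected explicitly by name
            elif name not in exclude and not tags:
                picked.append(name)      # default: run whatever is not excluded
        return sorted(set(picked))       # dedup, as list(set(tests)) does

    tests = {'libreactor': ['broken'], 'gemini': ['json'], 'onion': []}
    print(select(tests))                 # ['gemini', 'onion']
    print(select(tests, tags=['json']))  # ['gemini']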
@@ -245,7 +253,8 @@ class Metadata:
             "database_os": test.database_os,
             "display_name": test.display_name,
             "notes": test.notes,
-            "versus": test.versus
+            "versus": test.versus,
+            "tags": hasattr(test, "tags") and test.tags or []
         }, all_tests))
 
         with open(
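
The "tags" line in the last hunk uses the pre-ternary and/or idiom; for a list-valued attribute it behaves like getattr with a default. A quick demonstration, with a stand-in class for the real test object:

    class Test:
        pass

    t = Test()
    # Attribute absent: both forms fall back to the empty list.
    print(hasattr(t, 'tags') and t.tags or [])  # []
    print(getattr(t, 'tags', []))               # []

    t.tags = ['broken']
    # Attribute present and truthy: both return it.
    print(hasattr(t, 'tags') and t.tags or [])  # ['broken']
    print(getattr(t, 'tags', []))               # ['broken']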