
Merge pull request #14208 from aws-lumberyard-dev/tiaf_multi_suite_multi_label_exclude

TIAF Python Rollout: Opt-In Phase 1
John 2 years ago
parent commit 5c78d22ce4
53 changed files with 754 additions and 943 deletions
  1. + 1 - 0    AutomatedTesting/Gem/PythonTests/WhiteBox/CMakeLists.txt
  2. + 1 - 0    CMakeLists.txt
  3. + 2 - 1    Code/Tools/TestImpactFramework/CMakeLists.txt
  4. + 7 - 3    Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactCommandLineOptions.h
  5. + 3 - 0    Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactCommandLineOptionsUtils.h
  6. + 8 - 3    Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactConsoleTestSequenceEventHandler.h
  7. + 22 - 15  Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactCommandLineOptions.cpp
  8. + 17 - 6   Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactCommandLineOptionsUtils.cpp
  9. + 13 - 10  Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactConsoleTestSequenceEventHandler.cpp
  10. + 3 - 2   Code/Tools/TestImpactFramework/Frontend/Console/Native/Code/Source/TestImpactConsoleMain.cpp
  11. + 3 - 2   Code/Tools/TestImpactFramework/Frontend/Console/Python/Code/Source/TestImpactConsoleMain.cpp
  12. + 35 - 16 Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactClientSequenceReport.h
  13. + 0 - 12  Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactClientSequenceReportSerializer.h
  14. + 14 - 6  Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactRuntime.h
  15. + 13 - 8  Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactTestSequence.h
  16. + 23 - 40 Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactUtils.h
  17. + 24 - 0  Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h
  18. + 5 - 0   Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Artifact/Static/TestImpactTestSuiteMeta.h
  19. + 9 - 4   Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Target/Common/TestImpactTestTarget.cpp
  20. + 4 - 1   Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Target/Common/TestImpactTestTarget.h
  21. + 0 - 27  Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/TestEngine/Common/TestImpactTestEngine.h
  22. + 7 - 4   Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/TestImpactRuntimeUtils.h
  23. + 31 - 0  Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.cpp
  24. + 8 - 4   Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactClientSequenceReport.cpp
  25. + 7 - 252 Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactClientSequenceReportSerializer.cpp
  26. + 7 - 266 Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactUtils.cpp
  27. + 2 - 0   Code/Tools/TestImpactFramework/Runtime/Common/Code/testimpactframework_runtime_common_files.cmake
  28. + 6 - 3   Code/Tools/TestImpactFramework/Runtime/Native/Code/Include/TestImpactFramework/Native/TestImpactNativeRuntime.h
  29. + 36 - 23 Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/Artifact/Factory/TestImpactNativeTestTargetMetaMapFactory.cpp
  30. + 5 - 2   Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/Artifact/Factory/TestImpactNativeTestTargetMetaMapFactory.h
  31. + 38 - 5  Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/TestEngine/Native/TestImpactNativeTestEngine.cpp
  32. + 28 - 16 Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/TestImpactNativeRuntime.cpp
  33. + 17 - 2  Code/Tools/TestImpactFramework/Runtime/Python/Code/Include/TestImpactFramework/Python/TestImpactPythonRuntime.h
  34. + 26 - 14 Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/Artifact/Factory/TestImpactPythonTestTargetMetaMapFactory.cpp
  35. + 6 - 3   Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/Artifact/Factory/TestImpactPythonTestTargetMetaMapFactory.h
  36. + 4 - 10  Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestEngine/Python/TestImpactPythonTestEngine.cpp
  37. + 0 - 2   Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestEngine/Python/TestImpactPythonTestEngine.h
  38. + 38 - 32 Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestImpactPythonRuntime.cpp
  39. + 7 - 2   cmake/LYTestWrappers.cmake
  40. + 8 - 3   cmake/TestImpactFramework/ConsoleFrontendConfig.in
  41. + 44 - 35 cmake/TestImpactFramework/LYTestImpactFramework.cmake
  42. + 68 - 0  cmake/TestImpactFramework/TestImpactTestTargetConfig.cmake
  43. + 7 - 16  scripts/build/Platform/Windows/build_config.json
  44. + 9 - 2   scripts/build/TestImpactAnalysis/Testing/conftest.py
  45. + 45 - 31 scripts/build/TestImpactAnalysis/Testing/test_tiaf_unit_tests.py
  46. + 2 - 1   scripts/build/TestImpactAnalysis/mars_utils.py
  47. + 6 - 6   scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage.py
  48. + 8 - 9   scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage_local.py
  49. + 10 - 10 scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage_s3.py
  50. + 38 - 25 scripts/build/TestImpactAnalysis/test_impact/base_test_impact.py
  51. + 2 - 1   scripts/build/TestImpactAnalysis/test_impact/runtime_test_impact_args.py
  52. + 25 - 7  scripts/build/TestImpactAnalysis/tiaf_driver.py
  53. + 2 - 1   scripts/build/TestImpactAnalysis/tiaf_report_constants.py

+ 1 - 0
AutomatedTesting/Gem/PythonTests/WhiteBox/CMakeLists.txt

@@ -22,5 +22,6 @@ if(PAL_TRAIT_WHITEBOX_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PA
             AutomatedTesting.Assets
         COMPONENT
             WhiteBox
+        LABELS REQUIRES_tiaf
     )
 endif()

+ 1 - 0
CMakeLists.txt

@@ -46,6 +46,7 @@ include(cmake/Install.cmake)
 include(cmake/LYWrappers.cmake)
 include(cmake/Gems.cmake)
 include(cmake/UnitTest.cmake)
+include(cmake/TestImpactFramework/TestImpactTestTargetConfig.cmake) # LYTestWrappers dependency
 include(cmake/LYTestWrappers.cmake)
 include(cmake/Monolithic.cmake)
 include(cmake/SettingsRegistry.cmake)

+ 2 - 1
Code/Tools/TestImpactFramework/CMakeLists.txt

@@ -10,9 +10,10 @@ o3de_pal_dir(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${O
 
 include(${pal_dir}/PAL_${PAL_PLATFORM_NAME_LOWERCASE}.cmake)
 
-if(PAL_TRAIT_TEST_IMPACT_FRAMEWORK_SUPPORTED AND LY_TEST_IMPACT_ACTIVE)
+if(PAL_TRAIT_TEST_IMPACT_FRAMEWORK_SUPPORTED)
     add_subdirectory(Runtime)
     add_subdirectory(Frontend)
+
     ly_add_pytest(
         NAME TiafToolsTest
         PATH ${LY_ROOT_FOLDER}/scripts/build/TestImpactAnalysis/Testing/test_tiaf_tools.py

+ 7 - 3
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactCommandLineOptions.h

@@ -100,8 +100,11 @@ namespace TestImpact
         //! Returns the global test sequence timeout to use (if any).
         const AZStd::optional<AZStd::chrono::milliseconds>& GetGlobalTimeout() const;
 
-        //! Returns the filter for test suite that will be allowed to be run.
-        SuiteType GetSuiteFilter() const;
+        //! Returns the filter for test suites that will be allowed to be run.
+        const SuiteSet& GetSuiteSet() const;
+
+        //! Returns the test suite label exclude set that will be used to exclude any test targets with any matching suite labels.
+        const SuiteLabelExcludeSet& GetSuiteLabelExcludeSet() const;
 
         //! Returns the tests to exclude from this run of TIAF (if any).
         const AZStd::vector<ExcludedTarget>& GetExcludedTests() const;
@@ -128,7 +131,8 @@ namespace TestImpact
         Policy::TestRunner m_testRunnerPolicy = Policy::TestRunner::UseLiveTestRunner;
         AZStd::optional<AZStd::chrono::milliseconds> m_testTargetTimeout;
         AZStd::optional<AZStd::chrono::milliseconds> m_globalTimeout;
-        SuiteType m_suiteFilter;
+        SuiteSet m_suiteSet;
+        SuiteLabelExcludeSet m_suiteLabelExcludes;
         bool m_draftFailingTests = false;
         AZStd::vector<ExcludedTarget> m_excludedTests;
         bool m_safeMode = false;

+ 3 - 0
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactCommandLineOptionsUtils.h

@@ -106,6 +106,9 @@ namespace TestImpact
         return ParseBinaryStateOption(optionName, BinaryStateOption<T>{ { "live", states.first }, { "null" , states.second } }, cmd);
     }
 
+    //! Attempts to parse a multi-value option.
+    AZStd::set<AZStd::string> ParseMultiValueOption(const AZStd::string& optionName, const AZ::CommandLine& cmd);
+
     //! Attempts to parse a path option value.
     AZStd::optional<RepoPath> ParsePathOption(const AZStd::string& optionName, const AZ::CommandLine& cmd);
 

+ 8 - 3
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Include/Static/TestImpactConsoleTestSequenceEventHandler.h

@@ -22,18 +22,23 @@ namespace TestImpact
     namespace Console
     {
         //! Handler for TestSequenceStartCallback event.
-        void TestSequenceStartCallback(SuiteType suiteType, const Client::TestRunSelection& selectedTests);
+        void TestSequenceStartCallback(
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
+            const Client::TestRunSelection& selectedTests);
 
         //! Handler for TestSequenceStartCallback event.
         void ImpactAnalysisTestSequenceStartCallback(
-            SuiteType suiteType,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             const Client::TestRunSelection& selectedTests,
             const AZStd::vector<AZStd::string>& discardedTests,
             const AZStd::vector<AZStd::string>& draftedTests);
 
         //! Handler for SafeImpactAnalysisTestSequenceStartCallback event.
         void SafeImpactAnalysisTestSequenceStartCallback(
-            SuiteType suiteType,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             const Client::TestRunSelection& selectedTests,
             const Client::TestRunSelection& discardedTests,
             const AZStd::vector<AZStd::string>& draftedTests);

+ 22 - 15
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactCommandLineOptions.cpp

@@ -34,7 +34,8 @@ namespace TestImpact
             TargetOutputCaptureKey,
             TestTargetTimeoutKey,
             GlobalTimeoutKey,
-            SuiteFilterKey,
+            SuiteSetKey,
+            SuiteLabelExcludeKey,
             DraftFailingTestsKey,
             ExcludedTestsKey,
             SafeModeKey,
@@ -72,7 +73,8 @@ namespace TestImpact
             "targetout",
             "ttimeout",
             "gtimeout",
-            "suite",
+            "suites",
+            "labelexcludes",
             "draftfailingtests",
             "excluded",
             "safemode",
@@ -259,17 +261,14 @@ namespace TestImpact
             return ParseOnOffOption(OptionKeys[DraftFailingTestsKey], states, cmd).value_or(false);
         }
 
-        SuiteType ParseSuiteFilter(const AZ::CommandLine& cmd)
+        SuiteSet ParseSuiteSet(const AZ::CommandLine& cmd)
         {
-            const AZStd::vector<AZStd::pair<AZStd::string, SuiteType>> states =
-            {
-                { SuiteTypeAsString(SuiteType::Main), SuiteType::Main },
-                { SuiteTypeAsString(SuiteType::Periodic), SuiteType::Periodic },
-                { SuiteTypeAsString(SuiteType::Sandbox), SuiteType::Sandbox },
-                { SuiteTypeAsString(SuiteType::AWSI), SuiteType::AWSI }
-            };
+            return ParseMultiValueOption(OptionKeys[SuiteSetKey], cmd);
+        }
 
-            return ParseMultiStateOption(OptionKeys[SuiteFilterKey], states, cmd).value_or(SuiteType::Main);
+        SuiteLabelExcludeSet ParseSuiteLabelExcludeSet(const AZ::CommandLine& cmd)
+        {
+            return ParseMultiValueOption(OptionKeys[SuiteLabelExcludeKey], cmd);
         }
 
         AZStd::vector<ExcludedTarget> ParseExcludedTestsFile(const AZ::CommandLine& cmd)
@@ -309,7 +308,8 @@ namespace TestImpact
         m_targetOutputCapture = ParseTargetOutputCapture(cmd);
         m_globalTimeout = ParseGlobalTimeout(cmd);
         m_draftFailingTests = ParseDraftFailingTests(cmd);
-        m_suiteFilter = ParseSuiteFilter(cmd);
+        m_suiteSet = ParseSuiteSet(cmd);
+        m_suiteLabelExcludes = ParseSuiteLabelExcludeSet(cmd);
         m_excludedTests = ParseExcludedTestsFile(cmd);
         m_safeMode = ParseSafeMode(cmd);
         m_testTargetTimeout = ParseTestTargetTimeout(cmd);
@@ -421,9 +421,14 @@ namespace TestImpact
         return m_globalTimeout;
     }
 
-    SuiteType CommandLineOptions::GetSuiteFilter() const
+    const SuiteSet& CommandLineOptions::GetSuiteSet() const
+    {
+        return m_suiteSet;
+    }
+
+    const SuiteLabelExcludeSet& CommandLineOptions::GetSuiteLabelExcludeSet() const
     {
-        return m_suiteFilter;
+        return m_suiteLabelExcludes;
     }
 
     bool CommandLineOptions::HasExcludedTests() const
@@ -512,7 +517,9 @@ namespace TestImpact
             "    -safemode=<on,off>                                          Flag to specify a safe mode sequence where the set of unselected \n"
             "    -testrunner=<live,null>                                     Whether to use the null test runner (on) or run the tests (off). \n"
             "                                                                If not set, defaults to running the tests.                          \n"
-            "    -suite=<main, periodic, sandbox, awsi>                      The test suite to select from for this test sequence.";
+            "    -suite=<...>                                                The test suites to select from for this test sequence.\n"
+            "    -labelexcludes=<...>                                        The list of labels that will exclude any tests with any of these labels\n"
+            "                                                                in their suite.";
 
         return help;
     }

+ 17 - 6
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactCommandLineOptionsUtils.cpp

@@ -13,11 +13,25 @@
 
 namespace TestImpact
 {
-    //! Attempts to parse a path option value.
+    AZStd::set<AZStd::string> ParseMultiValueOption(const AZStd::string& optionName, const AZ::CommandLine& cmd)
+    {
+        AZStd::set<AZStd::string> values;
+        if (const auto numSwitchValues = cmd.GetNumSwitchValues(optionName);
+            numSwitchValues > 0)
+        {
+            for (size_t i = 0; i < numSwitchValues; i++)
+            {
+                values.insert(cmd.GetSwitchValue(optionName, i));
+            }
+        }
+
+        return values;
+    }
+
     AZStd::optional<RepoPath> ParsePathOption(const AZStd::string& optionName, const AZ::CommandLine& cmd)
     {  
         if (const auto numSwitchValues = cmd.GetNumSwitchValues(optionName);
-            numSwitchValues)
+            numSwitchValues > 0)
         {
             AZ_TestImpact_Eval(
                 numSwitchValues == 1,
@@ -36,11 +50,10 @@ namespace TestImpact
         return AZStd::nullopt;
     }
 
-    //! Attempts to pass an unsigned integer option value.
     AZStd::optional<size_t> ParseUnsignedIntegerOption(const AZStd::string& optionName, const AZ::CommandLine& cmd)
     {
         if (const auto numSwitchValues = cmd.GetNumSwitchValues(optionName);
-            numSwitchValues)
+            numSwitchValues > 0)
         {
             AZ_TestImpact_Eval(
                 numSwitchValues == 1,
@@ -62,7 +75,6 @@ namespace TestImpact
         return AZStd::nullopt;
     }
 
-    //! Attempts to parse an option value in seconds.
     AZStd::optional<AZStd::chrono::milliseconds> ParseSecondsOption(const AZStd::string& optionName, const AZ::CommandLine& cmd)
     {
         if (const auto option = ParseUnsignedIntegerOption(optionName, cmd);
@@ -74,7 +86,6 @@ namespace TestImpact
         return AZStd::nullopt;
     }
 
-    //! Attempts to parse the JSON in fileData into an array of test names.
     AZStd::vector<ExcludedTarget> ParseExcludedTestTargetsFromFile(const AZStd::string& fileData)
     {
         rapidjson::Document excludeData;
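
For reference, a minimal sketch of how the new ParseMultiValueOption helper can be exercised. The AZ::CommandLine include path and the comma-splitting of switch values are assumptions, and the invocation below is illustrative rather than taken from this change:

    #include <AzCore/Settings/CommandLine.h>         // assumed location of AZ::CommandLine
    #include <TestImpactCommandLineOptionsUtils.h>   // declares ParseMultiValueOption (include path assumed)
    #include <iostream>

    void MultiValueOptionExample()
    {
        AZ::CommandLine cmd;
        // Assuming AZ::CommandLine splits comma-separated switch values, "suites"
        // ends up with two values and "labelexcludes" with one.
        cmd.Parse({ "program", "-suites=main,periodic", "-labelexcludes=REQUIRES_gpu" });

        const auto suites = TestImpact::ParseMultiValueOption("suites", cmd);
        for (const auto& suite : suites)
        {
            std::cout << suite.c_str() << "\n"; // prints "main" then "periodic" (ordered set)
        }
    }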

+ 13 - 10
Code/Tools/TestImpactFramework/Frontend/Console/Common/Code/Source/TestImpactConsoleTestSequenceEventHandler.cpp

@@ -24,9 +24,10 @@ namespace TestImpact
 
         namespace Output
         {
-            void TestSuiteFilter(SuiteType filter)
+            void TestSuiteSet(const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet)
             {
-                std::cout << "Test suite filter: " << SuiteTypeAsString(filter).c_str() << "\n";
+                std::cout << "Test suite set: " << SuiteSetAsString(suiteSet).c_str() << "\n";
+                std::cout << "Test suite label exclude set: " << SuiteLabelExcludeSetAsString(suiteLabelExcludeSet).c_str() << "\n";
             }
 
             void ImpactAnalysisTestSelection(size_t numSelectedTests, size_t numDiscardedTests, size_t numExcludedTests, size_t numDraftedTests)
@@ -114,28 +115,29 @@ namespace TestImpact
             }
         }
 
-        void TestSequenceStartCallback(SuiteType suiteType, const Client::TestRunSelection& selectedTests)
+        void TestSequenceStartCallback(const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet, const Client::TestRunSelection& selectedTests)
         {
-            Output::TestSuiteFilter(suiteType);
+            Output::TestSuiteSet(suiteSet, suiteLabelExcludeSet);
             std::cout << selectedTests.GetNumIncludedTestRuns() << " tests selected, " << selectedTests.GetNumExcludedTestRuns()
                       << " excluded.\n";
 
             PrintDivider();
         }
 
-        void TestSequenceCompleteCallback(SuiteType suiteType, const Client::TestRunSelection& selectedTests)
+        void TestSequenceCompleteCallback(const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet, const Client::TestRunSelection& selectedTests)
         {
-            Output::TestSuiteFilter(suiteType);
+            Output::TestSuiteSet(suiteSet, suiteLabelExcludeSet);
             std::cout << selectedTests.GetNumIncludedTestRuns() << " tests selected, " << selectedTests.GetNumExcludedTestRuns() << " excluded.\n";
         }
 
         void ImpactAnalysisTestSequenceStartCallback(
-            SuiteType suiteType,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             const Client::TestRunSelection& selectedTests,
             const AZStd::vector<AZStd::string>& discardedTests,
             const AZStd::vector<AZStd::string>& draftedTests)
         {
-            Output::TestSuiteFilter(suiteType);
+            Output::TestSuiteSet(suiteSet, suiteLabelExcludeSet);
             Output::ImpactAnalysisTestSelection(
                 selectedTests.GetTotalNumTests(), discardedTests.size(), selectedTests.GetNumExcludedTestRuns(), draftedTests.size());
 
@@ -143,12 +145,13 @@ namespace TestImpact
         }
 
         void SafeImpactAnalysisTestSequenceStartCallback(
-            SuiteType suiteType,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             const Client::TestRunSelection& selectedTests,
             const Client::TestRunSelection& discardedTests,
             const AZStd::vector<AZStd::string>& draftedTests)
         {
-            Output::TestSuiteFilter(suiteType);
+            Output::TestSuiteSet(suiteSet, suiteLabelExcludeSet);
             Output::ImpactAnalysisTestSelection(
                 selectedTests.GetTotalNumTests(),
                 discardedTests.GetTotalNumTests(),

+ 3 - 2
Code/Tools/TestImpactFramework/Frontend/Console/Native/Code/Source/TestImpactConsoleMain.cpp

@@ -48,13 +48,14 @@ namespace TestImpact::Console
             }
 
             std::cout << "Constructing in-memory model of source tree and test coverage for test suite ";
-            std::cout << SuiteTypeAsString(options.GetSuiteFilter()).c_str() << ", this may take a moment...\n";
+            std::cout << SuiteSetAsString(options.GetSuiteSet()).c_str() << ", this may take a moment...\n";
             NativeRuntime runtime(
                 NativeRuntimeConfigurationFactory(ReadFileContents<CommandLineOptionsException>(options.GetConfigurationFilePath())),
                 options.GetDataFilePath(),
                 options.GetPreviousRunDataFilePath(),
                 options.GetExcludedTests(),
-                options.GetSuiteFilter(),
+                options.GetSuiteSet(),
+                options.GetSuiteLabelExcludeSet(),
                 options.GetExecutionFailurePolicy(),
                 options.GetFailedTestCoveragePolicy(),
                 options.GetTestFailurePolicy(),

+ 3 - 2
Code/Tools/TestImpactFramework/Frontend/Console/Python/Code/Source/TestImpactConsoleMain.cpp

@@ -39,13 +39,14 @@ namespace TestImpact::Console
             }
 
             std::cout << "Constructing in-memory model of source tree and test coverage for test suite ";
-            std::cout << SuiteTypeAsString(options.GetSuiteFilter()).c_str() << ", this may take a moment...\n";
+            std::cout << SuiteSetAsString(options.GetSuiteSet()).c_str() << ", this may take a moment...\n";
             PythonRuntime runtime(
                 PythonRuntimeConfigurationFactory(ReadFileContents<CommandLineOptionsException>(options.GetConfigurationFilePath())),
                 options.GetDataFilePath(),
                 options.GetPreviousRunDataFilePath(),
                 options.GetExcludedTests(),
-                options.GetSuiteFilter(),
+                options.GetSuiteSet(),
+                options.GetSuiteLabelExcludeSet(),
                 options.GetExecutionFailurePolicy(),
                 options.GetFailedTestCoveragePolicy(),
                 options.GetTestFailurePolicy(),

+ 35 - 16
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactClientSequenceReport.h

@@ -134,7 +134,8 @@ namespace TestImpact
             //! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
             //! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
             //! @param policyState The policy state this sequence was executed under.
-            //! @param suiteType The suite from which the tests have been selected from.
+            //! @param suiteSet The suites from which the tests have been selected.
+            //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
             //! @param selectedTestRuns The target names of the selected test runs.
             //! @param selectedTestRunReport The report for the set of selected test runs.
             SequenceReportBase(
@@ -142,14 +143,16 @@ namespace TestImpact
                 AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
                 AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
                 PolicyStateType policyState,
-                SuiteType suiteType,
+                const SuiteSet& suiteSet,
+                const SuiteLabelExcludeSet& suiteLabelExcludeSet,
                 TestRunSelection selectedTestRuns,
                 TestRunReport selectedTestRunReport)
                 : m_maxConcurrency(maxConcurrency)
                 , m_testTargetTimeout(AZStd::move(testTargetTimeout))
                 , m_globalTimeout(AZStd::move(globalTimeout))
                 , m_policyState(AZStd::move(policyState))
-                , m_suite(suiteType)
+                , m_suiteSet(suiteSet)
+                , m_suiteLabelExcludeSet(suiteLabelExcludeSet)
                 , m_selectedTestRuns(AZStd::move(selectedTestRuns))
                 , m_selectedTestRunReport(AZStd::move(selectedTestRunReport))
             {
@@ -161,7 +164,8 @@ namespace TestImpact
                     AZStd::move(report.m_testTargetTimeout),
                     AZStd::move(report.m_globalTimeout),
                     AZStd::move(report.m_policyState),
-                    AZStd::move(report.m_suite),
+                    AZStd::move(report.m_suiteSet),
+                    AZStd::move(report.m_suiteLabelExcludeSet),
                     AZStd::move(report.m_selectedTestRuns),
                     AZStd::move(report.m_selectedTestRunReport))
             {
@@ -173,7 +177,8 @@ namespace TestImpact
                     report.m_testTargetTimeout,
                     report.m_globalTimeout,
                     report.m_policyState,
-                    report.m_suite,
+                    report.m_suiteSet,
+                    AZStd::move(report.m_suiteLabelExcludeSet),
                     report.m_selectedTestRuns,
                     report.m_selectedTestRunReport)
             {
@@ -205,10 +210,16 @@ namespace TestImpact
                 return m_policyState;
             }
 
-            //! Returns the suite for this sequence.
-            SuiteType GetSuite() const
+            //! Returns the suite set for this sequence.
+            const SuiteSet& GetSuiteSet() const
             {
-                return m_suite;
+                return m_suiteSet;
+            }
+
+            //! Returns the suite label exclude set for this sequence.
+            const SuiteLabelExcludeSet& GetSuiteLabelExcludeSet() const
+            {
+                return m_suiteLabelExcludeSet;
             }
 
              //! Returns the result of the sequence.
@@ -306,7 +317,8 @@ namespace TestImpact
             AZStd::optional<AZStd::chrono::milliseconds> m_testTargetTimeout;
             AZStd::optional<AZStd::chrono::milliseconds> m_globalTimeout;
             PolicyStateType m_policyState;
-            SuiteType m_suite = SuiteType::Main;
+            SuiteSet m_suiteSet;
+            SuiteLabelExcludeSet m_suiteLabelExcludeSet;
             TestRunSelection m_selectedTestRuns;
             TestRunReport m_selectedTestRunReport;
         };
@@ -338,7 +350,8 @@ namespace TestImpact
             //! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
             //! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
             //! @param policyState The policy state this sequence was executed under.
-            //! @param suiteType The suite from which the tests have been selected from.
+            //! @param suiteSet The suites from which the tests have been selected.
+            //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
             //! @param selectedTestRuns The target names of the selected test runs.
             //! @param draftedTestRuns The target names of the drafted test runs.
             //! @param selectedTestRunReport The report for the set of selected test runs.
@@ -348,7 +361,8 @@ namespace TestImpact
                 AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
                 AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
                 PolicyStateType policyState,
-                SuiteType suiteType,
+                SuiteSet suiteSet,
+                const SuiteLabelExcludeSet& suiteLabelExcludeSet,
                 TestRunSelection selectedTestRuns,
                 AZStd::vector<AZStd::string> draftedTestRuns,
                 TestRunReport&& selectedTestRunReport,
@@ -358,7 +372,8 @@ namespace TestImpact
                     testTargetTimeout,
                     globalTimeout,
                     policyState,
-                    suiteType,
+                    suiteSet,
+                    suiteLabelExcludeSet,
                     selectedTestRuns,
                     AZStd::move(selectedTestRunReport))
                 , m_draftedTestRuns(AZStd::move(draftedTestRuns))
@@ -459,7 +474,8 @@ namespace TestImpact
             //! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
             //! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
             //! @param policyState The policy state this sequence was executed under.
-            //! @param suiteType The suite from which the tests have been selected from.
+            //! @param suiteSet The suites from which the tests have been selected.
+            //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
             //! @param selectedTestRuns The target names of the selected test runs.
             //! @param draftedTestRuns The target names of the drafted test runs.
             //! @param selectedTestRunReport The report for the set of selected test runs.
@@ -469,7 +485,8 @@ namespace TestImpact
                 AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
                 AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
                 ImpactAnalysisSequencePolicyState policyState,
-                SuiteType suiteType,
+                SuiteSet suiteSet,
+                SuiteLabelExcludeSet suiteLabelExcludeSet,
                 TestRunSelection selectedTestRuns,
                 AZStd::vector<AZStd::string> discardedTestRuns,
                 AZStd::vector<AZStd::string> draftedTestRuns,
@@ -494,7 +511,8 @@ namespace TestImpact
             //! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
             //! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
             //! @param policyState The policy state this sequence was executed under.
-            //! @param suiteType The suite from which the tests have been selected from.
+            //! @param suiteSet The suites from which the tests have been selected.
+            //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
             //! @param selectedTestRuns The target names of the selected test runs.
             //! @param discardedTestRuns The target names of the discarded test runs.
             //! @param draftedTestRuns The target names of the drafted test runs.
@@ -506,7 +524,8 @@ namespace TestImpact
                 AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
                 AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
                 SafeImpactAnalysisSequencePolicyState policyState,
-                SuiteType suiteType,
+                SuiteSet suiteSet,
+                SuiteLabelExcludeSet suiteLabelExcludeSet,
                 TestRunSelection selectedTestRuns,
                 TestRunSelection discardedTestRuns,
                 AZStd::vector<AZStd::string> draftedTestRuns,

+ 0 - 12
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactClientSequenceReportSerializer.h

@@ -25,16 +25,4 @@ namespace TestImpact
 
     //! Serializes a safe impact analysis sequence report to Json format.
     AZStd::string SerializeSequenceReport(const Client::SafeImpactAnalysisSequenceReport& sequenceReport);
-
-    //! Deserialize a regular sequence report from Json format.
-    Client::RegularSequenceReport DeserializeRegularSequenceReport(const AZStd::string& sequenceReportJson);
-
-    //! Deserialize a seed sequence report from Json format.
-    Client::SeedSequenceReport DeserializeSeedSequenceReport(const AZStd::string& sequenceReportJson);
-
-    //! Deserialize an impact analysis sequence report from Json format.
-    Client::ImpactAnalysisSequenceReport DeserializeImpactAnalysisSequenceReport(const AZStd::string& sequenceReportJson);
-
-    //! Deserialize a safe impact analysis sequence report from Json format.
-    Client::SafeImpactAnalysisSequenceReport DeserializeSafeImpactAnalysisSequenceReport(const AZStd::string& sequenceReportJson);
 } // namespace TestImpact

+ 14 - 6
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactRuntime.h

@@ -38,12 +38,17 @@ namespace TestImpact
     class TestTargetExclusionList;
 
     //! Callback for a test sequence that isn't using test impact analysis to determine selected tests.
-    //! @parm suiteType The test suite to select tests from.
+    //! @param suiteSet The test suites to select tests from.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @param tests The tests that will be run for this sequence.
-    using TestSequenceStartCallback = AZStd::function<void(SuiteType suiteType, const Client::TestRunSelection& tests)>;
+    using TestSequenceStartCallback = AZStd::function<void(
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
+        const Client::TestRunSelection& tests)>;
 
     //! Callback for a test sequence using test impact analysis.
-    //! @parm suiteType The test suite to select tests from.
+    //! @param suiteSet The test suites to select tests from.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @param selectedTests The tests that have been selected for this run by test impact analysis.
     //! @param discardedTests The tests that have been rejected for this run by test impact analysis. 
     //! @param draftedTests The tests that have been drafted in for this run due to requirements outside of test impact analysis
@@ -52,13 +57,15 @@ namespace TestImpact
     //! These tests will be run with coverage instrumentation.
     //! @note discardedTests and draftedTests may contain overlapping tests.
     using ImpactAnalysisTestSequenceStartCallback = AZStd::function<void(
-        SuiteType suiteType,
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
         const Client::TestRunSelection& selectedTests,
         const AZStd::vector<AZStd::string>& discardedTests,
         const AZStd::vector<AZStd::string>& draftedTests)>;
 
     //! Callback for a test sequence using test impact analysis.
-    //! @parm suiteType The test suite to select tests from.
+    //! @param suiteSet The test suites to select tests from.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @param selectedTests The tests that have been selected for this run by test impact analysis.
     //! @param discardedTests The tests that have been rejected for this run by test impact analysis.
     //! These tests will not be run without coverage instrumentation unless there is an entry in the draftedTests list.
@@ -67,7 +74,8 @@ namespace TestImpact
     //! to execute previously).
     //! @note discardedTests and draftedTests may contain overlapping tests.
     using SafeImpactAnalysisTestSequenceStartCallback = AZStd::function<void(
-        SuiteType suiteType,
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
         const Client::TestRunSelection& selectedTests,
         const Client::TestRunSelection& discardedTests,
         const AZStd::vector<AZStd::string>& draftedTests)>;
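
To illustrate the widened callback signatures above, here is a minimal sketch of a client-side TestSequenceStartCallback; the logging body is illustrative, and only the signature comes from this change:

    #include <TestImpactFramework/TestImpactRuntime.h>
    #include <TestImpactFramework/TestImpactUtils.h>
    #include <iostream>

    // A callback matching the new TestSequenceStartCallback signature.
    TestImpact::TestSequenceStartCallback MakeLoggingStartCallback()
    {
        return [](const TestImpact::SuiteSet& suiteSet,
                  const TestImpact::SuiteLabelExcludeSet& suiteLabelExcludeSet,
                  const TestImpact::Client::TestRunSelection& tests)
        {
            // SuiteSetAsString/SuiteLabelExcludeSetAsString are the helpers declared in TestImpactUtils.h.
            std::cout << "Suites: " << TestImpact::SuiteSetAsString(suiteSet).c_str() << "\n";
            std::cout << "Label excludes: " << TestImpact::SuiteLabelExcludeSetAsString(suiteLabelExcludeSet).c_str() << "\n";
            std::cout << tests.GetNumIncludedTestRuns() << " tests selected, "
                      << tests.GetNumExcludedTestRuns() << " excluded\n";
        };
    }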

+ 13 - 8
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactTestSequence.h

@@ -12,6 +12,7 @@
 #include <TestImpactFramework/TestImpactPolicy.h>
 
 #include <AzCore/std/containers/array.h>
+#include <AzCore/std/containers/set.h>
 
 namespace TestImpact
 {
@@ -25,14 +26,18 @@ namespace TestImpact
         TestInterleaved //!< Tests are interlaced across shards agnostic of fixtures (fastest but prone to inter-test dependency problems).
     };
 
-    //! Test suite types to select from.
-    enum class SuiteType : AZ::u8
-    {
-        Main = 0,
-        Periodic,
-        Sandbox,
-        AWSI
-    };
+    //! Set of test suites that tests can be drawn from.
+    //! @note An ordered set is used so that the serialized string of the set is always the same regardless of the order in which
+    //! the suites are specified.
+    using SuiteSet = AZStd::set<AZStd::string>;
+
+    //! Set of test suite labels that will be used to exclude any test targets that have test suite labels matching any labels in this set.
+    //! @note An ordered set is used so that the serialized string of the set is always the same regardless of the order in which
+    //! the labels are specified.
+    using SuiteLabelExcludeSet = AZStd::set<AZStd::string>;
+
+    //! The CTest label that test target suites need to have in order to be run as part of TIAF.
+    inline constexpr auto RequiresTiafLabel = "REQUIRES_tiaf";
 
     //! Result of a test sequence that was run.
     enum class TestSequenceResult
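
Because both aliases are ordered sets, the determinism noted above can be seen directly; a minimal sketch:

    #include <TestImpactFramework/TestImpactTestSequence.h>

    // The same suites yield the same iteration (and therefore serialization) order
    // regardless of the order in which they were supplied on the command line.
    void SuiteSetOrderingExample()
    {
        const TestImpact::SuiteSet a{ "periodic", "main" };
        const TestImpact::SuiteSet b{ "main", "periodic" };
        const bool identical = (a == b); // true: both iterate as "main", "periodic"
        (void)identical;
    }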

+ 23 - 40
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Headers/TestImpactFramework/TestImpactUtils.h

@@ -56,6 +56,25 @@ namespace TestImpact
             file.Write(bytes.data(), bytes.size()), ExceptionType, AZStd::string::format("Couldn't write contents for file %s", path.c_str()));
     }
 
+    //! Returns a string of the concatenated container contents separated by the specified separator.
+    template<typename Container>
+    AZStd::string ConcatenateContainerContentsAsString(const Container& container, const AZStd::string& separator)
+    {
+        AZStd::string concatenatedString;
+        size_t i = 1;
+        for (const auto& value : container)
+        {
+            concatenatedString += value;
+            if (i != container.size())
+            {
+                concatenatedString += separator;
+            }
+            i++;
+        }
+
+        return concatenatedString;
+    }
+
     //! Delete the files that match the pattern from the specified directory.
     //! @param path The path to the directory to pattern match the files for deletion.
     //! @param pattern The pattern to match files for deletion.
@@ -74,7 +93,10 @@ namespace TestImpact
     [[nodiscard]] size_t FileCount(const RepoPath& path, const AZStd::string& pattern);
 
     //! User-friendly names for the test suite types.
-    AZStd::string SuiteTypeAsString(SuiteType suiteType);
+    AZStd::string SuiteSetAsString(const SuiteSet& suiteSet);
+
+    //! User-friendly names for the test suite label excludes.
+    AZStd::string SuiteLabelExcludeSetAsString(const SuiteLabelExcludeSet& suiteLabelExcludeSet);
 
     //! User-friendly names for the sequence report types.
     AZStd::string SequenceReportTypeAsString(Client::SequenceReportType type);
@@ -111,43 +133,4 @@ namespace TestImpact
 
     //! User-friendly names for the client test result types.
     AZStd::string ClientTestResultAsString(Client::TestResult result);
-
-    //! User-friendly names for the suite types.
-    SuiteType SuiteTypeFromString(const AZStd::string& suiteType);
-
-    //! Returns the sequence report type for the specified string.
-    Client::SequenceReportType SequenceReportTypeFromString(const AZStd::string& type);
-
-    //! Returns the test run result for the specified string.
-    Client::TestRunResult TestRunResultFromString(const AZStd::string& result);
-
-    //! Returns the test result for the specified string.
-    Client::TestResult TestResultFromString(const AZStd::string& result);
-
-    //! Returns the test sequence result for the specified string.
-    TestSequenceResult TestSequenceResultFromString(const AZStd::string& result);
-
-    //! Returns the execution failure policy for the specified string.
-    Policy::ExecutionFailure ExecutionFailurePolicyFromString(const AZStd::string& executionFailurePolicy);
-
-    //! Returns the failed test coverage policy for the specified string.
-    Policy::FailedTestCoverage FailedTestCoveragePolicyFromString(const AZStd::string& failedTestCoveragePolicy);
-
-    //! Returns the test prioritization policy for the specified string.
-    Policy::TestPrioritization TestPrioritizationPolicyFromString(const AZStd::string& testPrioritizationPolicy);
-
-    //! Returns the test failure policy for the specified string.
-    Policy::TestFailure TestFailurePolicyFromString(const AZStd::string& testFailurePolicy);
-
-    //! Returns the integrity failure policy for the specified string.
-    Policy::IntegrityFailure IntegrityFailurePolicyFromString(const AZStd::string& integrityFailurePolicy);
-
-    //! Returns the dynamic dependency map policy for the specified string.
-    Policy::DynamicDependencyMap DynamicDependencyMapPolicyFromString(const AZStd::string& dynamicDependencyMapPolicy);
-
-    //! Returns the test sharding policy for the specified string.
-    Policy::TestSharding TestShardingPolicyFromString(const AZStd::string& testShardingPolicy);
-
-    //! Returns the target output capture policy for the specified string.
-    Policy::TargetOutputCapture TargetOutputCapturePolicyFromString(const AZStd::string& targetOutputCapturePolicy);
 } // namespace TestImpact
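
The matching TestImpactUtils.cpp implementations are not shown in this excerpt; a plausible sketch in terms of the new ConcatenateContainerContentsAsString helper (the separator is an assumption, not taken from the actual source) would be:

    namespace TestImpact
    {
        // Assumed implementation sketch: join the ordered set with a separator so the
        // resulting string is stable across runs.
        AZStd::string SuiteSetAsString(const SuiteSet& suiteSet)
        {
            return ConcatenateContainerContentsAsString(suiteSet, ", ");
        }

        AZStd::string SuiteLabelExcludeSetAsString(const SuiteLabelExcludeSet& suiteLabelExcludeSet)
        {
            return ConcatenateContainerContentsAsString(suiteLabelExcludeSet, ", ");
        }
    } // namespace TestImpact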

+ 24 - 0
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h

@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) Contributors to the Open 3D Engine Project.
+ * For complete copyright and license terms please see the LICENSE at the root of this distribution.
+ *
+ * SPDX-License-Identifier: Apache-2.0 OR MIT
+ *
+ */
+
+#pragma once
+
+#include <TestImpactFramework/TestImpactTestSequence.h>
+
+#include <Artifact/Static/TestImpactTestSuiteMeta.h>
+
+#include <AzCore/JSON/document.h>
+#include <AzCore/std/optional.h>
+
+namespace TestImpact
+{
+    //! Extracts the suite labels and places them in a suite label set.
+    //! @returns `AZStd::nullopt` if the label set contains a label in the suite label exclude set or lacks the REQUIRES_tiaf label;
+    //! otherwise, the suite label set.
+    AZStd::optional<SuiteLabelSet> ExtractTestSuiteLabelSet(
+        const rapidjson_ly::GenericArray<true, rapidjson_ly::Value>& suite, const SuiteLabelExcludeSet& suiteLabelExcludeSet);
+} // namespace TestImpact

+ 5 - 0
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Artifact/Static/TestImpactTestSuiteMeta.h

@@ -9,14 +9,19 @@
 #pragma once
 
 #include <AzCore/std/chrono/chrono.h>
+#include <AzCore/std/containers/unordered_set.h>
 #include <AzCore/std/string/string.h>
 
 namespace TestImpact
 {
+    //! Set for the labels appended to each test target's suite.
+    using SuiteLabelSet = AZStd::unordered_set<AZStd::string>;
+
     //! Artifact produced by the build system for each test target containing the additional meta-data about the test.
     struct TestSuiteMeta
     {
         AZStd::string m_name; //!< The name of the test suite.
         AZStd::chrono::milliseconds m_timeout = AZStd::chrono::milliseconds{ 0 }; //!< The timeout for the test suite time to run in.
+        SuiteLabelSet m_labelSet; //!< The set of labels for this suite.
     };
 } // namespace TestImpact

+ 9 - 4
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Target/Common/TestImpactTestTarget.cpp

@@ -13,22 +13,27 @@ namespace TestImpact
     TestTarget::TestTarget(
         TargetDescriptor&& descriptor, TestTargetMeta&& testMetaData)
         : Target(AZStd::move(descriptor))
-        , testTargetMeta(AZStd::move(testMetaData))
+        , m_testTargetMeta(AZStd::move(testMetaData))
     {
     }
 
     const AZStd::string& TestTarget::GetSuite() const
     {
-        return testTargetMeta.m_suiteMeta.m_name;
+        return m_testTargetMeta.m_suiteMeta.m_name;
     }
 
     AZStd::chrono::milliseconds TestTarget::GetTimeout() const
     {
-        return testTargetMeta.m_suiteMeta.m_timeout;
+        return m_testTargetMeta.m_suiteMeta.m_timeout;
     }
     
     const AZStd::string& TestTarget::GetNamespace() const
     {
-        return testTargetMeta.m_namespace;
+        return m_testTargetMeta.m_namespace;
+    }
+
+    const SuiteLabelSet& TestTarget::GetSuiteLabelSet() const
+    {
+        return m_testTargetMeta.m_suiteMeta.m_labelSet;
     }
 } // namespace TestImpact

+ 4 - 1
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/Target/Common/TestImpactTestTarget.h

@@ -31,7 +31,10 @@ namespace TestImpact
         //! Returns the namespace this test target resides in (if any).
         const AZStd::string& GetNamespace() const;
 
+        //! Returns the suite label set.
+        const SuiteLabelSet& GetSuiteLabelSet() const;
+
     private:
-        TestTargetMeta testTargetMeta;
+        TestTargetMeta m_testTargetMeta;
     };
 } // namespace TestImpact
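
A small sketch of querying the new suite label accessor; construction of the TestTarget is omitted, and any label other than REQUIRES_tiaf is hypothetical:

    // Returns whether the given test target's suite carries a particular label.
    bool HasSuiteLabel(const TestImpact::TestTarget& target, const AZStd::string& label)
    {
        return target.GetSuiteLabelSet().contains(label);
    }

    // Example: every target surviving the factory filtering carries REQUIRES_tiaf,
    // so HasSuiteLabel(target, TestImpact::RequiresTiafLabel) is expected to hold.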

+ 0 - 27
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/TestEngine/Common/TestImpactTestEngine.h

@@ -265,31 +265,4 @@ namespace TestImpact
         auto engineRuns = CompileTestEngineRuns<TestJobRunner, TestTarget>(testTargets, runnerJobs, AZStd::move(engineJobs));
         return AZStd::pair{ CalculateSequenceResult(result, engineRuns, executionFailurePolicy), AZStd::move(engineRuns) };
     }
-
-    template<typename TestEngineJob>
-    auto GenerateInstrumentedRunResult(const AZStd::pair<TestSequenceResult, AZStd::vector<TestEngineJob>>& engineJobs, Policy::IntegrityFailure integrityFailurePolicy)
-    {
-        const auto& [result, engineRuns] = engineJobs;
-
-        // Now that we know the true result of successful jobs that return non-zero we can deduce if we have any integrity failures
-        // where a test target ran and completed its tests without incident yet failed to produce coverage data
-        if (integrityFailurePolicy == Policy::IntegrityFailure::Abort)
-        {
-            for (const auto& engineRun : engineRuns)
-            {
-                if (const auto testResult = engineRun.GetTestResult();
-                    testResult == Client::TestRunResult::AllTestsPass || testResult == Client::TestRunResult::TestFailures)
-                {
-                    AZ_TestImpact_Eval(
-                        engineRun.GetCoverge().has_value(),
-                        TestEngineException,
-                        AZStd::string::format(
-                            "Test target %s completed its test run but failed to produce coverage data",
-                            engineRun.GetTestTarget()->GetName().c_str()));
-                }
-            }
-        }
-
-        return AZStd::pair{ result, engineRuns };
-    }
 } // namespace TestImpact

+ 7 - 4
Code/Tools/TestImpactFramework/Runtime/Common/Code/Include/Static/TestImpactRuntimeUtils.h

@@ -450,7 +450,8 @@ namespace TestImpact
     //! @tparam TestJob The test engine job type returned by the functor.
     //! @param maxConcurrency The maximum concurrency being used for this sequence.
     //! @param policyState The policy state being used for the sequence.
-    //! @param suiteType The suite type used for this sequence.
+    //! @param suiteSet The suite set used for this sequence.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @param timer The timer to use for the test run timings.
     //! @param testRunner The test runner functor to use for each of the test runs.
     //! @param includedSelectedTestTargets The subset of test targets that were selected to run and not also fully excluded from running.
@@ -466,7 +467,8 @@ namespace TestImpact
     Client::ImpactAnalysisSequenceReport ImpactAnalysisTestSequenceWrapper(
         size_t maxConcurrency,
         const ImpactAnalysisSequencePolicyState& policyState,
-        SuiteType suiteType,
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
         const Timer& sequenceTimer,
         const TestRunnerFunctor& testRunner,
         const AZStd::vector<const TestTarget*>& includedSelectedTestTargets,
@@ -492,7 +494,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(suiteType, selectedTests, discardedTests, draftedTests);
+            (*testSequenceStartCallback)(suiteSet, suiteLabelExcludeSet, selectedTests, discardedTests, draftedTests);
         }
 
         // We share the test run complete handler between the selected and drafted test runs as to present them together as one
@@ -536,7 +538,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             policyState,
-            suiteType,
+            suiteSet,
+            suiteLabelExcludeSet,
             selectedTests,
             discardedTests,
             draftedTests,

+ 31 - 0
Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.cpp

@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Contributors to the Open 3D Engine Project.
+ * For complete copyright and license terms please see the LICENSE at the root of this distribution.
+ *
+ * SPDX-License-Identifier: Apache-2.0 OR MIT
+ *
+ */
+
+#include <Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h>
+
+namespace TestImpact
+{
+    AZStd::optional<SuiteLabelSet> ExtractTestSuiteLabelSet(
+        const rapidjson_ly::GenericArray<true, rapidjson_ly::Value>& suiteLabels, const SuiteLabelExcludeSet& suiteLabelExcludeSet)
+    {
+        SuiteLabelSet labelSet;
+        for (const auto& label : suiteLabels)
+        {
+            const auto labelString = label.GetString();
+            if (suiteLabelExcludeSet.contains(labelString))
+            {
+                return AZStd::nullopt;
+            }
+
+            labelSet.insert(labelString);
+        }
+
+        // Only return the label set if it contains the TIAF requirement label
+        return labelSet.contains(RequiresTiafLabel) ? AZStd::optional<SuiteLabelSet>{ labelSet } : AZStd::nullopt;
+    }
+} // namespace TestImpact
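
A hedged usage sketch for ExtractTestSuiteLabelSet, feeding it a hand-written JSON label array. The JSON shape, field name, and labels other than REQUIRES_tiaf are illustrative; the real factories parse the build-system-generated test target meta files:

    #include <Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h>

    void SuiteLabelExtractionExample()
    {
        rapidjson_ly::Document doc;
        doc.Parse(R"({ "labels": ["REQUIRES_tiaf", "SUITE_main"] })");
        const rapidjson_ly::Document& constDoc = doc; // const access yields GenericArray<true, Value>

        const TestImpact::SuiteLabelExcludeSet excludes{ "REQUIRES_gpu" };
        const auto labelSet = TestImpact::ExtractTestSuiteLabelSet(constDoc["labels"].GetArray(), excludes);
        // labelSet holds a value here: nothing is excluded and REQUIRES_tiaf is present.
        // With excludes{ "SUITE_main" } the same call would return nullopt instead.
        (void)labelSet;
    }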

+ 8 - 4
Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactClientSequenceReport.cpp

@@ -164,7 +164,8 @@ namespace TestImpact
             AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
             AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
             ImpactAnalysisSequencePolicyState policyState,
-            SuiteType suiteType,
+            SuiteSet suiteSet,
+            SuiteLabelExcludeSet suiteLabelExcludeSet,
             TestRunSelection selectedTestRuns,
             AZStd::vector<AZStd::string> discardedTestRuns,
             AZStd::vector<AZStd::string> draftedTestRuns,
@@ -175,7 +176,8 @@ namespace TestImpact
                 AZStd::move(testTargetTimeout),
                 AZStd::move(globalTimeout),
                 AZStd::move(policyState),
-                suiteType,
+                AZStd::move(suiteSet),
+                AZStd::move(suiteLabelExcludeSet),
                 AZStd::move(selectedTestRuns),
                 AZStd::move(draftedTestRuns),
                 AZStd::move(selectedTestRunReport),
@@ -201,7 +203,8 @@ namespace TestImpact
             AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
             AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
             SafeImpactAnalysisSequencePolicyState policyState,
-            SuiteType suiteType,
+            SuiteSet suiteSet,
+            SuiteLabelExcludeSet suiteLabelExcludeSet,
             TestRunSelection selectedTestRuns,
             TestRunSelection discardedTestRuns,
             AZStd::vector<AZStd::string> draftedTestRuns,
@@ -213,7 +216,8 @@ namespace TestImpact
                 AZStd::move(testTargetTimeout),
                 AZStd::move(globalTimeout),
                 AZStd::move(policyState),
-                suiteType,
+                AZStd::move(suiteSet),
+                AZStd::move(suiteLabelExcludeSet),
                 AZStd::move(selectedTestRuns),
                 AZStd::move(draftedTestRuns),
                 AZStd::move(selectedTestRunReport),

+ 7 - 252
Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactClientSequenceReportSerializer.cpp

@@ -66,6 +66,7 @@ namespace TestImpact
                 "max_concurrency",
                 "policy",
                 "suite",
+                "exclude_labels",
                 "selected_test_runs",
                 "selected_test_run_report",
                 "total_num_passing_test_runs",
@@ -123,6 +124,7 @@ namespace TestImpact
                 MaxConcurrency,
                 Policy,
                 Suite,
+                SuiteLabelExclude,
                 SelectedTestRuns,
                 SelectedTestRunReport,
                 TotalNumPassingTestRuns,
@@ -459,7 +461,11 @@ namespace TestImpact
 
             // Suite
             writer.Key(SequenceReportFields::Keys[SequenceReportFields::Suite]);
-            writer.String(SuiteTypeAsString(sequenceReport.GetSuite()).c_str());
+            writer.String(SuiteSetAsString(sequenceReport.GetSuiteSet()).c_str());
+
+            // Suite label excludes
+            writer.Key(SequenceReportFields::Keys[SequenceReportFields::SuiteLabelExclude]);
+            writer.String(SuiteLabelExcludeSetAsString(sequenceReport.GetSuiteLabelExcludeSet()).c_str());
 
             // Selected test runs
             writer.Key(SequenceReportFields::Keys[SequenceReportFields::SelectedTestRuns]);
@@ -620,255 +626,4 @@ namespace TestImpact
     {
         return AZStd::chrono::steady_clock::time_point(AZStd::chrono::milliseconds(ms));
     }
-
-    AZStd::vector<Client::Test> DeserializeTests(const rapidjson::Value& serialTests)
-    {
-        AZStd::vector<Client::Test> tests;
-        tests.reserve(serialTests[SequenceReportFields::Keys[SequenceReportFields::Tests]].GetArray().Size());
-        for (const auto& test : serialTests[SequenceReportFields::Keys[SequenceReportFields::Tests]].GetArray())
-        {
-            const AZStd::string name = test[SequenceReportFields::Keys[SequenceReportFields::Name]].GetString();
-            const auto result = TestResultFromString(test[SequenceReportFields::Keys[SequenceReportFields::Result]].GetString());
-            tests.emplace_back(name, result);
-        }
-
-        return tests;
-    }
-
-    Client::TestRunBase DeserializeTestRunBase(const rapidjson::Value& serialTestRun)
-    {
-        return Client::TestRunBase(
-            "", // Namespace
-            serialTestRun[SequenceReportFields::Keys[SequenceReportFields::Name]].GetString(),
-            serialTestRun[SequenceReportFields::Keys[SequenceReportFields::CommandArgs]].GetString(),
-            "", // StdOut
-            "", // StdError
-            TimePointFromMsInt64(serialTestRun[SequenceReportFields::Keys[SequenceReportFields::StartTime]].GetInt64()),
-            AZStd::chrono::milliseconds(serialTestRun[SequenceReportFields::Keys[SequenceReportFields::Duration]].GetInt64()),
-            TestRunResultFromString(serialTestRun[SequenceReportFields::Keys[SequenceReportFields::Result]].GetString()));
-    }
-
-    template<typename TestRunType>
-    AZStd::vector<TestRunType> DeserializeTestRuns(const rapidjson::Value& serialTestRuns)
-    {
-        AZStd::vector<TestRunType> testRuns;
-        testRuns.reserve(serialTestRuns.GetArray().Size());
-        for (const auto& testRun : serialTestRuns.GetArray())
-        {
-            testRuns.emplace_back(DeserializeTestRunBase(testRun));
-        }
-
-        return testRuns;
-    }
-
-    template<typename CompletedTestRunType>
-    AZStd::vector<CompletedTestRunType> DeserializeCompletedTestRuns(const rapidjson::Value& serialCompletedTestRuns)
-    {
-        AZStd::vector<CompletedTestRunType> testRuns;
-        testRuns.reserve(serialCompletedTestRuns.GetArray().Size());
-        for (const auto& testRun : serialCompletedTestRuns.GetArray())
-        {
-            testRuns.emplace_back(
-                DeserializeTestRunBase(testRun), DeserializeTests(testRun[SequenceReportFields::Keys[SequenceReportFields::Tests]]));
-        }
-
-        return testRuns;
-    }
-
-    Client::TestRunReport DeserializeTestRunReport(const rapidjson::Value& serialTestRunReport)
-    {
-        return Client::TestRunReport(
-            TestSequenceResultFromString(serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::Result]].GetString()),
-            TimePointFromMsInt64(serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::StartTime]].GetInt64()),
-            AZStd::chrono::milliseconds(serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::Duration]].GetInt64()),
-            DeserializeCompletedTestRuns<Client::PassingTestRun>(
-                serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::PassingTestRuns]]),
-            DeserializeCompletedTestRuns<Client::FailingTestRun>(
-                serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::FailingTestRuns]]),
-            DeserializeTestRuns<Client::TestRunWithExecutionFailure>(
-                serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::ExecutionFailureTestRuns]]),
-            DeserializeTestRuns<Client::TimedOutTestRun>(
-                serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::TimedOutTestRuns]]),
-            DeserializeTestRuns<Client::UnexecutedTestRun>(
-                serialTestRunReport[SequenceReportFields::Keys[SequenceReportFields::UnexecutedTestRuns]]));
-    }
-
-    Client::TestRunSelection DeserializeTestSelection(const rapidjson::Value& serialTestRunSelection)
-    {
-        const auto extractTestTargetNames = [](const rapidjson::Value& serialTestTargets)
-        {
-            AZStd::vector<AZStd::string> testTargets;
-            testTargets.reserve(serialTestTargets.GetArray().Size());
-            for (const auto& testTarget : serialTestTargets.GetArray())
-            {
-                testTargets.emplace_back(testTarget.GetString());
-            }
-
-            return testTargets;
-        };
-
-        return Client::TestRunSelection(
-            extractTestTargetNames(serialTestRunSelection[SequenceReportFields::Keys[SequenceReportFields::IncludedTestRuns]]),
-            extractTestTargetNames(serialTestRunSelection[SequenceReportFields::Keys[SequenceReportFields::ExcludedTestRuns]]));
-    }
-
-    PolicyStateBase DeserializePolicyStateBaseMembers(const rapidjson::Value& serialPolicyState)
-    {
-        return
-        {
-            ExecutionFailurePolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::ExecutionFailure]].GetString()),
-            FailedTestCoveragePolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::CoverageFailure]].GetString()),
-            TestFailurePolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::TestFailure]].GetString()),
-            IntegrityFailurePolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::IntegrityFailure]].GetString()),
-            TestShardingPolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::TestSharding]].GetString()),
-            TargetOutputCapturePolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::TargetOutputCapture]].GetString())
-        };
-    }
-
-    SequencePolicyState DeserializePolicyStateMembers(const rapidjson::Value& serialPolicyState)
-    {
-        return { DeserializePolicyStateBaseMembers(serialPolicyState) };
-    }
-
-    SafeImpactAnalysisSequencePolicyState DeserializeSafeImpactAnalysisPolicyStateMembers(const rapidjson::Value& serialPolicyState)
-    {
-        return
-        {
-            DeserializePolicyStateBaseMembers(serialPolicyState),
-            TestPrioritizationPolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::TestPrioritization]].GetString())
-        };
-    }
-
-    ImpactAnalysisSequencePolicyState DeserializeImpactAnalysisSequencePolicyStateMembers(const rapidjson::Value& serialPolicyState)
-    {
-        return
-        {
-            DeserializePolicyStateBaseMembers(serialPolicyState),
-            TestPrioritizationPolicyFromString(serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::TestPrioritization]].GetString()),
-            DynamicDependencyMapPolicyFromString(
-                serialPolicyState[SequenceReportFields::Keys[SequenceReportFields::DynamicDependencyMap]].GetString())
-        };
-    }
-
-    template<typename PolicyStateType>
-    PolicyStateType DeserializePolicyStateType(const rapidjson::Value& serialPolicyStateType)
-    {
-        if constexpr (AZStd::is_same_v<PolicyStateType, SequencePolicyState>)
-        {
-            return DeserializePolicyStateMembers(serialPolicyStateType);
-        }
-        else if constexpr (AZStd::is_same_v<PolicyStateType, SafeImpactAnalysisSequencePolicyState>)
-        {
-            return DeserializeSafeImpactAnalysisPolicyStateMembers(serialPolicyStateType);
-        }
-        else if constexpr (AZStd::is_same_v<PolicyStateType, ImpactAnalysisSequencePolicyState>)
-        {
-            return DeserializeImpactAnalysisSequencePolicyStateMembers(serialPolicyStateType);
-        }
-        else
-        {
-            // static assert needs to be depend on template parameters to defer evaluation until the function is instantiate
-            // with the type that isn't a valid policy state type, otherwise the compiler is free to evaluate the static assert
-            // without an instantiation of the template
-            static_assert(!AZStd::same_as<PolicyStateType, PolicyStateType>, "Template parameter must be a valid policy state type");
-        }
-    }
-
-    template<typename SequenceReportBaseType>
-    SequenceReportBaseType DeserialiseSequenceReportBase(const rapidjson::Value& serialSequenceReportBase)
-    {
-        const auto type = SequenceReportTypeFromString(serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::Type]].GetString());
-        AZ_TestImpact_Eval(
-            type == SequenceReportBaseType::ReportType,
-            SequenceReportException, AZStd::string::format(
-                "The JSON sequence report type '%s' does not match the constructed report type",
-                serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::Type]].GetString()));
-
-        const auto testTargetTimeout =
-            serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::TestTargetTimeout]].GetUint64();
-        const auto globalTimeout =
-            serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::GlobalTimeout]].GetUint64();
-
-        return SequenceReportBaseType(
-            serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::MaxConcurrency]].GetUint64(),
-            testTargetTimeout ? AZStd::optional<AZStd::chrono::milliseconds>{ testTargetTimeout } : AZStd::nullopt,
-            globalTimeout ? AZStd::optional<AZStd::chrono::milliseconds>{ globalTimeout } : AZStd::nullopt,
-            DeserializePolicyStateType<typename SequenceReportBaseType::PolicyState>(serialSequenceReportBase),
-            SuiteTypeFromString(serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::Suite]].GetString()),
-            DeserializeTestSelection(serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::SelectedTestRuns]]),
-            DeserializeTestRunReport(serialSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::SelectedTestRunReport]]));
-    }
-
-    template<typename DerivedDraftingSequenceReportType>
-    Client::DraftingSequenceReportBase<DerivedDraftingSequenceReportType::ReportType, typename DerivedDraftingSequenceReportType::PolicyState>
-        DeserializeDraftingSequenceReportBase(const rapidjson::Value& serialDraftingSequenceReportBase)
-    {
-        AZStd::vector<AZStd::string> draftingTestRuns;
-        draftingTestRuns.reserve(
-            serialDraftingSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::DraftedTestRuns]].GetArray().Size());
-        for (const auto& testRun :
-             serialDraftingSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::DraftedTestRuns]].GetArray())
-        {
-            draftingTestRuns.emplace_back(testRun.GetString());
-        }
-
-        using SequenceBase =
-            Client::SequenceReportBase<DerivedDraftingSequenceReportType::ReportType, typename DerivedDraftingSequenceReportType::PolicyState>;
-        using DraftingSequenceBase =
-            Client::DraftingSequenceReportBase<DerivedDraftingSequenceReportType::ReportType, typename DerivedDraftingSequenceReportType::PolicyState>;
-
-        return DraftingSequenceBase(
-            DeserialiseSequenceReportBase<SequenceBase>(serialDraftingSequenceReportBase),
-            AZStd::move(draftingTestRuns),
-            DeserializeTestRunReport(serialDraftingSequenceReportBase[SequenceReportFields::Keys[SequenceReportFields::DraftedTestRunReport]]));
-    }
-
-    rapidjson::Document OpenSequenceReportJson(const AZStd::string& sequenceReportJson)
-    {
-        rapidjson::Document doc;
-
-        if (doc.Parse<0>(sequenceReportJson.c_str()).HasParseError())
-        {
-            throw SequenceReportException("Could not parse sequence report data");
-        }
-
-        return doc;
-    }
-
-    Client::RegularSequenceReport DeserializeRegularSequenceReport(const AZStd::string& sequenceReportJson)
-    {
-        const auto doc = OpenSequenceReportJson(sequenceReportJson);
-        return DeserialiseSequenceReportBase<Client::RegularSequenceReport>(doc);
-    }
-
-    Client::SeedSequenceReport DeserializeSeedSequenceReport(const AZStd::string& sequenceReportJson)
-    {
-        const auto doc = OpenSequenceReportJson(sequenceReportJson);
-        return DeserialiseSequenceReportBase<Client::SeedSequenceReport>(doc);
-    }
-
-    Client::ImpactAnalysisSequenceReport DeserializeImpactAnalysisSequenceReport(const AZStd::string& sequenceReportJson)
-    {
-        const auto doc = OpenSequenceReportJson(sequenceReportJson);
-
-        AZStd::vector<AZStd::string> discardedTestRuns;
-        discardedTestRuns.reserve(doc[SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRuns]].GetArray().Size());
-        for (const auto& testRun : doc[SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRuns]].GetArray())
-        {
-            discardedTestRuns.emplace_back(testRun.GetString());
-        }
-
-        return Client::ImpactAnalysisSequenceReport(
-            DeserializeDraftingSequenceReportBase<Client::ImpactAnalysisSequenceReport>(doc), AZStd::move(discardedTestRuns));
-    }
-
-    Client::SafeImpactAnalysisSequenceReport DeserializeSafeImpactAnalysisSequenceReport(const AZStd::string& sequenceReportJson)
-    {
-        const auto doc = OpenSequenceReportJson(sequenceReportJson);
-
-        return Client::SafeImpactAnalysisSequenceReport(
-            DeserializeDraftingSequenceReportBase<Client::SafeImpactAnalysisSequenceReport>(doc),
-            DeserializeTestSelection(doc[SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRuns]]),
-            DeserializeTestRunReport(doc[SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRunReport]]));
-    }
 } // namespace TestImpact
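
    Note on the hunk at the top of this file's diff: the serialized report now carries both the suite set and the
    suite-label exclude set as single concatenated strings. A minimal sketch of how those two fields might look once
    written, using vanilla rapidjson includes; the key names and values are illustrative stand-ins only (the real keys
    come from SequenceReportFields::Keys and the values from SuiteSetAsString/SuiteLabelExcludeSetAsString):

        #include <iostream>
        #include <rapidjson/stringbuffer.h>
        #include <rapidjson/writer.h>

        int main()
        {
            rapidjson::StringBuffer buffer;
            rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);

            writer.StartObject();
            writer.Key("suite");                 // illustrative key name
            writer.String("main-periodic");      // e.g. a suite set of {"main", "periodic"}
            writer.Key("suiteLabelExclude");     // illustrative key name
            writer.String("requires_gpu");       // e.g. a single excluded suite label
            writer.EndObject();

            std::cout << buffer.GetString() << "\n"; // {"suite":"main-periodic","suiteLabelExclude":"requires_gpu"}
        }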

+ 7 - 266
Code/Tools/TestImpactFramework/Runtime/Common/Code/Source/TestImpactUtils.cpp

@@ -76,21 +76,14 @@ namespace TestImpact
         return files;
     }
 
-    AZStd::string SuiteTypeAsString(SuiteType suiteType)
+    AZStd::string SuiteSetAsString(const SuiteSet& suiteSet)
     {
-        switch (suiteType)
-        {
-        case SuiteType::Main:
-            return "main";
-        case SuiteType::Periodic:
-            return "periodic";
-        case SuiteType::Sandbox:
-            return "sandbox";
-        case SuiteType::AWSI:
-            return "awsi";
-        default:
-            throw(Exception("Unexpected suite type"));
-        }
+        return ConcatenateContainerContentsAsString(suiteSet, "-");
+    }
+
+    AZStd::string SuiteLabelExcludeSetAsString(const SuiteLabelExcludeSet& suiteLabelExcludeSet)
+    {
+        return ConcatenateContainerContentsAsString(suiteLabelExcludeSet, "-");
     }
 
     AZStd::string SequenceReportTypeAsString(Client::SequenceReportType type)
@@ -276,256 +269,4 @@ namespace TestImpact
             throw(Exception(AZStd::string::format("Unexpected client test case result: %u", aznumeric_cast<AZ::u32>(result))));
         }
     }
-
-    SuiteType SuiteTypeFromString(const AZStd::string& suiteType)
-    {
-        if (suiteType == SuiteTypeAsString(SuiteType::Main))
-        {
-            return SuiteType::Main;
-        }
-        else if (suiteType == SuiteTypeAsString(SuiteType::Periodic))
-        {
-            return SuiteType::Periodic;
-        }
-        else if (suiteType == SuiteTypeAsString(SuiteType::Sandbox))
-        {
-            return SuiteType::Sandbox;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected suite type: '%s'", suiteType.c_str()));
-        }
-    }
-
-    Client::SequenceReportType SequenceReportTypeFromString(const AZStd::string& type)
-    {
-        if (type == SequenceReportTypeAsString(Client::SequenceReportType::ImpactAnalysisSequence))
-        {
-            return Client::SequenceReportType::ImpactAnalysisSequence;
-        }
-        else if (type == SequenceReportTypeAsString(Client::SequenceReportType::RegularSequence))
-        {
-            return Client::SequenceReportType::RegularSequence;
-        }
-        else if (type == SequenceReportTypeAsString(Client::SequenceReportType::SafeImpactAnalysisSequence))
-        {
-            return Client::SequenceReportType::SafeImpactAnalysisSequence;
-        }
-        else if (type == SequenceReportTypeAsString(Client::SequenceReportType::SeedSequence))
-        {
-            return Client::SequenceReportType::SeedSequence;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected sequence report type: '%s'", type.c_str()));
-        }
-    }
-
-    Client::TestRunResult TestRunResultFromString(const AZStd::string& result)
-    {
-        if (result == TestRunResultAsString(Client::TestRunResult::AllTestsPass))
-        {
-            return Client::TestRunResult::AllTestsPass;
-        }
-        else if (result == TestRunResultAsString(Client::TestRunResult::FailedToExecute))
-        {
-            return Client::TestRunResult::FailedToExecute;
-        }
-        else if (result == TestRunResultAsString(Client::TestRunResult::NotRun))
-        {
-            return Client::TestRunResult::NotRun;
-        }
-        else if (result == TestRunResultAsString(Client::TestRunResult::TestFailures))
-        {
-            return Client::TestRunResult::TestFailures;
-        }
-        else if (result == TestRunResultAsString(Client::TestRunResult::Timeout))
-        {
-            return Client::TestRunResult::Timeout;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected client test run result: '%s'", result.c_str()));
-        }
-    }
-
-    Client::TestResult TestResultFromString(const AZStd::string& result)
-    {
-        if (result == ClientTestResultAsString(Client::TestResult::Failed))
-        {
-            return Client::TestResult::Failed;
-        }
-        else if (result == ClientTestResultAsString(Client::TestResult::NotRun))
-        {
-            return Client::TestResult::NotRun;
-        }
-        else if (result == ClientTestResultAsString(Client::TestResult::Passed))
-        {
-            return Client::TestResult::Passed;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected client test result: '%s'", result.c_str()));
-        }
-    }
-
-    TestSequenceResult TestSequenceResultFromString(const AZStd::string& result)
-    {
-        if (result == TestSequenceResultAsString(TestSequenceResult::Failure))
-        {
-            return TestSequenceResult::Failure;
-        }
-        else if (result == TestSequenceResultAsString(TestSequenceResult::Success))
-        {
-            return TestSequenceResult::Success;
-        }
-        else if (result == TestSequenceResultAsString(TestSequenceResult::Timeout))
-        {
-            return TestSequenceResult::Timeout;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected test sequence result: '%s'", result.c_str()));
-        }
-    }
-
-    Policy::ExecutionFailure ExecutionFailurePolicyFromString(const AZStd::string& executionFailurePolicy)
-    {
-        if (executionFailurePolicy == ExecutionFailurePolicyAsString(Policy::ExecutionFailure::Abort))
-        {
-            return Policy::ExecutionFailure::Abort;
-        }
-        else if (executionFailurePolicy == ExecutionFailurePolicyAsString(Policy::ExecutionFailure::Continue))
-        {
-            return Policy::ExecutionFailure::Continue;
-        }
-        else if (executionFailurePolicy == ExecutionFailurePolicyAsString(Policy::ExecutionFailure::Ignore))
-        {
-            return Policy::ExecutionFailure::Ignore;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected execution failure policy: '%s'", executionFailurePolicy.c_str()));
-        }
-    }
-
-    Policy::FailedTestCoverage FailedTestCoveragePolicyFromString(const AZStd::string& failedTestCoveragePolicy)
-    {
-        if (failedTestCoveragePolicy == FailedTestCoveragePolicyAsString(Policy::FailedTestCoverage::Discard))
-        {
-            return Policy::FailedTestCoverage::Discard;
-        }
-        else if (failedTestCoveragePolicy == FailedTestCoveragePolicyAsString(Policy::FailedTestCoverage::Keep))
-        {
-            return Policy::FailedTestCoverage::Keep;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected failed test coverage policy: '%s'", failedTestCoveragePolicy.c_str()));
-        }
-    }
-
-    Policy::TestPrioritization TestPrioritizationPolicyFromString(const AZStd::string& testPrioritizationPolicy)
-    {
-        if (testPrioritizationPolicy == TestPrioritizationPolicyAsString(Policy::TestPrioritization::DependencyLocality))
-        {
-            return Policy::TestPrioritization::DependencyLocality;
-        }
-        else if (testPrioritizationPolicy == TestPrioritizationPolicyAsString(Policy::TestPrioritization::None))
-        {
-            return Policy::TestPrioritization::None;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected test prioritization policy: '%s'", testPrioritizationPolicy.c_str()));
-        }
-    }
-
-    Policy::TestFailure TestFailurePolicyFromString(const AZStd::string& testFailurePolicy)
-    {
-        if (testFailurePolicy == TestFailurePolicyAsString(Policy::TestFailure::Abort))
-        {
-            return Policy::TestFailure::Abort;
-        }
-        else if (testFailurePolicy == TestFailurePolicyAsString(Policy::TestFailure::Continue))
-        {
-            return Policy::TestFailure::Continue;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected test failure policy: '%s'", testFailurePolicy.c_str()));
-        }
-    }
-
-    Policy::IntegrityFailure IntegrityFailurePolicyFromString(const AZStd::string& integrityFailurePolicy)
-    {
-        if (integrityFailurePolicy == IntegrityFailurePolicyAsString(Policy::IntegrityFailure::Abort))
-        {
-            return Policy::IntegrityFailure::Abort;
-        }
-        else if (integrityFailurePolicy == IntegrityFailurePolicyAsString(Policy::IntegrityFailure::Continue))
-        {
-            return Policy::IntegrityFailure::Continue;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected integration failure policy: '%s'", integrityFailurePolicy.c_str()));
-        }
-    }
-
-    Policy::DynamicDependencyMap DynamicDependencyMapPolicyFromString(const AZStd::string& dynamicDependencyMapPolicy)
-    {
-        if (dynamicDependencyMapPolicy == DynamicDependencyMapPolicyAsString(Policy::DynamicDependencyMap::Discard))
-        {
-            return Policy::DynamicDependencyMap::Discard;
-        }
-        else if (dynamicDependencyMapPolicy == DynamicDependencyMapPolicyAsString(Policy::DynamicDependencyMap::Update))
-        {
-            return Policy::DynamicDependencyMap::Update;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected dynamic dependency map policy: '%s'", dynamicDependencyMapPolicy.c_str()));
-        }
-    }
-
-    Policy::TestSharding TestShardingPolicyFromString(const AZStd::string& testShardingPolicy)
-    {
-        if (testShardingPolicy == TestShardingPolicyAsString(Policy::TestSharding::Always))
-        {
-            return Policy::TestSharding::Always;
-        }
-        else if (testShardingPolicy == TestShardingPolicyAsString(Policy::TestSharding::Never))
-        {
-            return Policy::TestSharding::Never;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected test sharding policy: '%s'", testShardingPolicy.c_str()));
-        }
-    }
-
-    Policy::TargetOutputCapture TargetOutputCapturePolicyFromString(const AZStd::string& targetOutputCapturePolicy)
-    {
-        if (targetOutputCapturePolicy == TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture::File))
-        {
-            return Policy::TargetOutputCapture::File;
-        }
-        else if (targetOutputCapturePolicy == TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture::None))
-        {
-            return Policy::TargetOutputCapture::None;
-        }
-        else if (targetOutputCapturePolicy == TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture::StdOut))
-        {
-            return Policy::TargetOutputCapture::StdOut;
-        }
-        else if (targetOutputCapturePolicy == TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture::StdOutAndFile))
-        {
-            return Policy::TargetOutputCapture::StdOutAndFile;
-        }
-        else
-        {
-            throw Exception(AZStd::string::format("Unexpected target output capture policy: '%s'", targetOutputCapturePolicy.c_str()));
-        }
-    }
 } // namespace TestImpact
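
    The two new helpers above delegate to ConcatenateContainerContentsAsString with a "-" separator. A minimal sketch
    of that joining behavior, assuming the suite set behaves like an ordered container of strings; standard-library
    types stand in for the AZStd containers, and JoinWithSeparator is a hypothetical helper, not the framework's
    implementation:

        #include <iostream>
        #include <set>
        #include <string>

        // Joins the elements of a string container with the given separator,
        // e.g. {"main", "periodic"} joined with "-" becomes "main-periodic".
        template<typename Container>
        std::string JoinWithSeparator(const Container& container, const std::string& separator)
        {
            std::string result;
            for (const auto& element : container)
            {
                if (!result.empty())
                {
                    result += separator;
                }
                result += element;
            }
            return result;
        }

        int main()
        {
            const std::set<std::string> suiteSet = { "main", "periodic" };
            // Element order depends on the real SuiteSet container type.
            std::cout << JoinWithSeparator(suiteSet, "-") << "\n"; // main-periodic
        }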

+ 2 - 0
Code/Tools/TestImpactFramework/Runtime/Common/Code/testimpactframework_runtime_common_files.cmake

@@ -11,6 +11,7 @@ set(FILES
     Include/Static/Artifact/Factory/TestImpactTestEnumerationSuiteFactory.h
     Include/Static/Artifact/Factory/TestImpactTestRunSuiteFactory.h
     Include/Static/Artifact/Factory/TestImpactModuleCoverageFactory.h
+    Include/Static/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h
     Include/Static/Artifact/Static/TestImpactTestSuiteMeta.h
     Include/Static/Artifact/Static/TestImpactTestTargetMeta.h
     Include/Static/Artifact/Static/TestImpactTargetDescriptor.h
@@ -74,6 +75,7 @@ set(FILES
     Source/Artifact/Factory/TestImpactTestRunSuiteFactory.cpp
     Source/Artifact/Factory/TestImpactModuleCoverageFactory.cpp
     Source/Artifact/Factory/TestImpactTargetDescriptorFactory.cpp
+    Source/Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.cpp
     Source/Process/TestImpactProcess.cpp
     Source/Process/TestImpactProcessInfo.cpp
     Source/Process/JobRunner/TestImpactProcessJobMeta.cpp

+ 6 - 3
Code/Tools/TestImpactFramework/Runtime/Native/Code/Include/TestImpactFramework/Native/TestImpactNativeRuntime.h

@@ -30,7 +30,8 @@ namespace TestImpact
         //! @param dataFile The optional data file to be used instead of that specified in the config file.
         //! @param previousRunDataFile The optional previous run data file to be used instead of that specified in the config file.
         //! @param testsToExclude The tests to exclude from the run (will override any excluded tests in the config file).
-        //! @param suiteFilter The test suite for which the coverage data and test selection will draw from.
+        //! @param suiteSet The test suites from which the coverage data and test selection will be drawn.
+        //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
         //! @param executionFailurePolicy Determines how to handle test targets that fail to execute.
         //! @param executionFailureDraftingPolicy Determines how test targets that previously failed to execute are drafted into subsequent test sequences.
         //! @param testFailurePolicy Determines how to handle test targets that report test failures.
@@ -41,7 +42,8 @@ namespace TestImpact
             const AZStd::optional<RepoPath>& dataFile,
             [[maybe_unused]]const AZStd::optional<RepoPath>& previousRunDataFile,
             const AZStd::vector<ExcludedTarget>& testsToExclude,
-            SuiteType suiteFilter,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             Policy::ExecutionFailure executionFailurePolicy,
             Policy::FailedTestCoverage failedTestCoveragePolicy,
             Policy::TestFailure testFailurePolicy,
@@ -159,7 +161,8 @@ namespace TestImpact
 
         RuntimeConfig m_config;
         RepoPath m_sparTiaFile;
-        SuiteType m_suiteFilter;
+        SuiteSet m_suiteSet;
+        SuiteLabelExcludeSet m_suiteLabelExcludeSet;
         Policy::ExecutionFailure m_executionFailurePolicy;
         Policy::FailedTestCoverage m_failedTestCoveragePolicy;
         Policy::TestFailure m_testFailurePolicy;

+ 36 - 23
Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/Artifact/Factory/TestImpactNativeTestTargetMetaMapFactory.cpp

@@ -9,13 +9,15 @@
 #include <TestImpactFramework/TestImpactUtils.h>
 
 #include <Artifact/Factory/TestImpactNativeTestTargetMetaMapFactory.h>
+#include <Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h>
 #include <Artifact/TestImpactArtifactException.h>
 
 #include <AzCore/JSON/document.h>
 
 namespace TestImpact
 {
-    NativeTestTargetMetaMap NativeTestTargetMetaMapFactory(const AZStd::string& masterTestListData, SuiteType suiteType)
+    NativeTestTargetMetaMap NativeTestTargetMetaMapFactory(
+        const AZStd::string& masterTestListData, const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet)
     {
         // Keys for pertinent JSON node and attribute names
         constexpr const char* Keys[] =
@@ -31,7 +33,8 @@ namespace TestImpact
             "namespace",
             "name",
             "command",
-            "timeout"
+            "timeout",
+            "labels"
         };
 
         enum
@@ -47,7 +50,8 @@ namespace TestImpact
             Namespacekey,
             NameKey,
             CommandKey,
-            TimeoutKey
+            TimeoutKey,
+            SuiteLabelsKey
         };
 
         AZ_TestImpact_Eval(!masterTestListData.empty(), ArtifactException, "Test meta-data cannot be empty");
@@ -64,34 +68,43 @@ namespace TestImpact
         for (const auto& test : tests)
         {
             NativeTestTargetMeta testMeta;
+            AZStd::string name = test[Keys[NameKey]].GetString();
+            AZ_TestImpact_Eval(!name.empty(), ArtifactException, "Test name field cannot be empty");
+            testMeta.m_testTargetMeta.m_namespace = test[Keys[Namespacekey]].GetString();
+
+            if (const auto buildTypeString = test[Keys[LaunchMethodKey]].GetString(); strcmp(buildTypeString, Keys[TestRunnerKey]) == 0)
+            {
+                testMeta.m_launchMeta.m_launchMethod = LaunchMethod::TestRunner;
+            }
+            else if (strcmp(buildTypeString, Keys[StandAloneKey]) == 0)
+            {
+                testMeta.m_launchMeta.m_launchMethod = LaunchMethod::StandAlone;
+            }
+            else
+            {
+                throw(ArtifactException("Unexpected test build type"));
+            }
+
             const auto testSuites = test[Keys[TestSuitesKey]].GetArray();
             for (const auto& suite : testSuites)
             {
-                // Check to see if this test target has the suite we're looking for
+                // Check to see if this test target has a suite we're looking for (first suite to
+                // match will be "the" suite for this test)
                 if (const auto suiteName = suite[Keys[SuiteKey]].GetString();
-                    strcmp(SuiteTypeAsString(suiteType).c_str(), suiteName) == 0)
+                    suiteSet.contains(suiteName))
                 {
-                    testMeta.m_testTargetMeta.m_namespace = test[Keys[Namespacekey]].GetString();
-                    testMeta.m_testTargetMeta.m_suiteMeta.m_name = suiteName;
-                    testMeta.m_testTargetMeta.m_suiteMeta.m_timeout = AZStd::chrono::seconds{ suite[Keys[TimeoutKey]].GetUint() };
-                    testMeta.m_launchMeta.m_customArgs = suite[Keys[CommandKey]].GetString();
-                    if (const auto buildTypeString = test[Keys[LaunchMethodKey]].GetString();
-                        strcmp(buildTypeString, Keys[TestRunnerKey]) == 0)
-                    {
-                        testMeta.m_launchMeta.m_launchMethod = LaunchMethod::TestRunner;
-                    }
-                    else if (strcmp(buildTypeString, Keys[StandAloneKey]) == 0)
-                    {
-                        testMeta.m_launchMeta.m_launchMethod = LaunchMethod::StandAlone;
-                    }
-                    else
+                    if (auto labelSet = ExtractTestSuiteLabelSet(suite[Keys[SuiteLabelsKey]].GetArray(), suiteLabelExcludeSet);
+                        labelSet.has_value())
                     {
-                        throw(ArtifactException("Unexpected test build type"));
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_labelSet = AZStd::move(labelSet.value());
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_name = suiteName;
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_timeout = AZStd::chrono::seconds{ suite[Keys[TimeoutKey]].GetUint() };
+                        testMeta.m_launchMeta.m_customArgs = suite[Keys[CommandKey]].GetString();
+                        testMetas.emplace(AZStd::move(name), AZStd::move(testMeta));
                     }
 
-                    AZStd::string name = test[Keys[NameKey]].GetString();
-                    AZ_TestImpact_Eval(!name.empty(), ArtifactException, "Test name field cannot be empty");
-                    testMetas.emplace(AZStd::move(name), AZStd::move(testMeta));
+                    // We either have one matching suite or the suite contains a label in the exclude set, so we break
+                    // out of the suite loop
                     break;
                 }
             }
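
    ExtractTestSuiteLabelSet returns an empty optional when the suite carries an excluded label, which is why the
    target is only registered when labelSet.has_value(). A plausible reading of that contract, sketched with
    standard-library stand-ins; FilterSuiteLabels is hypothetical and is not the framework's ExtractTestSuiteLabelSet
    (assumes C++20 for unordered_set::contains):

        #include <optional>
        #include <string>
        #include <unordered_set>
        #include <vector>

        // Returns the suite's label set, or std::nullopt if any label is in the exclude set
        // (in which case the test target is skipped entirely).
        std::optional<std::unordered_set<std::string>> FilterSuiteLabels(
            const std::vector<std::string>& suiteLabels,
            const std::unordered_set<std::string>& suiteLabelExcludeSet)
        {
            std::unordered_set<std::string> labelSet;
            for (const auto& label : suiteLabels)
            {
                if (suiteLabelExcludeSet.contains(label))
                {
                    return std::nullopt; // an excluded label drops the whole suite
                }
                labelSet.insert(label);
            }
            return labelSet;
        }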

+ 5 - 2
Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/Artifact/Factory/TestImpactNativeTestTargetMetaMapFactory.h

@@ -9,13 +9,16 @@
 #pragma once
 
 #include <TestImpactFramework/TestImpactTestSequence.h>
+
 #include <Artifact/Static/TestImpactNativeTestTargetMeta.h>
 
 namespace TestImpact
 {
     //! Constructs a list of test target meta-data artifacts of the specified suite type from the specified master test list data.
     //! @param masterTestListData The raw master test list data in JSON format.
-    //! @param suiteType The suite type to select the target meta-data artifacts from.
+    //! @param suiteSet The suites to select the target meta-data artifacts from.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @return The constructed list of test target meta-data artifacts.
-    NativeTestTargetMetaMap NativeTestTargetMetaMapFactory(const AZStd::string& masterTestListData, SuiteType suiteType);
+    NativeTestTargetMetaMap NativeTestTargetMetaMapFactory(
+        const AZStd::string& masterTestListData, const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet);
 } // namespace TestImpact

+ 38 - 5
Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/TestEngine/Native/TestImpactNativeTestEngine.cpp

@@ -53,7 +53,7 @@ namespace TestImpact
         return AZStd::nullopt;
     }
 
-    //!
+    //! Determines the test run result of a native instrumented test run.
     AZStd::optional<Client::TestRunResult> NativeInstrumentedTestRunnerErrorCodeChecker(
         const typename NativeInstrumentedTestRunner::JobInfo& jobInfo, const JobMeta& meta)
     {
@@ -93,6 +93,28 @@ namespace TestImpact
         return AZStd::nullopt;
     }
 
+    //! Checks the successfully completed test runs for missing coverage which would compromise the integrity of the dynamic dependency map.
+    AZStd::string GenerateIntegrityErrorString(const TestEngineInstrumentedRunResult<NativeTestTarget, TestCoverage>& engineJobs)
+    {
+        // Now that we know the true result of successful jobs that return non-zero we can deduce if we have any integrity failures
+        // where a test target ran and completed its tests without incident yet failed to produce coverage data
+        AZStd::string integrityErrors;
+        const auto& [result, engineRuns] = engineJobs;
+        for (const auto& engineRun : engineRuns)
+        {
+            if (const auto testResult = engineRun.GetTestResult();
+                (testResult == Client::TestRunResult::AllTestsPass || testResult == Client::TestRunResult::TestFailures) &&
+                !engineRun.GetCoverge().has_value())
+            {
+                integrityErrors += AZStd::string::format(
+                    "Test target %s completed its test run but failed to produce coverage data\n",
+                    engineRun.GetTestTarget()->GetName().c_str());
+            }
+        }
+
+        return integrityErrors;
+    }
+
     // Type trait for the test enumerator
     template<>
     struct TestJobRunnerTrait<NativeTestEnumerator>
@@ -191,8 +213,7 @@ namespace TestImpact
 
         const auto jobInfos = m_instrumentedTestJobInfoGenerator->GenerateJobInfos(testTargets);
 
-        return GenerateInstrumentedRunResult(
-            RunTests(
+        const auto result = RunTests(
                 m_instrumentedTestRunner.get(),
                 jobInfos,
                 testTargets,
@@ -203,7 +224,19 @@ namespace TestImpact
                 testTargetTimeout,
                 globalTimeout,
                 callback,
-                AZStd::nullopt),
-            integrityFailurePolicy);
+                AZStd::nullopt);
+
+            if(const auto integrityErrors = GenerateIntegrityErrorString(result);
+                !integrityErrors.empty())
+            {
+                AZ_TestImpact_Eval(
+                        integrityFailurePolicy != Policy::IntegrityFailure::Abort,
+                        TestEngineException,
+                        integrityErrors);
+
+                AZ_Error("InstrumentedRun", false, integrityErrors.c_str());
+            }
+
+            return result;
     }
 } // namespace TestImpact
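
    A condensed stand-in for the integrity handling introduced above: a run that completed its tests but produced no
    coverage is reported as an integrity error, which either aborts the sequence (Abort policy) or is merely logged.
    The names below are hypothetical; the real code operates on TestEngineInstrumentedRunResult and uses
    AZ_TestImpact_Eval/AZ_Error:

        #include <iostream>
        #include <stdexcept>
        #include <string>
        #include <vector>

        struct CompletedRun
        {
            std::string targetName;
            bool producedCoverage;
        };

        void CheckCoverageIntegrity(const std::vector<CompletedRun>& runs, bool abortOnIntegrityFailure)
        {
            std::string integrityErrors;
            for (const auto& run : runs)
            {
                if (!run.producedCoverage)
                {
                    integrityErrors +=
                        "Test target " + run.targetName + " completed its test run but failed to produce coverage data\n";
                }
            }

            if (!integrityErrors.empty())
            {
                if (abortOnIntegrityFailure)
                {
                    throw std::runtime_error(integrityErrors); // Abort policy
                }
                std::cerr << integrityErrors; // Continue policy: log and carry on
            }
        }

        int main()
        {
            CheckCoverageIntegrity({ { "SomeTests", true }, { "OtherTests", false } }, false);
        }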

+ 28 - 16
Code/Tools/TestImpactFramework/Runtime/Native/Code/Source/TestImpactNativeRuntime.cpp

@@ -25,10 +25,11 @@
 
 namespace TestImpact
 {
-    NativeTestTargetMetaMap ReadNativeTestTargetMetaMapFile(SuiteType suiteFilter, const RepoPath& testTargetMetaConfigFile)
+    NativeTestTargetMetaMap ReadNativeTestTargetMetaMapFile(
+        const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet, const RepoPath& testTargetMetaConfigFile)
     {
         const auto masterTestListData = ReadFileContents<RuntimeException>(testTargetMetaConfigFile);
-        return NativeTestTargetMetaMapFactory(masterTestListData, suiteFilter);
+        return NativeTestTargetMetaMapFactory(masterTestListData, suiteSet, suiteLabelExcludeSet);
     }
 
     NativeRuntime::NativeRuntime(
@@ -36,7 +37,8 @@ namespace TestImpact
         const AZStd::optional<RepoPath>& dataFile,
         [[maybe_unused]] const AZStd::optional<RepoPath>& previousRunDataFile,
         const AZStd::vector<ExcludedTarget>& testsToExclude,
-        SuiteType suiteFilter,
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
         Policy::ExecutionFailure executionFailurePolicy,
         Policy::FailedTestCoverage failedTestCoveragePolicy,
         Policy::TestFailure testFailurePolicy,
@@ -45,7 +47,8 @@ namespace TestImpact
         Policy::TargetOutputCapture targetOutputCapture,
         AZStd::optional<size_t> maxConcurrency)
         : m_config(AZStd::move(config))
-        , m_suiteFilter(suiteFilter)
+        , m_suiteSet(suiteSet)
+        , m_suiteLabelExcludeSet(suiteLabelExcludeSet)
         , m_executionFailurePolicy(executionFailurePolicy)
         , m_failedTestCoveragePolicy(failedTestCoveragePolicy)
         , m_testFailurePolicy(testFailurePolicy)
@@ -58,7 +61,7 @@ namespace TestImpact
         auto targetDescriptors = ReadTargetDescriptorFiles(m_config.m_commonConfig.m_buildTargetDescriptor);
         auto buildTargets = CompileNativeTargetLists(
             AZStd::move(targetDescriptors),
-            ReadNativeTestTargetMetaMapFile(suiteFilter, m_config.m_commonConfig.m_testTargetMeta.m_metaFile));
+            ReadNativeTestTargetMetaMapFile(m_suiteSet, m_suiteLabelExcludeSet, m_config.m_commonConfig.m_testTargetMeta.m_metaFile));
         auto&& [productionTargets, testTargets] = buildTargets;
         m_buildTargets = AZStd::make_unique<BuildTargetList<ProductionTarget, TestTarget>>(
             AZStd::move(testTargets), AZStd::move(productionTargets));
@@ -107,8 +110,8 @@ namespace TestImpact
             }
             else
             {
-                m_sparTiaFile =
-                    m_config.m_workspace.m_active.m_root / RepoPath(SuiteTypeAsString(m_suiteFilter)) / m_config.m_workspace.m_active.m_sparTiaFile;
+                m_sparTiaFile = m_config.m_workspace.m_active.m_root / RepoPath(SuiteSetAsString(m_suiteSet)) /
+                    m_config.m_workspace.m_active.m_sparTiaFile;
             }
            
             // Populate the dynamic dependency map with the existing source coverage data (if any)
@@ -145,7 +148,10 @@ namespace TestImpact
             AZ_Printf(
                 LogCallSite,
                 AZStd::string::format(
-                    "No test impact analysis data found for suite '%s' at %s\n", SuiteTypeAsString(m_suiteFilter).c_str(), m_sparTiaFile.c_str()).c_str());
+                    "No test impact analysis data found for suite '%s' at %s\n",
+                    SuiteSetAsString(m_suiteSet).c_str(),
+                    m_sparTiaFile.c_str())
+                    .c_str());
         }
     }
 
@@ -246,7 +252,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests);
         }
 
         // Run the test targets and collect the test run results
@@ -267,7 +273,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSequencePolicyState(),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));
 
@@ -295,6 +302,7 @@ namespace TestImpact
         // Draft in the test targets that have no coverage entries in the dynamic dependency map
         const AZStd::vector<const TestTarget*> draftedTestTargets = m_dynamicDependencyMap->GetNotCoveringTests();
 
+        // Use test impact analysis to select the tests and remove any tests from the discarded set that exist in the drafted set
         const auto selectCoveringTestTargetsAndPruneDraftedFromDiscarded =
             [this, &draftedTestTargets, &changeList, testPrioritizationPolicy]()
         {
@@ -374,7 +382,8 @@ namespace TestImpact
             return ImpactAnalysisTestSequenceWrapper(
                 m_maxConcurrency,
                 GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
-                m_suiteFilter,
+                m_suiteSet,
+                m_suiteLabelExcludeSet,
                 sequenceTimer,
                 instrumentedTestRun,
                 includedSelectedTestTargets,
@@ -393,7 +402,8 @@ namespace TestImpact
             return ImpactAnalysisTestSequenceWrapper(
                 m_maxConcurrency,
                 GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
-                m_suiteFilter,
+                m_suiteSet,
+                m_suiteLabelExcludeSet,
                 sequenceTimer,
                 regularTestRun,
                 includedSelectedTestTargets,
@@ -447,7 +457,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests, discardedTests, draftedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests, discardedTests, draftedTests);
         }
 
         // We share the test run complete handler between the selected, discarded and drafted test runs as to present them together as one
@@ -534,7 +544,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSafeImpactAnalysisSequencePolicyState(testPrioritizationPolicy),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             discardedTests,
             draftedTests,
@@ -601,7 +612,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests);
         }
 
         // Run the test targets and collect the test run results
@@ -623,7 +634,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSequencePolicyState(),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));
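
    The per-suite-set data file path above is built by slotting the concatenated suite-set string in as a directory
    component. A small illustration with std::filesystem as a stand-in for RepoPath; the root and file name values
    are hypothetical placeholders for the workspace configuration:

        #include <filesystem>
        #include <iostream>

        int main()
        {
            const std::filesystem::path activeRoot = "TestImpactFramework/active"; // hypothetical m_workspace.m_active.m_root
            const std::filesystem::path sparTiaFile = "tia.spartia";               // hypothetical m_workspace.m_active.m_sparTiaFile
            const std::string suiteSetAsString = "main-periodic";                  // e.g. SuiteSetAsString of {"main", "periodic"}

            const auto dataFilePath = activeRoot / suiteSetAsString / sparTiaFile;
            std::cout << dataFilePath.generic_string() << "\n"; // TestImpactFramework/active/main-periodic/tia.spartia
        }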
 

+ 17 - 2
Code/Tools/TestImpactFramework/Runtime/Python/Code/Include/TestImpactFramework/Python/TestImpactPythonRuntime.h

@@ -27,12 +27,25 @@ namespace TestImpact
     class PythonRuntime
     {
     public:
+        //! Constructs a runtime with the specified configuration and policies.
+        //! @param config The configuration used for this runtime instance.
+        //! @param dataFile The optional data file to be used instead of that specified in the config file.
+        //! @param previousRunDataFile The optional previous run data file to be used instead of that specified in the config file.
+        //! @param testsToExclude The tests to exclude from the run (will override any excluded tests in the config file).
+        //! @param suiteSet The test suites from which the coverage data and test selection will be drawn.
+        //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
+        //! @param executionFailurePolicy Determines how to handle test targets that fail to execute.
+        //! @param executionFailureDraftingPolicy Determines how test targets that previously failed to execute are drafted into subsequent test sequences.
+        //! @param testFailurePolicy Determines how to handle test targets that report test failures.
+        //! @param integrationFailurePolicy Determines how to handle instances where the build system model and/or test impact analysis data is compromised.
+        //! @param testRunnerPolicy Determines which test runner type to use.
         PythonRuntime(
             PythonRuntimeConfig&& config,
             const AZStd::optional<RepoPath>& dataFile,
             [[maybe_unused]] const AZStd::optional<RepoPath>& previousRunDataFile,
             const AZStd::vector<ExcludedTarget>& testsToExclude,
-            SuiteType suiteFilter,
+            const SuiteSet& suiteSet,
+            const SuiteLabelExcludeSet& suiteLabelExcludeSet,
             Policy::ExecutionFailure executionFailurePolicy,
             Policy::FailedTestCoverage failedTestCoveragePolicy,
             Policy::TestFailure testFailurePolicy,
@@ -42,6 +55,7 @@ namespace TestImpact
 
         ~PythonRuntime();
 
+        //! Returns true if the runtime has test impact analysis data (either preexisting or generated).
         bool HasImpactAnalysisData() const;
 
         //! Runs a test sequence where all tests with a matching suite in the suite filter and also not on the excluded list are selected.
@@ -151,7 +165,8 @@ namespace TestImpact
 
         PythonRuntimeConfig m_config;
         RepoPath m_sparTiaFile;
-        SuiteType m_suiteFilter;
+        SuiteSet m_suiteSet;
+        SuiteLabelExcludeSet m_suiteLabelExcludeSet;
         Policy::ExecutionFailure m_executionFailurePolicy;
         Policy::FailedTestCoverage m_failedTestCoveragePolicy;
         Policy::TestFailure m_testFailurePolicy;

+ 26 - 14
Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/Artifact/Factory/TestImpactPythonTestTargetMetaMapFactory.cpp

@@ -9,13 +9,15 @@
 #include <TestImpactFramework/TestImpactUtils.h>
 
 #include <Artifact/Factory/TestImpactPythonTestTargetMetaMapFactory.h>
+#include <Artifact/Factory/TestImpactTestTargetMetaMapFactoryUtils.h>
 #include <Artifact/TestImpactArtifactException.h>
 
 #include <AzCore/JSON/document.h>
 
 namespace TestImpact
 {
-    PythonTestTargetMetaMap PythonTestTargetMetaMapFactory(const AZStd::string& testListData, SuiteType suiteType)
+    PythonTestTargetMetaMap PythonTestTargetMetaMapFactory(
+        const AZStd::string& testListData, const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet)
     {
         // Keys for pertinent JSON node and attribute names
         constexpr const char* Keys[] =
@@ -29,7 +31,8 @@ namespace TestImpact
             "name",
             "timeout",
             "script",
-            "command"
+            "command",
+            "labels"
         };
 
         enum
@@ -43,7 +46,8 @@ namespace TestImpact
             NameKey,
             TimeoutKey,
             ScriptKey,
-            TestCommandKey
+            TestCommandKey,
+            SuiteLabelsKey
         };
 
         AZ_TestImpact_Eval(!testListData.empty(), ArtifactException, "Test meta-data cannot be empty");
@@ -60,23 +64,31 @@ namespace TestImpact
         for (const auto& test : tests)
         {
             PythonTestTargetMeta testMeta;
+            AZStd::string name = test[Keys[NameKey]].GetString();
+            AZ_TestImpact_Eval(!name.empty(), ArtifactException, "Test name field cannot be empty");
+            testMeta.m_testTargetMeta.m_namespace = test[Keys[NamespaceKey]].GetString();
+
             const auto testSuites = test[Keys[TestSuitesKey]].GetArray();
             for (const auto& suite : testSuites)
             {
-                // Check to see if this test target has the suite we're looking for
+                // Check to see if this test target has a suite we're looking for (first suite to
+                // match will be "the" suite for this test)
                 if (const auto suiteName = suite[Keys[SuiteKey]].GetString();
-                    strcmp(SuiteTypeAsString(suiteType).c_str(), suiteName) == 0)
+                    suiteSet.contains(suiteName))
                 {
-                    testMeta.m_testTargetMeta.m_namespace = test[Keys[NamespaceKey]].GetString();
-                    testMeta.m_testTargetMeta.m_suiteMeta.m_name = suiteName;
-                    testMeta.m_testTargetMeta.m_suiteMeta.m_timeout = AZStd::chrono::seconds{ suite[Keys[TimeoutKey]].GetUint() };
-                    testMeta.m_scriptMeta.m_scriptPath = suite[Keys[ScriptKey]].GetString();
-                    testMeta.m_scriptMeta.m_testCommand = suite[Keys[TestCommandKey]].GetString();
+                    if (auto labelSet = ExtractTestSuiteLabelSet(suite[Keys[SuiteLabelsKey]].GetArray(), suiteLabelExcludeSet);
+                        labelSet.has_value())
+                    {
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_labelSet = AZStd::move(labelSet.value());
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_name = suiteName;
+                        testMeta.m_testTargetMeta.m_suiteMeta.m_timeout = AZStd::chrono::seconds{ suite[Keys[TimeoutKey]].GetUint() };
+                        testMeta.m_scriptMeta.m_scriptPath = suite[Keys[ScriptKey]].GetString();
+                        testMeta.m_scriptMeta.m_testCommand = suite[Keys[TestCommandKey]].GetString();
+                        testMetas.emplace(AZStd::move(name), AZStd::move(testMeta));
+                    }
 
-                    AZStd::string name = test[Keys[NameKey]].GetString();
-                    AZ_TestImpact_Eval(!name.empty(), ArtifactException, "Test name field cannot be empty");
-                    //AZ_TestImpact_Eval(!testMeta.m_scriptPath.empty(), ArtifactException, "Test script field cannot be empty");
-                    testMetas.emplace(AZStd::move(name), AZStd::move(testMeta));
+                    // We either have one matching suite or the suite contains a label in the exclude set, so we break
+                    // out of the suite loop
                     break;
                 }
             }

+ 6 - 3
Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/Artifact/Factory/TestImpactPythonTestTargetMetaMapFactory.h

@@ -8,14 +8,17 @@
 
 #pragma once
 
-#include <Artifact/Static/TestImpactPythonTestTargetMeta.h>
 #include <TestImpactFramework/TestImpactTestSequence.h>
 
+#include <Artifact/Static/TestImpactPythonTestTargetMeta.h>
+
 namespace TestImpact
 {
     //! Constructs a list of test target meta-data artifacts of the specified suite type from the specified master test list data.
     //! @param testListData The raw test list data in JSON format.
-    //! @param suiteType The suite type to select the target meta-data artifacts from.
+    //! @param suiteSet The suites to select the target meta-data artifacts from.
+    //! @param suiteLabelExcludeSet Any tests with suites that match a label from this set will be excluded.
     //! @return The constructed list of test target meta-data artifacts.
-    PythonTestTargetMetaMap PythonTestTargetMetaMapFactory(const AZStd::string& testListData, SuiteType suiteType);
+    PythonTestTargetMetaMap PythonTestTargetMetaMapFactory(
+        const AZStd::string& testListData, const SuiteSet& suiteSet, const SuiteLabelExcludeSet& suiteLabelExcludeSet);
 } // namespace TestImpact

+ 4 - 10
Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestEngine/Python/TestImpactPythonTestEngine.cpp

@@ -149,7 +149,6 @@ namespace TestImpact
         InstrumentedRun(
         const AZStd::vector<const PythonTestTarget*>& testTargets,
         Policy::ExecutionFailure executionFailurePolicy,
-        Policy::IntegrityFailure integrityFailurePolicy,
         Policy::TestFailure testFailurePolicy,
         Policy::TargetOutputCapture targetOutputCapture,
         AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
@@ -161,8 +160,7 @@ namespace TestImpact
         if (m_testRunnerPolicy == Policy::TestRunner::UseNullTestRunner)
         {
             // We don't delete the artifacts as they have been left by another test runner (e.g. ctest)
-            return GenerateInstrumentedRunResult(
-            RunTests(
+            return RunTests(
                 m_instrumentedNullTestRunner.get(),
                 jobInfos,
                 testTargets,
@@ -173,14 +171,11 @@ namespace TestImpact
                 testTargetTimeout,
                 globalTimeout,
                 callback,
-                std::nullopt),
-            integrityFailurePolicy);
+                std::nullopt);
         }
         else
         {
-            DeleteXmlArtifacts();
-            return GenerateInstrumentedRunResult(
-                RunTests(
+            return RunTests(
                     m_instrumentedTestRunner.get(),
                     jobInfos,
                     testTargets,
@@ -191,8 +186,7 @@ namespace TestImpact
                     testTargetTimeout,
                     globalTimeout,
                     callback,
-                    std::nullopt),
-                integrityFailurePolicy);
+                    std::nullopt);
         }
     }
 } // namespace TestImpact

+ 0 - 2
Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestEngine/Python/TestImpactPythonTestEngine.h

@@ -78,7 +78,6 @@ namespace TestImpact
         //! about the run.
         //! @param testTargets The test targets to run.
         //! @param executionFailurePolicy Policy for how test execution failures should be handled.
-        //! @param integrityFailurePolicy Policy for how integrity failures of the test impact data and source tree model should be handled.
         //! @param testFailurePolicy Policy for how test targets with failing tests should be handled.
         //! @param targetOutputCapture Policy for how test target standard output should be captured and handled.
         //! @param testTargetTimeout The maximum duration a test target may be in-flight for before being forcefully terminated (infinite if
@@ -91,7 +90,6 @@ namespace TestImpact
         InstrumentedRun(
             const AZStd::vector<const PythonTestTarget*>& testTargets,
             Policy::ExecutionFailure executionFailurePolicy,
-            Policy::IntegrityFailure integrityFailurePolicy,
             Policy::TestFailure testFailurePolicy,
             Policy::TargetOutputCapture targetOutputCapture,
             AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,

+ 38 - 32
Code/Tools/TestImpactFramework/Runtime/Python/Code/Source/TestImpactPythonRuntime.cpp

@@ -25,10 +25,14 @@
 
 namespace TestImpact
 {
-    PythonTestTargetMetaMap ReadPythonTestTargetMetaMapFile(SuiteType suiteFilter, const RepoPath& testTargetMetaConfigFile, const AZStd::string& buildType)
+    PythonTestTargetMetaMap ReadPythonTestTargetMetaMapFile(
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
+        const RepoPath& testTargetMetaConfigFile,
+        const AZStd::string& buildType)
     {
         const auto masterTestListData = ReadFileContents<RuntimeException>(testTargetMetaConfigFile);
-        auto testTargetMetaMap = PythonTestTargetMetaMapFactory(masterTestListData, suiteFilter);
+        auto testTargetMetaMap = PythonTestTargetMetaMapFactory(masterTestListData, suiteSet, suiteLabelExcludeSet);
         for (auto& [name, meta] : testTargetMetaMap)
         {
             meta.m_scriptMeta.m_testCommand = AZStd::regex_replace(meta.m_scriptMeta.m_testCommand, AZStd::regex("\\$\\<CONFIG\\>"), buildType);
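
    The line above rewrites each test command so the CMake-style $<CONFIG> token resolves to the actual build type.
    A minimal sketch of the same substitution with std::regex; the command string and build type here are hypothetical:

        #include <iostream>
        #include <regex>
        #include <string>

        int main()
        {
            std::string testCommand = "pytest --build-directory bin/$<CONFIG> -m smoke"; // hypothetical command from the test-list JSON
            const std::string buildType = "profile";                                     // hypothetical build configuration

            testCommand = std::regex_replace(testCommand, std::regex("\\$\\<CONFIG\\>"), buildType);
            std::cout << testCommand << "\n"; // pytest --build-directory bin/profile -m smoke
        }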
@@ -42,7 +46,8 @@ namespace TestImpact
         const AZStd::optional<RepoPath>& dataFile,
         [[maybe_unused]] const AZStd::optional<RepoPath>& previousRunDataFile,
         const AZStd::vector<ExcludedTarget>& testsToExclude,
-        SuiteType suiteFilter,
+        const SuiteSet& suiteSet,
+        const SuiteLabelExcludeSet& suiteLabelExcludeSet,
         Policy::ExecutionFailure executionFailurePolicy,
         Policy::FailedTestCoverage failedTestCoveragePolicy,
         Policy::TestFailure testFailurePolicy,
@@ -50,7 +55,8 @@ namespace TestImpact
         Policy::TargetOutputCapture targetOutputCapture,
         Policy::TestRunner testRunnerPolicy)
         : m_config(AZStd::move(config))
-        , m_suiteFilter(suiteFilter)
+        , m_suiteSet(suiteSet)
+        , m_suiteLabelExcludeSet(suiteLabelExcludeSet)
         , m_executionFailurePolicy(executionFailurePolicy)
         , m_failedTestCoveragePolicy(failedTestCoveragePolicy)
         , m_testFailurePolicy(testFailurePolicy)
@@ -62,7 +68,8 @@ namespace TestImpact
         auto targetDescriptors = ReadTargetDescriptorFiles(m_config.m_commonConfig.m_buildTargetDescriptor);
         auto buildTargets = CompilePythonTargetLists(
             AZStd::move(targetDescriptors),
-            ReadPythonTestTargetMetaMapFile(suiteFilter, m_config.m_commonConfig.m_testTargetMeta.m_metaFile, m_config.m_commonConfig.m_meta.m_buildConfig));
+            ReadPythonTestTargetMetaMapFile(
+                m_suiteSet, m_suiteLabelExcludeSet, m_config.m_commonConfig.m_testTargetMeta.m_metaFile, m_config.m_commonConfig.m_meta.m_buildConfig));
         auto&& [productionTargets, testTargets] = buildTargets;
         m_buildTargets = AZStd::make_unique<BuildTargetList<ProductionTarget, TestTarget>>(
             AZStd::move(testTargets), AZStd::move(productionTargets));
@@ -102,7 +109,7 @@ namespace TestImpact
             }
             else
             {
-                m_sparTiaFile = m_config.m_workspace.m_active.m_root / RepoPath(SuiteTypeAsString(m_suiteFilter)) /
+                m_sparTiaFile = m_config.m_workspace.m_active.m_root / RepoPath(SuiteSetAsString(m_suiteSet)) /
                     m_config.m_workspace.m_active.m_sparTiaFile;
             }
 
@@ -140,8 +147,8 @@ namespace TestImpact
             AZ_Printf(
                 LogCallSite,
                 AZStd::string::format(
-                    "No test impact analysis data found for suite '%s' at %s\n",
-                    SuiteTypeAsString(m_suiteFilter).c_str(),
+                    "No test impact analysis data found for suites '%s' at %s\n",
+                    SuiteSetAsString(m_suiteSet).c_str(),
                     m_sparTiaFile.c_str())
                     .c_str());
         }
@@ -240,7 +247,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests);
         }
 
         // Run the test targets and collect the test run results
@@ -261,7 +268,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSequencePolicyState(),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));
 
@@ -286,22 +294,19 @@ namespace TestImpact
     {
         const Timer sequenceTimer;
 
-        AZStd::vector<const TestTarget*> draftedTestTargets;
-        if (!HasImpactAnalysisData())
-        {
-            const auto notCovered = m_dynamicDependencyMap->GetNotCoveringTests();
-            for (const auto& testTarget : notCovered)
-            {
-                if (!m_testTargetExcludeList->IsTestTargetFullyExcluded(testTarget))
-                {
-                    draftedTestTargets.push_back(testTarget);
-                }
-            }
-        }
-
         // The test targets that were selected for the change list by the dynamic dependency map and the test targets that were not
         const auto [selectedTestTargets, discardedTestTargets] = SelectCoveringTestTargets(changeList, testPrioritizationPolicy);
 
+        // Set of selected tests so we can prune from drafted tests to avoid duplicate runs
+        const AZStd::unordered_set<const PythonTestTarget*> selectedTestTargetSet(selectedTestTargets.begin(), selectedTestTargets.end());
+
+        // Unlike native test impact analysis, python test impact analysis can have tests with no coverage so we cannot simply
+        // draft in tests without coverage (i.e. new tests, or tests that have yet to successfully execute in previous runs).
+        // Instead, the python test selector will run all parent test target tests when a new python test is added. What we
+        // should do in future versions (for both native and python) is draft in any previous failing tests. For now, we will
+        // leave the drafted set empty.
+        AZStd::vector<const TestTarget*> draftedTestTargets;
+
         // The subset of selected test targets that are not on the configuration's exclude list and those that are
         const auto [includedSelectedTestTargets, excludedSelectedTestTargets] =
             SelectTestTargetsByExcludeList(*m_testTargetExcludeList, selectedTestTargets);
@@ -315,7 +320,6 @@ namespace TestImpact
             return m_testEngine->InstrumentedRun(
                 testsTargets,
                 m_executionFailurePolicy,
-                m_integrationFailurePolicy,
                 m_testFailurePolicy,
                 m_targetOutputCapture,
                 testTargetTimeout,
@@ -341,7 +345,8 @@ namespace TestImpact
             return ImpactAnalysisTestSequenceWrapper(
                 1,
                 GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
-                m_suiteFilter,
+                m_suiteSet,
+                m_suiteLabelExcludeSet,
                 sequenceTimer,
                 instrumentedTestRun,
                 includedSelectedTestTargets,
@@ -360,7 +365,8 @@ namespace TestImpact
             return ImpactAnalysisTestSequenceWrapper(
                 1,
                 GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
-                m_suiteFilter,
+                m_suiteSet,
+                m_suiteLabelExcludeSet,
                 sequenceTimer,
                 instrumentedTestRun,
                 includedSelectedTestTargets,
@@ -415,7 +421,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests, discardedTests, draftedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests, discardedTests, draftedTests);
         }
 
         // We share the test run complete handler between the selected, discarded and drafted test runs as to present them together as one
@@ -431,7 +437,6 @@ namespace TestImpact
             return m_testEngine->InstrumentedRun(
                 testsTargets,
                 m_executionFailurePolicy,
-                m_integrationFailurePolicy,
                 m_testFailurePolicy,
                 m_targetOutputCapture,
                 testTargetTimeout,
@@ -521,7 +526,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSafeImpactAnalysisSequencePolicyState(testPrioritizationPolicy),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             discardedTests,
             draftedTests,
@@ -577,7 +583,7 @@ namespace TestImpact
         // Inform the client that the sequence is about to start
         if (testSequenceStartCallback.has_value())
         {
-            (*testSequenceStartCallback)(m_suiteFilter, selectedTests);
+            (*testSequenceStartCallback)(m_suiteSet, m_suiteLabelExcludeSet, selectedTests);
         }
 
         // Run the test targets and collect the test run results
@@ -585,7 +591,6 @@ namespace TestImpact
         const auto [result, testJobs] = m_testEngine->InstrumentedRun(
             includedTestTargets,
             m_executionFailurePolicy,
-            m_integrationFailurePolicy,
             m_testFailurePolicy,
             m_targetOutputCapture,
             testTargetTimeout,
@@ -599,7 +604,8 @@ namespace TestImpact
             testTargetTimeout,
             globalTimeout,
             GenerateSequencePolicyState(),
-            m_suiteFilter,
+            m_suiteSet,
+            m_suiteLabelExcludeSet,
             selectedTests,
             GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));
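
The per-suite-combination data path used above is easier to see with a small sketch. This is not the C++ implementation; it only assumes, as the Python driver further down also does, that the suite set is flattened into a dash-separated string when used as a path component, and the file name is illustrative.

    # Illustrative sketch only: how a set of suites could map to its own
    # test impact analysis data path, analogous to SuiteSetAsString above.
    from pathlib import Path

    def spar_tia_path(active_root: Path, suites: list[str], spar_tia_file: str) -> Path:
        # Each suite combination gets its own sub-directory so that, for example,
        # "main" and "smoke-main" never overwrite each other's data.
        return active_root / "-".join(suites) / spar_tia_file

    print(spar_tia_path(Path("Temp/TestImpactFramework/active"), ["smoke", "main"], "SparTIA.bin"))
    # -> Temp/TestImpactFramework/active/smoke-main/SparTIA.bin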
 

+ 7 - 2
cmake/LYTestWrappers.cmake

@@ -173,6 +173,9 @@ function(ly_add_test)
         list(APPEND final_labels ${ly_add_test_LABELS})
     endif()
 
+    # Allow TIAF to remove the TIAF label from test targets whose framework type has not opted in, so that CTest runs them instead
+    o3de_test_impact_apply_test_labels(${ly_add_test_TEST_LIBRARY} final_labels)
+
     # labels expects a single param, of concatenated labels
     # this always has a value because ly_add_test_TEST_SUITE is automatically
     # filled in to be "main" if not specified.
@@ -274,10 +277,12 @@ function(ly_add_test)
         set_property(GLOBAL APPEND PROPERTY LY_ALL_TESTS ${test_target})
         set_property(GLOBAL PROPERTY LY_ALL_TESTS_${test_target}_TEST_LIBRARY ${ly_add_test_TEST_LIBRARY})
     endif()
-    # Add the test suite and timeout value to the test target params
+    # Add the test suite, timeout value and labels to the test target params
     set(LY_TEST_PARAMS "${LY_TEST_PARAMS}#${ly_add_test_TEST_SUITE}")
     set(LY_TEST_PARAMS "${LY_TEST_PARAMS}#${ly_add_test_TIMEOUT}")
-    # Store the params for this test target
+    string(REPLACE ";" "," flattened_labels "${final_labels}")
+    set(LY_TEST_PARAMS "${LY_TEST_PARAMS}#${flattened_labels}")
+    # Store the params and labels for this test target
     set_property(GLOBAL APPEND PROPERTY LY_ALL_TESTS_${test_target}_PARAMS ${LY_TEST_PARAMS})
 endfunction()
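
The params string built above is later split apart by the enumeration functions in LYTestImpactFramework.cmake. A hedged sketch of the same encoding, assuming '#' never appears inside the fields themselves and that labels are comma-separated:

    # Sketch of the composite test-params encoding (illustrative values).
    def encode_test_params(command: str, suite: str, timeout: int, labels: list[str]) -> str:
        return "#".join([command, suite, str(timeout), ",".join(labels)])

    print(encode_test_params("AzCore.Tests", "main", 1200, ["SUITE_main", "REQUIRES_tiaf"]))
    # -> AzCore.Tests#main#1200#SUITE_main,REQUIRES_tiaf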
 

+ 8 - 3
cmake/TestImpactFramework/ConsoleFrontendConfig.in

@@ -5,9 +5,6 @@
       "timestamp": "${timestamp}",
       "build_config": "${build_config}"
     },
-    "jenkins": {
-      "use_test_impact_analysis": ${use_tiaf}
-    },
     "repo": {
       "root": "${repo_dir}",
       "build": "${bin_dir}"
@@ -49,6 +46,10 @@
     }
   },
   "native": {
+    "jenkins": {
+      "enabled": ${native_test_targets_enabled},
+      "use_test_impact_analysis": ${use_tiaf}
+    },
     "workspace": {
       "temp": {
         "root": "${native_temp_dir}",
@@ -100,6 +101,10 @@
     }
   },
   "python": {
+    "jenkins": {
+      "enabled": ${python_test_targets_enabled},
+      "use_test_impact_analysis": true
+    },
     "workspace": {
       "temp": {
         "root": "${python_temp_dir}",

+ 44 - 35
cmake/TestImpactFramework/LYTestImpactFramework.cmake

@@ -6,9 +6,6 @@
 #
 #
 
-# Path to test instrumentation binary
-set(LY_TEST_IMPACT_INSTRUMENTATION_BIN "" CACHE PATH "Path to test impact framework instrumentation binary")
-
 # Name of test impact framework console static library target
 set(LY_TEST_IMPACT_CONSOLE_NATIVE_STATIC_TARGET "TestImpact.Frontend.Console.Native.Static")
 
@@ -69,15 +66,6 @@ set(LY_TEST_IMPACT_NATIVE_TEST_RUN_DIR "${GTEST_XML_OUTPUT_DIR}")
 # Path to the directory that the result of python runs will be stored in.
 set(LY_TEST_IMPACT_PYTHON_TEST_RUN_DIR "${PYTEST_XML_OUTPUT_DIR}")
 
-# If we are not provided a path to the Instrumentation bin,
-# set LY_TEST_IMPACT to false so that our tests don't get added
-# and TIAF doesn't get built.
-if(LY_TEST_IMPACT_INSTRUMENTATION_BIN)
-    set(LY_TEST_IMPACT_ACTIVE true)
-else()
-    set(LY_TEST_IMPACT_ACTIVE false)
-endif()
-
 #! ly_test_impact_rebase_file_to_repo_root: rebases the relative and/or absolute path to be relative to repo root directory and places the resulting path in quotes.
 #
 # \arg:INPUT_FILE the file to rebase
@@ -193,16 +181,18 @@ function(ly_test_impact_extract_google_test_params COMPOSITE_TEST COMPOSITE_SUIT
 
     set(test_suites "")
     foreach(composite_suite ${COMPOSITE_SUITES})
-        # Command, suite, timeout
+        # Command, suite, timeout, labels
         string(REPLACE "#" ";" suite_components ${composite_suite})
         list(LENGTH suite_components num_suite_components)
-        if(num_suite_components LESS 3)
-            message(FATAL_ERROR "The suite components ${composite_suite} are required to be in the following format: command#suite#string.")
+        if(num_suite_components LESS 4)
+            message(FATAL_ERROR "Test ${test_components} suite components ${composite_suite} are required to be in the following format: command#suite#timeout#labels.")
         endif()
         list(GET suite_components 0 test_command)
         list(GET suite_components 1 test_suite)
         list(GET suite_components 2 test_timeout)
-        set(suite_params "{ \"suite\": \"${test_suite}\",  \"command\": \"${test_command}\", \"timeout\": ${test_timeout} }")
+        list(GET suite_components 3 test_labels)
+        string(REPLACE "," "\",\"" test_labels "${test_labels}")
+        set(suite_params "{ \"suite\": \"${test_suite}\",  \"command\": \"${test_command}\", \"timeout\": ${test_timeout}, \"labels\": [\"${test_labels}\"] }")
         list(APPEND test_suites "${suite_params}")
     endforeach()
     string(REPLACE ";" ", " test_suites "${test_suites}")
@@ -239,22 +229,24 @@ function(ly_test_impact_extract_python_test_params COMPOSITE_TEST COMPOSITE_SUIT
     
     set(test_suites "")
     foreach(composite_suite ${COMPOSITE_SUITES})
-        # Script path, suite, timeout
+        # Script path, suite, timeout, labels
         string(REPLACE "#" ";" suite_components ${composite_suite})
         list(LENGTH suite_components num_suite_components)
-        if(num_suite_components LESS 3)
-            message(FATAL_ERROR "The suite components ${composite_suite} are required to be in the following format: script_path#suite#string.")
+        if(num_suite_components LESS 4)
+            message(FATAL_ERROR "Test ${test_components} suite components ${composite_suite} are required to be in the following format: script_path#suite#timeout#labels.")
         endif()
         list(GET suite_components 0 script_path)
         list(GET suite_components 1 test_suite)
         list(GET suite_components 2 test_timeout)
+        list(GET suite_components 3 test_labels)
         # Get python script path relative to repo root
         ly_test_impact_rebase_file_to_repo_root(
             "${script_path}"
             script_path
             "${LY_ROOT_FOLDER}"
         )
-        set(suite_params "{ \"suite\": \"${test_suite}\",  \"script\": \"${script_path}\", \"timeout\": ${test_timeout}, \"command\": \"${test_command}\" }")
+        string(REPLACE "," "\",\"" test_labels "${test_labels}")
+        set(suite_params "{ \"suite\": \"${test_suite}\",  \"script\": \"${script_path}\", \"timeout\": ${test_timeout}, \"command\": \"${test_command}\", \"labels\": [\"${test_labels}\"] }")
         list(APPEND test_suites "${suite_params}")
     endforeach()
     string(REPLACE ";" ", " test_suites "${test_suites}")
@@ -278,12 +270,13 @@ function(ly_test_impact_write_test_enumeration_file TEST_ENUMERATION_TEMPLATE_FI
         message(TRACE "Parsing ${test}")
         get_property(test_params GLOBAL PROPERTY LY_ALL_TESTS_${test}_PARAMS)
         get_property(test_type GLOBAL PROPERTY LY_ALL_TESTS_${test}_TEST_LIBRARY)
+
         if("${test_type}" STREQUAL "pytest")
             # Python tests
             ly_test_impact_extract_python_test_params(${test} "${test_params}" test_namespace test_name test_suites)
             list(APPEND python_tests "        { \"namespace\": \"${test_namespace}\", \"name\": \"${test_name}\", \"suites\": [${test_suites}] }")
         elseif("${test_type}" STREQUAL "pytest_editor")
-            # Python editor tests            
+            # Python editor tests
             ly_test_impact_extract_python_test_params(${test} "${test_params}" test_namespace test_name test_suites)
             list(APPEND python_editor_tests "        { \"namespace\": \"${test_namespace}\", \"name\": \"${test_name}\", \"suites\": [${test_suites}] }")
         elseif("${test_type}" STREQUAL "googletest")
@@ -296,9 +289,8 @@ function(ly_test_impact_write_test_enumeration_file TEST_ENUMERATION_TEMPLATE_FI
             ly_test_impact_extract_google_test_params(${test} "${test_params}" test_namespace test_name test_suites)
             list(APPEND google_benchmarks "        { \"namespace\": \"${test_namespace}\", \"name\": \"${test_name}\", \"launch_method\": \"${launch_method}\", \"suites\": [${test_suites}] }")
         else()
-            ly_test_impact_extract_python_test_params(${test} "${test_params}" test_namespace test_name test_suites)
             message("${test_name} is of unknown type (TEST_LIBRARY property is \"${test_type}\")")
-            list(APPEND unknown_tests "        { \"namespace\": \"${test_namespace}\", \"name\": \"${test}\", \"type\": \"${test_type}\" }")
+            list(APPEND unknown_tests "        { \"name\": \"${test}\" }")
         endif()
     endforeach()
 
@@ -334,9 +326,9 @@ function(ly_test_impact_write_gem_target_enumeration_file GEM_TARGET_TEMPLATE_FI
         endif()
     endforeach()
     string (REPLACE ";" ",\n" enumerated_gem_targets "${enumerated_gem_targets}")
-     # Write out source to target mapping file
-     set(mapping_path "${LY_TEST_IMPACT_GEM_TARGET_FILE}")
-     configure_file(${GEM_TARGET_TEMPLATE_FILE} ${mapping_path})
+    # Write out source to target mapping file
+    set(mapping_path "${LY_TEST_IMPACT_GEM_TARGET_FILE}")
+    configure_file(${GEM_TARGET_TEMPLATE_FILE} ${mapping_path})
 endfunction()
 
 #! ly_extract_aliased_target_dependencies: recursively extracts the aliases of a target to retrieve the true de-aliased target.
@@ -513,14 +505,26 @@ function(ly_test_impact_write_config_file CONFIG_TEMPLATE_FILE BIN_DIR)
     set(build_config "$<CONFIG>")
 
     # Instrumentation binary
-    if(NOT LY_TEST_IMPACT_INSTRUMENTATION_BIN)
+    if(NOT O3DE_TEST_IMPACT_INSTRUMENTATION_BIN)
         # No binary specified is not an error, it just means that the test impact analysis part of the framework is disabled
         message("No test impact framework instrumentation binary was specified, test impact analysis framework will fall back to regular test sequences instead")
         set(use_tiaf false)
         set(instrumentation_bin "")
     else()
         set(use_tiaf true)
-        file(TO_CMAKE_PATH ${LY_TEST_IMPACT_INSTRUMENTATION_BIN} instrumentation_bin)
+        file(TO_CMAKE_PATH ${O3DE_TEST_IMPACT_INSTRUMENTATION_BIN} instrumentation_bin)
+    endif()
+
+    if(O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED)
+        set(native_test_targets_enabled true)
+    else()
+        set(native_test_targets_enabled false)
+    endif()
+
+    if(O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED)
+        set(python_test_targets_enabled true)
+    else()
+        set(python_test_targets_enabled false)
     endif()
 
     # Testrunner binary
@@ -566,10 +570,14 @@ function(ly_test_impact_write_config_file CONFIG_TEMPLATE_FILE BIN_DIR)
     set(target_dependency_dir "${LY_TEST_IMPACT_TARGET_DEPENDENCY_DIR}")
 
     # Test impact analysis framework native runtime binary
-    set(native_runtime_bin "$<TARGET_FILE:${LY_TEST_IMPACT_NATIVE_CONSOLE_TARGET}>")
+    if(O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED)
+        set(native_runtime_bin "$<TARGET_FILE:${LY_TEST_IMPACT_NATIVE_CONSOLE_TARGET}>")
+    endif()
 
     # Test impact analysis framework python runtime binary
-    set(python_runtime_bin "$<TARGET_FILE:${LY_TEST_IMPACT_PYTHON_CONSOLE_TARGET}>")
+    if(O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED)
+        set(python_runtime_bin "$<TARGET_FILE:${LY_TEST_IMPACT_PYTHON_CONSOLE_TARGET}>")
+    endif()
     
     # Substitute config file template with above vars
     ly_file_read("${CONFIG_TEMPLATE_FILE}" config_file)
@@ -595,16 +603,17 @@ function(ly_test_impact_write_pytest_file CONFIGURATION_FILE)
         set(config_path "${LY_TEST_IMPACT_WORKING_DIR}/${config_type}/${LY_TEST_IMPACT_PERSISTENT_DIR}/${LY_TEST_IMPACT_CONFIG_FILE_NAME}")
         list(APPEND build_configs "\"${config_type}\" : { \"config\" : \"${config_path}\"}")
     endforeach()
- 
+
     # Configure our list of entries
     string(REPLACE ";" ",\n" build_configs "${build_configs}")
-    
+
     # Configure and write out our test data file
     ly_file_read("${CONFIGURATION_FILE}" test_file)
     string(CONFIGURE ${test_file} test_file)
     file(GENERATE
         OUTPUT "${LY_TEST_IMPACT_PYTEST_FILE_PATH}/ly_test_impact_test_data.json"
-        CONTENT "${test_file}")
+        CONTENT "${test_file}"
+    )
 
 endfunction()
 
@@ -630,8 +639,8 @@ endfunction()
 
 #! ly_test_impact_post_step: runs the post steps to be executed after all other cmake scripts have been executed.
 function(ly_test_impact_post_step)
-    if(NOT LY_TEST_IMPACT_ACTIVE)
-        return()
+    if(NOT O3DE_TEST_IMPACT_ACTIVE)
+        message("TIAF is deactivated but configs and meta-data will still be generated.")
     endif()
 
     # Clean temporary and persistent directories
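
The decode side of the composite format handled by ly_test_impact_extract_google_test_params and ly_test_impact_extract_python_test_params can be sketched as follows; this is a hedged model assuming exactly four '#'-separated fields with comma-separated labels in the last one.

    # Sketch of decoding one composite suite entry into an enumeration JSON record.
    def decode_suite_entry(composite: str) -> dict:
        command, suite, timeout, labels = composite.split("#")
        return {
            "suite": suite,
            "command": command,
            "timeout": int(timeout),
            "labels": labels.split(",") if labels else [],
        }

    print(decode_suite_entry("AzCore.Tests#main#1200#SUITE_main,REQUIRES_tiaf"))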

+ 68 - 0
cmake/TestImpactFramework/TestImpactTestTargetConfig.cmake

@@ -0,0 +1,68 @@
+#
+# Copyright (c) Contributors to the Open 3D Engine Project.
+# For complete copyright and license terms please see the LICENSE at the root of this distribution.
+#
+# SPDX-License-Identifier: Apache-2.0 OR MIT
+#
+#
+
+# Path to test instrumentation binary
+set(O3DE_TEST_IMPACT_INSTRUMENTATION_BIN "" CACHE PATH "Path to test impact framework instrumentation binary")
+
+# Label to add to tests for them to be included in TIAF
+set(REQUIRES_TIAF_LABEL "REQUIRES_tiaf")
+
+# Test impact analysis opt-in for native test targets
+set(O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED FALSE CACHE BOOL "Whether to enable native C++ test targets with the REQUIRES_TIAF_LABEL label for test impact analysis (otherwise, CTest will be used to run these targets).")
+
+# Test impact analysis opt-in for Python test targets
+set(O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED FALSE CACHE BOOL "Whether to enable Python test targets with the REQUIRES_TIAF_LABEL label for test impact analysis (otherwise, CTest will be used to run these targets).")
+
+# If we are not provided a path to the Instrumentation bin,
+# set LY_TEST_IMPACT to false so that our tests don't get added
+# and TIAF doesn't get built.
+if(O3DE_TEST_IMPACT_INSTRUMENTATION_BIN)
+    # TIAF is only enabled if at least one supported test target type has opted in for test impact analysis
+    if(O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED OR O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED)
+        set(O3DE_TEST_IMPACT_ACTIVE true)
+        if(O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED)
+            message("TIAF enabled for native tests.")
+        else()
+            message("TIAF disabled for native tests.")
+        endif()
+        if(O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED)
+            message("TIAF enabled for Python tests.")
+        else()
+            message("TIAF disabled for Python tests.")
+        endif()
+    else()
+        set(O3DE_TEST_IMPACT_ACTIVE false)
+        message("TIAF disabled. No test target types have opted in.")
+    endif()
+else()
+    set(O3DE_TEST_IMPACT_ACTIVE false)
+    message("TIAF disabled. Instrumentation bin not provided.")
+endif()
+
+#! o3de_test_impact_apply_test_labels: applies the appropriate labels to a test target according to whether or not its test
+#  framework type is enabled for running in TIAF (if not, the TIAF label is removed so that the target is run by CTest instead).
+#
+# \arg:TEST_FRAMEWORK The test framework type of the test target
+# \arg:TEST_LABELS The existing test labels list that the TIAF label will be removed from if the framework type is not opted in
+function(o3de_test_impact_apply_test_labels TEST_FRAMEWORK TEST_LABELS)
+    if("${TEST_FRAMEWORK}" STREQUAL "pytest" OR "${TEST_FRAMEWORK}" STREQUAL "pytest_editor")
+        if(NOT O3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED)
+            set(remove_tiaf_label ON)
+        endif()
+    elseif("${TEST_FRAMEWORK}" STREQUAL "googletest" OR "${TEST_FRAMEWORK}" STREQUAL "googlebenchmark")
+        if(NOT O3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED)
+            set(remove_tiaf_label ON)
+        endif()
+    endif()
+    
+    if(remove_tiaf_label)
+        list(REMOVE_ITEM ${TEST_LABELS} ${REQUIRES_TIAF_LABEL})
+        set(${TEST_LABELS} ${${TEST_LABELS}} PARENT_SCOPE)
+    endif()
+endfunction()
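
The opt-in logic above boils down to two decisions: TIAF is active only when an instrumentation binary is provided and at least one framework has opted in, and targets of a framework that has not opted in lose the TIAF label so that CTest runs them. A hedged Python model of those rules (the CMake above remains the source of truth):

    # Illustrative model of the CMake opt-in gating above.
    def tiaf_active(instrumentation_bin: str, native_enabled: bool, python_enabled: bool) -> bool:
        return bool(instrumentation_bin) and (native_enabled or python_enabled)

    def apply_test_labels(framework: str, labels: list[str], native_enabled: bool,
                          python_enabled: bool, tiaf_label: str = "REQUIRES_tiaf") -> list[str]:
        if framework in ("pytest", "pytest_editor"):
            enabled = python_enabled
        elif framework in ("googletest", "googlebenchmark"):
            enabled = native_enabled
        else:
            enabled = True  # unknown frameworks are left untouched
        # Frameworks that have not opted in fall back to CTest, so the TIAF label is removed.
        return labels if enabled else [label for label in labels if label != tiaf_label]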

+ 7 - 16
scripts/build/Platform/Windows/build_config.json

@@ -25,16 +25,7 @@
     "steps": [
       "profile",
       "asset_profile",
-      "test_cpu_profile"
-    ]
-  },
-  "profile_pipe_tiaf": {
-    "TAGS": [
-      "tiaf-testing"
-    ],
-    "steps": [
-      "profile",
-      "asset_profile",
+      "test_cpu_profile",
       "test_impact_analysis_profile_native",
       "test_impact_analysis_profile_python"
     ]
@@ -65,7 +56,7 @@
       "CONFIGURATION": "profile",
       "SCRIPT_PATH": "scripts/build/TestImpactAnalysis/tiaf_driver.py",
       "SCRIPT_PARAMETERS": 
-      "--config=\"%OUTPUT_DIRECTORY%/bin/TestImpactFramework/profile/Persistent/tiaf.json\" --src-branch=%BRANCH_NAME% --dst-branch=%CHANGE_TARGET% --commit=%CHANGE_ID% --s3-bucket=%TEST_IMPACT_S3_BUCKET% --mars-index-prefix=o3de-tiaf --s3-top-level-dir=%REPOSITORY_NAME% --build-number=%BUILD_NUMBER% --suite=main --test-failure-policy=continue --runtime-type=native"
+      "--config=\"%OUTPUT_DIRECTORY%/bin/TestImpactFramework/profile/Persistent/tiaf.json\" --src-branch=%BRANCH_NAME% --dst-branch=%CHANGE_TARGET% --commit=%CHANGE_ID% --s3-bucket=%TEST_IMPACT_S3_BUCKET% --mars-index-prefix=o3de-tiaf --s3-top-level-dir=%REPOSITORY_NAME% --build-number=%BUILD_NUMBER% --suites smoke main --label-excludes REQUIRES_gpu --test-failure-policy=continue --runtime-type=native"
     }
   },
   "test_impact_analysis_profile_python": {
@@ -77,7 +68,7 @@
       "CONFIGURATION": "profile",
       "SCRIPT_PATH": "scripts/build/TestImpactAnalysis/tiaf_driver.py",
       "SCRIPT_PARAMETERS": 
-      "--config=\"%OUTPUT_DIRECTORY%/bin/TestImpactFramework/profile/Persistent/tiaf.json\" --src-branch=%BRANCH_NAME% --dst-branch=%CHANGE_TARGET% --commit=%CHANGE_ID% --s3-bucket=%TEST_IMPACT_S3_BUCKET% --mars-index-prefix=o3de-tiaf --s3-top-level-dir=%REPOSITORY_NAME% --build-number=%BUILD_NUMBER% --suite=main --test-failure-policy=continue --runtime-type=python --testrunner=live --target-output=stdout"
+      "--config=\"%OUTPUT_DIRECTORY%/bin/TestImpactFramework/profile/Persistent/tiaf.json\" --src-branch=%BRANCH_NAME% --dst-branch=%CHANGE_TARGET% --commit=%CHANGE_ID% --s3-bucket=%TEST_IMPACT_S3_BUCKET% --mars-index-prefix=o3de-tiaf --s3-top-level-dir=%REPOSITORY_NAME% --build-number=%BUILD_NUMBER% --suites smoke main --label-excludes REQUIRES_gpu --test-failure-policy=continue --runtime-type=python --testrunner=live --target-output=stdout"
     }
   },
   "debug": {
@@ -120,7 +111,7 @@
     "PARAMETERS": {
       "CONFIGURATION": "profile",
       "OUTPUT_DIRECTORY": "build\\windows",
-      "CMAKE_OPTIONS": "-DCMAKE_SYSTEM_VERSION=10.0 -DLY_TEST_IMPACT_INSTRUMENTATION_BIN=%TEST_IMPACT_WIN_BINARY%",
+      "CMAKE_OPTIONS": "-DCMAKE_SYSTEM_VERSION=10.0 -DO3DE_TEST_IMPACT_INSTRUMENTATION_BIN=%TEST_IMPACT_WIN_BINARY%",
       "CMAKE_LY_PROJECTS": "AutomatedTesting",
       "CMAKE_TARGET": "ALL_BUILD",
       "CMAKE_NATIVE_BUILD_ARGS": "/m /nologo"
@@ -135,7 +126,7 @@
     "PARAMETERS": {
       "CONFIGURATION": "profile",
       "OUTPUT_DIRECTORY": "build\\windows",
-      "CMAKE_OPTIONS": "-G \"Visual Studio 16 2019\" -DCMAKE_SYSTEM_VERSION=10.0 -DLY_TEST_IMPACT_INSTRUMENTATION_BIN=%TEST_IMPACT_WIN_BINARY%",
+      "CMAKE_OPTIONS": "-G \"Visual Studio 16 2019\" -DCMAKE_SYSTEM_VERSION=10.0 -DO3DE_TEST_IMPACT_INSTRUMENTATION_BIN=%TEST_IMPACT_WIN_BINARY%",
       "CMAKE_LY_PROJECTS": "AutomatedTesting",
       "CMAKE_TARGET": "ALL_BUILD",
       "CMAKE_NATIVE_BUILD_ARGS": "/m /nologo"
@@ -166,11 +157,11 @@
     "PARAMETERS": {
       "CONFIGURATION": "profile",
       "OUTPUT_DIRECTORY": "build\\windows",
-      "CMAKE_OPTIONS": "-DCMAKE_SYSTEM_VERSION=10.0",
+      "CMAKE_OPTIONS": "-DCMAKE_SYSTEM_VERSION=10.0 -DO3DE_TEST_IMPACT_NATIVE_TEST_TARGETS_ENABLED=FALSE -DO3DE_TEST_IMPACT_PYTHON_TEST_TARGETS_ENABLED=TRUE",
       "CMAKE_LY_PROJECTS": "AutomatedTesting",
       "CMAKE_TARGET": "TEST_SUITE_smoke TEST_SUITE_main",
       "CMAKE_NATIVE_BUILD_ARGS": "/m /nologo",
-      "CTEST_OPTIONS": "-L \"(SUITE_smoke|SUITE_main)\" -LE \"(REQUIRES_gpu)\" -T Test --no-tests=error",
+      "CTEST_OPTIONS": "-L \"(SUITE_smoke|SUITE_main)\" -LE \"(REQUIRES_gpu|REQUIRES_tiaf)\" -T Test --no-tests=error",
       "TEST_METRICS": "True",
       "TEST_RESULTS": "True"
     }
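
The driver invocations above now pass --suites and --label-excludes as space-separated lists. The tiaf_driver.py argument parser is not part of this diff, so the following argparse declaration is only an assumption of how such options could be accepted:

    # Hypothetical argparse sketch for the new multi-value options; the real parser may differ.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--suites", nargs="+", required=True,
                        help="One or more test suites to select, e.g. smoke main")
    parser.add_argument("--label-excludes", nargs="+", default=[],
                        help="Labels whose tests should be excluded, e.g. REQUIRES_gpu")

    args = parser.parse_args(["--suites", "smoke", "main", "--label-excludes", "REQUIRES_gpu"])
    print(args.suites, args.label_excludes)  # ['smoke', 'main'] ['REQUIRES_gpu']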

+ 9 - 2
scripts/build/TestImpactAnalysis/Testing/conftest.py

@@ -15,6 +15,8 @@ BUILD_INFO_KEY = 'build_info'
 CONFIG_PATH_KEY = 'config'
 BINARY_PATH_KEY = 'runtime_bin'
 COMMON_CONFIG_KEY = "common"
+JENKINS_KEY = "jenkins"
+ENABLED_KEY = "enabled"
 WORKSPACE_KEY = "workspace"
 ROOT_KEY = "root"
 TEMP_KEY = "temp"
@@ -61,6 +63,11 @@ def storage_config(runtime_type, config_data):
     args_from_config['temp_workspace'] = config_data[runtime_type][WORKSPACE_KEY][TEMP_KEY][ROOT_KEY]
     return args_from_config
 
+@pytest.fixture
+def skip_if_test_targets_disabled(runtime_type, config_data):
+    if not config_data[runtime_type][JENKINS_KEY][ENABLED_KEY]:
+        pytest.skip("Test targets are disabled for this runtime, test will be skipped.")
+
 
 @pytest.fixture
 def config_path(build_type, test_data_file):
@@ -91,7 +98,7 @@ def tiaf_args(config_path):
     args['dst_branch'] = "123"
     args['commit'] = "foobar"
     args['build_number'] = 1
-    args['suite'] = "main"
+    args['suites'] = "main"
     args['test_failure_policy'] = "continue"
     return args
 
@@ -126,7 +133,7 @@ def default_runtime_args(mock_uuid, report_path):
     runtime_args['test_failure_policy'] = "--fpolicy=continue"
     runtime_args['report'] = "--report=" + \
         str(report_path).replace("/", "\\")
-    runtime_args['suite'] = "--suite=main"
+    runtime_args['suites'] = "--suites=main"
     return runtime_args
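
A short usage sketch of the fixture added above: any test that lists skip_if_test_targets_disabled in its signature is skipped when the selected runtime's test targets are disabled in the config (the assertion below is illustrative only).

    # Illustrative test relying on the new skip fixture.
    def test_runtime_specific_behaviour(skip_if_test_targets_disabled, tiaf_args):
        # Only reached when config_data[runtime_type]["jenkins"]["enabled"] is truthy.
        assert tiaf_args["suites"] == "main"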
 
 

+ 45 - 31
scripts/build/TestImpactAnalysis/Testing/test_tiaf_unit_tests.py

@@ -42,7 +42,8 @@ class ConcreteBaseTestImpact(BaseTestImpact):
 
 class TestTiafDriver():
 
-    def test_run_Tiaf_mars_index_prefix_is_supplied(self, caplog, main_args, mock_runtime, mocker):
+
+    def test_run_Tiaf_mars_index_prefix_is_supplied(self, caplog, main_args, skip_if_test_targets_disabled, mock_runtime, mocker):
         # given:
         # Default args + mars_index_prefix being provided,
         # and transmit_report_to_mars is patched to intercept the call.
@@ -58,7 +59,7 @@ class TestTiafDriver():
         # Tiaf should call the transmit function.
         mock_mars.assert_called()
 
-    def test_run_Tiaf_mars_index_prefix_is_not_supplied(self, caplog, main_args, mock_runtime, mocker):
+    def test_run_Tiaf_mars_index_prefix_is_not_supplied(self, caplog, main_args, skip_if_test_targets_disabled, mock_runtime, mocker):
         # given:
         # Default_args - mars index is not supplied.
         mock_mars = mocker.patch("mars_utils.transmit_report_to_mars")
@@ -73,7 +74,7 @@ class TestTiafDriver():
         mock_mars.assert_not_called()
 
     @pytest.mark.parametrize("runtime_type,mock_type", [("native", "test_impact.native_test_impact.NativeTestImpact.__init__"), ("python", "test_impact.python_test_impact.PythonTestImpact.__init__")])
-    def test_run_Tiaf_driver_runtime_type_selection(self, caplog, tiaf_args, mock_runtime, mocker, runtime_type, mock_type):
+    def test_run_Tiaf_driver_runtime_type_selection(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, mocker, runtime_type, mock_type):
         # given:
         # Default args + runtime_type
         tiaf_args['runtime_type'] = runtime_type
@@ -92,6 +93,10 @@ class TestTiafDriver():
 
 class TestTiafInitialiseStorage():
 
+    def skip_if_test_targets_disabled(skip_if_test_targets_disabled):
+        if not skip_if_test_targets_disabled:
+            pytest.skip("Test targets are disabled for this runtime, test will be skipped.")
+
     @pytest.fixture
     def runtime_type(self):
         """
@@ -105,10 +110,10 @@ class TestTiafInitialiseStorage():
             output.append(entry)
         return output
         
-    def test_create_TestImpact_no_s3_bucket_name(self, caplog, tiaf_args, config_data, mocker, storage_config):
+    def test_create_TestImpact_no_s3_bucket_name(self, caplog, tiaf_args, skip_if_test_targets_disabled, config_data, mocker, storage_config):
         # given:
         # Default args.
-        expected_storage_args = config_data, tiaf_args['suite'], tiaf_args[
+        expected_storage_args = config_data, tiaf_args['suites'], tiaf_args[
             'commit'], storage_config['active_workspace'], storage_config['unpacked_coverage_data_file'], storage_config['previous_test_run_data_file'], storage_config['historic_workspace'], storage_config['historic_data_file'], storage_config['temp_workspace']
         mock_local = mocker.patch(
             "persistent_storage.PersistentStorageLocal.__init__", side_effect=SystemError(), return_value=None)
@@ -117,11 +122,11 @@ class TestTiafInitialiseStorage():
         tiaf = ConcreteBaseTestImpact(tiaf_args)
 
         # then:
-        # PersistentStorageLocal should be called with suite, commit and config data as arguments.
+        # PersistentStorageLocal should be called with suites, commit and config data as arguments.
         assert_list_content_equal(self.to_list(mock_local.call_args[0]).pop(), self.to_list(expected_storage_args).pop())
 
     @pytest.mark.parametrize("bucket_name,top_level_dir,expected_top_level_dir", [("test_bucket", "test_dir", "test_dir/native")])
-    def test_create_TestImpact_s3_bucket_name_supplied(self, caplog, tiaf_args, mocker, bucket_name, top_level_dir, config_data, expected_top_level_dir, storage_config):
+    def test_create_TestImpact_s3_bucket_name_supplied(self, caplog, tiaf_args, skip_if_test_targets_disabled, mocker, bucket_name, top_level_dir, config_data, expected_top_level_dir, storage_config):
         # given:
         # Default arguments + s3_bucket and s3_top_level_dir being set to the above parameters,
         # and we patch PersistentStorageS3 to intercept the constructor call.
@@ -130,7 +135,7 @@ class TestTiafInitialiseStorage():
         mock_storage = mocker.patch(
             "persistent_storage.PersistentStorageS3.__init__", side_effect=SystemError())
 
-        expected_storage_args = config_data, tiaf_args['suite'], tiaf_args[
+        expected_storage_args = config_data, tiaf_args['suites'], tiaf_args[
             'commit'], bucket_name, expected_top_level_dir, tiaf_args['src_branch'], storage_config['active_workspace'], storage_config['unpacked_coverage_data_file'], storage_config['previous_test_run_data_file'], storage_config['temp_workspace']
 
         # when:
@@ -141,7 +146,7 @@ class TestTiafInitialiseStorage():
         # PersistentStorageS3 should be called with config data, commit, bucket_name, top_level_dir and src branch as arguments.
         mock_storage.assert_called_with(*expected_storage_args)
 
-    def test_create_TestImpact_s3_bucket_name_not_supplied(self, caplog, tiaf_args, mock_runtime, default_runtime_args, mocker, config_data):
+    def test_create_TestImpact_s3_bucket_name_not_supplied(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, mocker, config_data):
         # given:
         # Default arguments + s3_bucket and s3_top_level_dir arguments set to none.
         tiaf_args['s3_bucket'] = None
@@ -157,7 +162,7 @@ class TestTiafInitialiseStorage():
         # PersistentStorageS3 should not be called.
         mock_storage.assert_not_called()
 
-    def test_create_TestImpact_s3_top_level_dir_bucket_name_not_supplied(self, caplog, tiaf_args, mock_runtime, default_runtime_args, mocker, config_data):
+    def test_create_TestImpact_s3_top_level_dir_bucket_name_not_supplied(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, mocker, config_data):
         # given:
         # Default arguments + s3_bucket set to none and s3_top_level_dir is defined.
         tiaf_args['s3_bucket'] = None
@@ -176,6 +181,10 @@ class TestTiafInitialiseStorage():
 
 class TestTIAFNativeUnitTests():
 
+    def skip_if_test_targets_disabled(skip_if_test_targets_disabled):
+        if not skip_if_test_targets_disabled:
+            pytest.skip("Test targets are disabled for this runtime, test will be skipped.")
+
     @pytest.fixture
     def runtime_type(self):
         """
@@ -184,7 +193,7 @@ class TestTIAFNativeUnitTests():
         return "native"
 
     @pytest.mark.parametrize("safemode, arg_val", [("on", "on")])
-    def test_create_NativeTestImpact_safe_mode_arguments(self, caplog, tiaf_args, mock_runtime, cpp_default_runtime_args, safemode, arg_val):
+    def test_create_NativeTestImpact_safe_mode_arguments(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, cpp_default_runtime_args, safemode, arg_val):
         # given:
         # Default args + safe_mode set.
         tiaf_args['safe_mode'] = safemode
@@ -199,7 +208,7 @@ class TestTIAFNativeUnitTests():
         assert_list_content_equal(
             tiaf.runtime_args, cpp_default_runtime_args.values())
 
-    def test_create_NativeTestImpact_no_safe_mode(self, caplog, tiaf_args, mock_runtime, cpp_default_runtime_args):
+    def test_create_NativeTestImpact_no_safe_mode(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, cpp_default_runtime_args):
         # given:
         # Default args + safe_mode set.
 
@@ -213,14 +222,14 @@ class TestTIAFNativeUnitTests():
             tiaf.runtime_args, cpp_default_runtime_args.values())
 
     @pytest.mark.parametrize("bucket_name,top_level_dir,expected_top_level_dir", [("test_bucket", "test_dir", "test_dir/native")])
-    def test_create_NativeTestImpact_correct_s3_dir_runtime_type(self, config_data, caplog, tiaf_args, mock_runtime, cpp_default_runtime_args, mocker, bucket_name, storage_config, top_level_dir, expected_top_level_dir):
+    def test_create_NativeTestImpact_correct_s3_dir_runtime_type(self, config_data, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, cpp_default_runtime_args, mocker, bucket_name, storage_config, top_level_dir, expected_top_level_dir):
         # given:
         # Default args + s3_bucket and s3_top_level_dir set
         tiaf_args['s3_bucket'] = bucket_name
         tiaf_args['s3_top_level_dir'] = top_level_dir
         mock_storage = mocker.patch(
             "persistent_storage.PersistentStorageS3.__init__", side_effect=SystemError())
-        expected_storage_args = config_data, tiaf_args['suite'], tiaf_args[
+        expected_storage_args = config_data, tiaf_args['suites'], tiaf_args[
             'commit'], bucket_name, expected_top_level_dir, tiaf_args['src_branch'], storage_config['active_workspace'], storage_config['unpacked_coverage_data_file'], storage_config['previous_test_run_data_file'], storage_config['temp_workspace']
 
         # when:
@@ -228,12 +237,16 @@ class TestTIAFNativeUnitTests():
         tiaf = NativeTestImpact(tiaf_args)
 
         # then:
-        # PersistentStorageS3.__init__ should be called with config data, suite, commit, bucket_name, modified top level dir and src_branch as arguments
+        # PersistentStorageS3.__init__ should be called with config data, suites, commit, bucket_name, modified top level dir and src_branch as arguments
         mock_storage.assert_called_with(*expected_storage_args)
 
 
 class TestTIAFPythonUnitTests():
 
+    def skip_if_test_targets_disabled(skip_if_test_targets_disabled):
+        if not skip_if_test_targets_disabled:
+            pytest.skip("Test targets are disabled for this runtime, test will be skipped.")
+
     @pytest.fixture
     def runtime_type(self):
         """
@@ -243,14 +256,14 @@ class TestTIAFPythonUnitTests():
 
     #@pytest.mark.skip(reason="To fix before PR")
     @pytest.mark.parametrize("bucket_name,top_level_dir,expected_top_level_dir", [("test_bucket", "test_dir", "test_dir/python")])
-    def test_create_PythonTestImpact_correct_s3_dir_runtime_type(self, config_data, caplog, tiaf_args, mock_runtime, mocker, bucket_name, top_level_dir, expected_top_level_dir, storage_config):
+    def test_create_PythonTestImpact_correct_s3_dir_runtime_type(self, config_data, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, mocker, bucket_name, top_level_dir, expected_top_level_dir, storage_config):
         # given:
         # Default args + s3_bucket and s3_top_level_dir set
         tiaf_args['s3_bucket'] = bucket_name
         tiaf_args['s3_top_level_dir'] = top_level_dir
         mock_storage = mocker.patch(
             "persistent_storage.PersistentStorageS3.__init__", side_effect=SystemError())
-        expected_storage_args = config_data, tiaf_args['suite'], tiaf_args[
+        expected_storage_args = config_data, tiaf_args['suites'], tiaf_args[
             'commit'], bucket_name, expected_top_level_dir, tiaf_args['src_branch'], storage_config['active_workspace'], storage_config['unpacked_coverage_data_file'], storage_config['previous_test_run_data_file'], storage_config['temp_workspace']
 
         # when:
@@ -258,7 +271,7 @@ class TestTIAFPythonUnitTests():
         tiaf = PythonTestImpact(tiaf_args)
 
         # then:
-        # PersistentStorageS3.__init__ should be called with config data, suite, commit, bucket_name, modified top level dir and src_branch as arguments
+        # PersistentStorageS3.__init__ should be called with config data, suites, commit, bucket_name, modified top level dir and src_branch as arguments
         mock_storage.assert_called_with(*expected_storage_args)
 
 
@@ -271,10 +284,11 @@ class TestTIAFBaseUnitTests():
         """
         return "native"
 
-    def test_create_TestImpact_valid_config(self, caplog, tiaf_args, mock_runtime, mocker, default_runtime_args):
+    def test_create_TestImpact_valid_config(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, mocker, default_runtime_args):
         """
         Given default arguments, when we create a TestImpact object, tiaf.runtime_args should be eqaul
         """
+
         # given:
         # Default arguments.
 
@@ -287,7 +301,7 @@ class TestTIAFBaseUnitTests():
         assert_list_content_equal(
             tiaf.runtime_args, default_runtime_args.values())
 
-    def test_create_TestImpact_invalid_config(self, caplog, tiaf_args, mock_runtime, tmp_path_factory, default_runtime_args):
+    def test_create_TestImpact_invalid_config(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, tmp_path_factory, default_runtime_args):
         # given:
         # Invalid config file at invalid_file,
         # and setting tiaf_args.config to that path.
@@ -304,7 +318,7 @@ class TestTIAFBaseUnitTests():
             tiaf = ConcreteBaseTestImpact(tiaf_args)
 
     @ pytest.mark.parametrize("branch_name", ["development", "not_a_real_branch"])
-    def test_create_TestImpact_src_branch(self, caplog, tiaf_args, mock_runtime, default_runtime_args, branch_name):
+    def test_create_TestImpact_src_branch(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, branch_name):
         # given:
         # Default args + src_branch set to branch_name.
         tiaf_args['src_branch'] = branch_name
@@ -318,7 +332,7 @@ class TestTIAFBaseUnitTests():
         assert tiaf.source_branch == branch_name
 
     @ pytest.mark.parametrize("branch_name", ["development", "not_a_real_branch"])
-    def test_create_TestImpact_dst_branch(self, caplog, tiaf_args, mock_runtime, default_runtime_args, branch_name):
+    def test_create_TestImpact_dst_branch(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, branch_name):
         # given:
         # Default args + dst_branch set to branch_name.
         tiaf_args['dst_branch'] = branch_name
@@ -332,7 +346,7 @@ class TestTIAFBaseUnitTests():
         assert tiaf.destination_branch == branch_name
 
     @ pytest.mark.parametrize("commit", ["9a15f038807ba8b987c9e689952d9271ef7fd086", "foobar"])
-    def test_create_TestImpact_commit(self, caplog, tiaf_args, mock_runtime, default_runtime_args, commit):
+    def test_create_TestImpact_commit(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, commit):
         # given:
         # Default args + commit set to commit.
         tiaf_args['commit'] = commit
@@ -345,7 +359,7 @@ class TestTIAFBaseUnitTests():
         # tiaf.destination_commit should equal our commit parameter.
         assert tiaf.destination_commit == commit
 
-    def test_create_TestImpact_valid_test_suite_name(self, caplog, tiaf_args, mock_runtime, default_runtime_args):
+    def test_create_TestImpact_valid_test_suite_name(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args):
         # given:
         # Default args
 
@@ -358,11 +372,11 @@ class TestTIAFBaseUnitTests():
         assert_list_content_equal(
             tiaf.runtime_args, default_runtime_args.values())
 
-    def test_create_TestImpact_invalid_test_suite_name(self, caplog, tiaf_args, mock_runtime, default_runtime_args):
+    def test_create_TestImpact_invalid_test_suite_name(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args):
         # given:
-        # Default args + suite defined as "foobar" in given args and expected args.
-        tiaf_args['suite'] = "foobar"
-        default_runtime_args['suite'] = "--suite=foobar"
+        # Default args + suites defined as "foobar" in given args and expected args.
+        tiaf_args['suites'] = "foobar"
+        default_runtime_args['suites'] = "--suites=foobar"
 
         # when:
         # We create a TestImpact object.
@@ -374,7 +388,7 @@ class TestTIAFBaseUnitTests():
             tiaf.runtime_args, default_runtime_args.values())
 
     @ pytest.mark.parametrize("policy", ["continue", "abort", "ignore"])
-    def test_create_TestImpact_valid_failure_policy(self, caplog, tiaf_args, mock_runtime, default_runtime_args, policy):
+    def test_create_TestImpact_valid_failure_policy(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args, policy):
         # given:
         # Default args + test_failure_policy set to policy parameter.
         tiaf_args['test_failure_policy'] = policy
@@ -394,7 +408,7 @@ class TestTIAFBaseUnitTests():
         assert_list_content_equal(
             tiaf.runtime_args, default_runtime_args.values())
 
-    def test_create_TestImpact_exclude_file_not_supplied(self, caplog, tiaf_args, mock_runtime, default_runtime_args):
+    def test_create_TestImpact_exclude_file_not_supplied(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args):
         # given:
         # Default args.
 
@@ -407,7 +421,7 @@ class TestTIAFBaseUnitTests():
         assert_list_content_equal(
             tiaf.runtime_args, default_runtime_args.values())
 
-    def test_create_TestImpact_exclude_file_supplied(self, caplog, tiaf_args, mock_runtime, default_runtime_args):
+    def test_create_TestImpact_exclude_file_supplied(self, caplog, tiaf_args, skip_if_test_targets_disabled, mock_runtime, default_runtime_args):
         # given:
         # Default args + exclude_file set.
         tiaf_args['exclude_file'] = "testpath"

+ 2 - 1
scripts/build/TestImpactAnalysis/mars_utils.py

@@ -120,7 +120,8 @@ def generate_mars_job(tiaf_result, driver_args, build_number: int):
         constants.COMMIT_DISTANCE_KEY,
         constants.SRC_BRANCH_KEY,
         constants.DST_BRANCH_KEY,
-        constants.SUITE_KEY,
+        constants.SUITES_KEY,
+        constants.LABEL_EXCLUDES_KEY,
         constants.SOURCE_OF_TRUTH_BRANCH_KEY,
         constants.IS_SOURCE_OF_TRUTH_BRANCH_KEY,
         constants.USE_TEST_IMPACT_ANALYSIS_KEY,

+ 6 - 6
scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage.py

@@ -30,27 +30,27 @@ class PersistentStorage(ABC):
     RUNTIME_ARTIFACT_DIRECTORY = "RuntimeArtifacts"
     RUNTIME_COVERAGE_DIRECTORY = "RuntimeCoverage"
 
-    def __init__(self, config: dict, suite: str, commit: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, temp_workspace: str):
+    def __init__(self, config: dict, suites_string: str, commit: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, temp_workspace: str):
         """
         Initializes the persistent storage into a state for which there is no historic data available.
 
         @param config: The runtime configuration to obtain the data file paths from.
-        @param suite:  The test suite for which the historic data will be obtained for.
+        @param suites_string: The unique key used to differentiate the different suite combinations from one another, and for which the historic data will be obtained.
         @param commit: The commit hash for this build.
         """
 
         # Work on the assumption that there is no historic meta-data (a valid state to be in, should none exist)
-        self._suite = suite
+        self._suites_string = suites_string
         self._last_commit_hash = None
         self._has_historic_data = False
         self._has_previous_last_commit_hash = False
         self._this_commit_hash = commit
         self._this_commit_hash_last_commit_hash = None
         self._historic_data = None
-        logger.info(f"Attempting to access persistent storage for the commit '{self._this_commit_hash}' for suite '{self._suite}'")
+        logger.info(f"Attempting to access persistent storage for the commit '{self._this_commit_hash}' for suites '{self._suites_string}'")
 
         self._temp_workspace = pathlib.Path(temp_workspace)
-        self._active_workspace = pathlib.Path(active_workspace).joinpath(pathlib.Path(self._suite))
+        self._active_workspace = pathlib.Path(active_workspace).joinpath(pathlib.Path(self._suites_string))
         self._unpacked_coverage_data_file = self._active_workspace.joinpath(unpacked_coverage_data_file_path)
         self._previous_test_run_data_file = self._active_workspace.joinpath(previous_test_run_data_file_path)
 
@@ -80,7 +80,7 @@ class PersistentStorage(ABC):
                     self._has_previous_last_commit_hash = self._this_commit_hash_last_commit_hash is not None
 
                     if self._has_previous_last_commit_hash:
-                        logger.info(f"Last commit hash '{self._this_commit_hash_last_commit_hash}' was used previously for this commit.")
+                        logger.info(f"Last commit hash '{self._this_commit_hash_last_commit_hash}' was used previously for the commit '{self._last_commit_hash}'.")
                     else:
                         logger.info(f"Prior sequence data found for this commit but it is empty (there was no coverage data available at that time).")
                 else:

+ 8 - 9
scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage_local.py

@@ -21,16 +21,16 @@ class PersistentStorageLocal(PersistentStorage):
     HISTORIC_KEY = "historic"
     DATA_KEY = "data"
 
-    def __init__(self, config: dict, suite: str, commit: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, historic_workspace: str, historic_data_file_path: str, temp_workspace: str):
+    def __init__(self, config: dict, suites_string: str, commit: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, historic_workspace: str, historic_data_file_path: str, temp_workspace: str):
         """
         Initializes the persistent storage with any local historic data available.
 
         @param config: The runtime config file to obtain the data file paths from.
-        @param suite:  The test suite for which the historic data will be obtained for.
+        @param suites_string:  The concatenated test suite string for which the historic data will be obtained.
         @param commit: The commit hash for this build.
         """
 
-        super().__init__(config, suite, commit, active_workspace, unpacked_coverage_data_file_path, previous_test_run_data_file_path, temp_workspace)
+        super().__init__(config, suites_string, commit, active_workspace, unpacked_coverage_data_file_path, previous_test_run_data_file_path, temp_workspace)
         self._retrieve_historic_data(config, historic_workspace, historic_data_file_path)
 
     def _store_historic_data(self, historic_data_json: str):
@@ -52,7 +52,7 @@ class PersistentStorageLocal(PersistentStorage):
         try:
             # Attempt to obtain the local persistent data location specified in the runtime config file
             self._historic_workspace = pathlib.Path(historic_workspace)
-            self._historic_workspace = self._historic_workspace.joinpath(pathlib.Path(self._suite))
+            self._historic_workspace = self._historic_workspace.joinpath(pathlib.Path(self._suites_string))
             historic_data_file = pathlib.Path(historic_data_file_path)
             
             # Attempt to unpack the local historic data file
@@ -108,9 +108,8 @@ class PersistentStorageLocal(PersistentStorage):
         @param source_directory: pathlib.Path to directory to copy files from.
         @param target_direcotry: pathlib.Path to directory to store files in.
         """
-        for artifact_path in source_directory.iterdir():
-            try:
-                shutil.copy2(artifact_path, target_directory.joinpath(artifact_path.name))
-            except OSError as e:
-                logger.error(f"Error copying file {artifact_path.name} from {source_directory} to {target_directory}")
+        try:
+            shutil.copytree(source_directory, target_directory, dirs_exist_ok=True)
+        except OSError as e:
+                logger.error(f"Error copying tree '{source_directory}' to '{target_directory}'")
                 logger.error(f"Error thrown: {e}")

+ 10 - 10
scripts/build/TestImpactAnalysis/persistent_storage/tiaf_persistent_storage_s3.py

@@ -27,19 +27,19 @@ class PersistentStorageS3(PersistentStorage):
     META_KEY = "meta"
     BUILD_CONFIG_KEY = "build_config"
 
-    def __init__(self, config: dict, suite: str, commit: str, s3_bucket: str, root_dir: str, branch: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, temp_workspace: str):
+    def __init__(self, config: dict, suites_string: str, commit: str, s3_bucket: str, root_dir: str, branch: str, active_workspace: str, unpacked_coverage_data_file_path: str, previous_test_run_data_file_path: str, temp_workspace: str):
         """
         Initializes the persistent storage with the specified s3 bucket.
 
-        @param config:    The runtime config file to obtain the data file paths from.
-        @param suite:     The test suite for which the historic data will be obtained for.
-        @param commit:    The commit hash for this build.
-        @param s3_bucket: The s3 bucket to use for storing nd retrieving historic data.
-        @param root_dir:  The root directory to use for the historic data object.
-        @branch branch:   The branch to retrieve the historic data for.
+        @param config:        The runtime config file to obtain the data file paths from.
+        @param suites_string: The concatenated test suites string for which the historic data will be obtained.
+        @param commit:        The commit hash for this build.
+        @param s3_bucket:     The s3 bucket to use for storing and retrieving historic data.
+        @param root_dir:      The root directory to use for the historic data object.
+        @param branch:        The branch to retrieve the historic data for.
         """
 
-        super().__init__(config, suite, commit, active_workspace, unpacked_coverage_data_file_path, previous_test_run_data_file_path, temp_workspace)
+        super().__init__(config, suites_string, commit, active_workspace, unpacked_coverage_data_file_path, previous_test_run_data_file_path, temp_workspace)
 
         self.s3_bucket = s3_bucket
         self.root_dir = root_dir
@@ -79,8 +79,8 @@ class PersistentStorageS3(PersistentStorage):
             # historic_data.json.zip is the file containing the coverage and meta-data of the last TIAF sequence run
             historic_data_file = f"historic_data.{object_extension}"
 
-            # The location of the data is in the form <root_dir>/<branch>/<config>/<suite> so the build config of each branch gets its own historic data
-            self._historic_data_dir = f"{self.root_dir}/{self.branch}/{config[self.COMMON_CONFIG_KEY][self.META_KEY][self.BUILD_CONFIG_KEY]}/{self._suite}"
+            # The location of the data is in the form <root_dir>/<branch>/<config>/<suites_string> so the build config of each branch gets its own historic data
+            self._historic_data_dir = f"{self.root_dir}/{self.branch}/{config[self.COMMON_CONFIG_KEY][self.META_KEY][self.BUILD_CONFIG_KEY]}/{self._suites_string}"
             self._historic_data_key = f"{self._historic_data_dir}/{historic_data_file}"
 
             logger.info(
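
The key layout described in the comment above can be sketched in one line; the object name historic_data.json.zip is taken from the surrounding code, and the argument values are examples only.

    # Sketch of the S3 key layout <root_dir>/<branch>/<build_config>/<suites_string>.
    def historic_data_key(root_dir: str, branch: str, build_config: str, suites_string: str) -> str:
        return f"{root_dir}/{branch}/{build_config}/{suites_string}/historic_data.json.zip"

    print(historic_data_key("o3de", "development", "profile", "smoke-main"))
    # -> o3de/development/profile/smoke-main/historic_data.json.zip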

+ 38 - 25
scripts/build/TestImpactAnalysis/test_impact/base_test_impact.py

@@ -22,7 +22,8 @@ logger = get_logger(__file__)
 
 # Constants to access our argument dictionary for the values of different arguments. Not guaranteed to be in the dictionary in all cases.
 ARG_S3_BUCKET = 's3_bucket'
-ARG_SUITE = 'suite'
+ARG_SUITES = 'suites'
+ARG_LABEL_EXCLUDES = 'label_excludes'
 ARG_CONFIG = 'config'
 ARG_SOURCE_BRANCH = 'src_branch'
 ARG_DESTINATION_BRANCH = 'dst_branch'
@@ -57,15 +58,24 @@ class BaseTestImpact(ABC):
         self._change_list = {"createdFiles": [],
                              "updatedFiles": [], "deletedFiles": []}
         self._has_change_list = False
+        self._enabled = False
         self._use_test_impact_analysis = False
 
         # Unique instance id to be used as part of the report name.
         self._instance_id = uuid.uuid4().hex
 
         self._s3_bucket = args.get(ARG_S3_BUCKET)
-        self._suite = args.get(ARG_SUITE)
+        self._suites = args.get(ARG_SUITES)
+        self._label_excludes = args.get(ARG_LABEL_EXCLUDES)
+
+        # Compile the dash-separated concatenation of the ordered suites and labels to be used as path components
+        self._suites_string = '-'.join(self._suites) if isinstance(self._suites, list) else self._suites
+        self._label_excludes_string = '-'.join(self._label_excludes) if isinstance(self._label_excludes, list) else self._label_excludes
 
         self._config = self._parse_config_file(args.get(ARG_CONFIG))
+        if not self._enabled:
+            logger.info(f"TIAF is disabled.")
+            return
 
         # Initialize branches
         self._src_branch = args.get(ARG_SOURCE_BRANCH)
@@ -87,8 +97,7 @@ class BaseTestImpact(ABC):
         # If flag is set for us to use TIAF
         if self._use_test_impact_analysis:
             logger.info("Test impact analysis is enabled.")
-            self._persistent_storage = self._initialize_persistent_storage(
-                s3_bucket=self._s3_bucket, suite=self._suite, s3_top_level_dir=args.get(ARG_S3_TOP_LEVEL_DIR))
+            self._persistent_storage = self._initialize_persistent_storage(s3_top_level_dir=args.get(ARG_S3_TOP_LEVEL_DIR))
 
             # If persistent storage initialized correctly
             if self._persistent_storage:
@@ -131,21 +140,19 @@ class BaseTestImpact(ABC):
         self._report_file = PurePath(self._report_workspace).joinpath(
             f"report.{self._instance_id}.json")
         args[ARG_REPORT] = self._report_file
-        self._parse_arguments_to_runtime(
-            args, self._runtime_args)
+        self._parse_arguments_to_runtime(args)
 
-    def _parse_arguments_to_runtime(self, args, runtime_args):
+    def _parse_arguments_to_runtime(self, args):
         """
         Fetches the relevant keys from the provided dictionary and applies the value of each argument (or applies it as a flag) to our runtime_args list.
 
         @param args: Dictionary containing the arguments passed to this TestImpact object. Will contain all the runtime arguments we need to apply.
-        @runtime_args: A list of strings that will become the arguments for our runtime.
         """
 
         for argument in RuntimeArgs:
             value = args.get(argument.driver_argument)
             if value:
-                runtime_args.append(f"{argument.runtime_arg}{value}")
+                self._runtime_args.append(f"{argument.runtime_arg}{','.join(value) if isinstance(value, list) else value}") 
                 logger.info(f"{argument.message}{value}")
 
     def _handle_historic_data(self):
@@ -181,24 +188,22 @@ class BaseTestImpact(ABC):
             # If this commit is different to the last commit in our historic data, we can diff the commits to get our change list
             self._attempt_to_generate_change_list()
 
-    def _initialize_persistent_storage(self, suite: str, s3_bucket: str = None, s3_top_level_dir: str = None):
+    def _initialize_persistent_storage(self, s3_top_level_dir: str = None):
         """
         Initialise our persistent storage object. Defaults to local storage unless an S3 bucket has been specified.
         Returns PersistentStorage object or None if initialisation failed.
 
-        @param suite: The testing suite we are using.
-        @param s3_bucket: the name of the S3 bucket to connect to. Can be set to none.
         @param s3_top_level_dir: The name of the top level directory to use in the s3 bucket.
 
         @returns: Returns a persistent storage object, or None if a SystemError exception occurs while initialising the object.
         """
         try:
-            if s3_bucket:
+            if self._s3_bucket:
                 return PersistentStorageS3(
-                    self._config, suite, self._dst_commit, s3_bucket, self._compile_s3_top_level_dir_name(s3_top_level_dir), self._source_of_truth_branch, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._temp_workspace)
+                    self._config, self._suites_string, self._dst_commit, self._s3_bucket, self._compile_s3_top_level_dir_name(s3_top_level_dir), self._source_of_truth_branch, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._temp_workspace)
             else:
                 return PersistentStorageLocal(
-                    self._config, suite, self._dst_commit, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._historic_workspace, self._historic_data_file, self._temp_workspace)
+                    self._config, self._suites_string, self._dst_commit, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._historic_workspace, self._historic_data_file, self._temp_workspace)
         except SystemError as e:
             logger.warning(
                 f"The persistent storage encountered an irrecoverable error, test impact analysis will be disabled: '{e}'")
@@ -244,7 +249,8 @@ class BaseTestImpact(ABC):
         PREVIOUS_TEST_RUNS_KEY = "previous_test_runs"
         HISTORIC_DATA_FILE_KEY = "data"
         JENKINS_KEY = "jenkins"
-        USE_TIAF_KEY = "use_test_impact_analysis"
+        ENABLED_KEY = "enabled"
+        USE_TEST_IMPACT_ANALYSIS_KEY = "use_test_impact_analysis"
         RUNTIME_BIN_KEY = "runtime_bin"
         RUNTIME_ARTIFACT_DIR_KEY = "run_artifact_dir"
         RUNTIME_COVERAGE_DIR_KEY = "coverage_artifact_dir"
@@ -260,7 +266,8 @@ class BaseTestImpact(ABC):
                 self._repo = Repo(self._repo_dir)
 
                 # TIAF
-                self._use_test_impact_analysis = config[COMMON_CONFIG_KEY][JENKINS_KEY][USE_TIAF_KEY]
+                self._enabled = config[self.runtime_type][JENKINS_KEY][ENABLED_KEY]
+                self._use_test_impact_analysis = config[self.runtime_type][JENKINS_KEY][USE_TEST_IMPACT_ANALYSIS_KEY]
                 self._tiaf_bin = Path(
                     config[self.runtime_type][RUNTIME_BIN_KEY])
                 if self._use_test_impact_analysis and not self._tiaf_bin.is_file():
@@ -387,7 +394,7 @@ class BaseTestImpact(ABC):
             self._has_change_list = False
             return
 
-    def _generate_result(self, s3_bucket: str, suite: str, return_code: int, report: dict, runtime_args: list):
+    def _generate_result(self, return_code: int, report: dict):
         """
         Generates the result object from the pertinent runtime meta-data and sequence report.
 
@@ -400,14 +407,15 @@ class BaseTestImpact(ABC):
         result[constants.COMMIT_DISTANCE_KEY] = self._commit_distance
         result[constants.SRC_BRANCH_KEY] = self._src_branch
         result[constants.DST_BRANCH_KEY] = self._dst_branch
-        result[constants.SUITE_KEY] = suite
+        result[constants.SUITES_KEY] = self._suites
+        result[constants.LABEL_EXCLUDES_KEY] = self._label_excludes
         result[constants.USE_TEST_IMPACT_ANALYSIS_KEY] = self._use_test_impact_analysis
         result[constants.SOURCE_OF_TRUTH_BRANCH_KEY] = self._source_of_truth_branch
         result[constants.IS_SOURCE_OF_TRUTH_BRANCH_KEY] = self._is_source_of_truth_branch
         result[constants.HAS_CHANGE_LIST_KEY] = self._has_change_list
         result[constants.HAS_HISTORIC_DATA_KEY] = self._has_historic_data
-        result[constants.S3_BUCKET_KEY] = s3_bucket
-        result[constants.RUNTIME_ARGS_KEY] = runtime_args
+        result[constants.S3_BUCKET_KEY] = self._s3_bucket
+        result[constants.RUNTIME_ARGS_KEY] = self._runtime_args
         result[constants.RUNTIME_RETURN_CODE_KEY] = return_code
         result[constants.REPORT_KEY] = report
         result[constants.CHANGE_LIST_KEY] = self._change_list
@@ -470,6 +478,7 @@ class BaseTestImpact(ABC):
             if report_type == constants.SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
                 test_runs = test_runs + self._extract_test_runs_from_test_run_report(
                     report[constants.DISCARDED_TEST_RUN_REPORT_KEY])
+        return test_runs
 
     def run(self):
         """
@@ -505,7 +514,7 @@ class BaseTestImpact(ABC):
             logger.error(
                 f"The test impact analysis runtime returned with error: '{runtime_result.returncode}'.")
 
-        return self._generate_result(self._s3_bucket, self._suite, runtime_result.returncode, report, self._runtime_args)
+        return self._generate_result(runtime_result.returncode, report)
 
     @property
     def _is_source_of_truth_branch(self):
@@ -525,6 +534,10 @@ class BaseTestImpact(ABC):
             return self._persistent_storage.has_historic_data
         return False
 
+    @property
+    def enabled(self):
+        """
+        Whether TIAF is enabled for this runtime type in the config file.
+        """
+        return self._enabled
+
     @property
     def source_branch(self):
         """
@@ -577,11 +590,11 @@ class BaseTestImpact(ABC):
         return self._instance_id
 
     @property
-    def test_suite(self):
+    def test_suites(self):
         """
-        The test suite being executed.
+        The test suites being executed.
         """
-        return self._suite
+        return self._suites
 
     @property
     def source_of_truth_branch(self):
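
A brief sketch of the two joins introduced in base_test_impact.py above, using placeholder suite names: the dash-joined form becomes a path component for the historic data, while the comma-joined form is what _parse_arguments_to_runtime appends to the runtime arguments.

suites = ["main", "sandbox"]

# Dash-separated concatenation used as a path component, e.g. .../profile/main-sandbox
suites_string = '-'.join(suites) if isinstance(suites, list) else suites

# Comma-separated value passed to the runtime, e.g. --suites=main,sandbox
runtime_arg = f"--suites={','.join(suites) if isinstance(suites, list) else suites}"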

+ 2 - 1
scripts/build/TestImpactAnalysis/test_impact/runtime_test_impact_args.py

@@ -23,7 +23,8 @@ class RuntimeArgs(Enum):
     COMMON_SEQUENCE = ("sequence", "--sequence=", "Sequence type is set to: ")
     COMMON_FPOLICY = ("test_failure_policy", "--fpolicy=",
                "Test failure policy is set to: ")
-    COMMON_SUITE = ("suite", "--suite=", "Test suite is set to: ")
+    COMMON_SUITES = ("suites", "--suites=", "Test suites is set to: ")
+    COMMON_LABEL_EXCLUDES = ("label_excludes", "--labelexcludes=", "Suite label excludes is set to: ")
     COMMON_EXCLUDE = ("exclude_file", "--excluded=",
                "Exclude file found, excluding tests stored at: ")
     COMMON_TEST_TIMEOUT = ("test_timeout", "--ttimeout=",
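
The base class above reads argument.driver_argument, argument.runtime_arg and argument.message from each entry. The full RuntimeArgs definition is not shown in this diff, but one way such a three-field enum can expose those names is sketched below; this is illustrative only, not the actual implementation:

from enum import Enum

class RuntimeArgsSketch(Enum):
    # (driver argument key, runtime flag prefix, log message)
    COMMON_SUITES = ("suites", "--suites=", "Test suites are set to: ")
    COMMON_LABEL_EXCLUDES = ("label_excludes", "--labelexcludes=", "Suite label excludes are set to: ")

    @property
    def driver_argument(self):
        return self.value[0]

    @property
    def runtime_arg(self):
        return self.value[1]

    @property
    def message(self):
        return self.value[2]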

+ 25 - 7
scripts/build/TestImpactAnalysis/tiaf_driver.py

@@ -11,12 +11,16 @@ import mars_utils
 import sys
 import pathlib
 import traceback
-import re
 from test_impact import NativeTestImpact, PythonTestImpact
 from tiaf_logger import get_logger
 
 logger = get_logger(__file__)
 
+class PruneAndSortMultiValues(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        # Remove duplicate values and sort alphabetically so equivalent inputs yield the same ordering
+        values = sorted(set(values))
+        setattr(namespace, self.dest, values)
 
 def parse_args():
     def valid_file_path(value):
@@ -99,9 +103,20 @@ def parse_args():
 
     # Test suite
     parser.add_argument(
-        '--suite',
-        help="Test suite to run",
-        required=True
+        '--suites',
+        help="Test suites to run",
+        nargs='+',
+        action=PruneAndSortMultiValues,
+        required=True,
+    )
+
+    # Test label excludes
+    parser.add_argument(
+        '--label-excludes',
+        help="CTest suite labels to exclude if matched",
+        nargs='*',
+        action=PruneAndSortMultiValues,
+        required=False
     )
 
     # Test failure policy
@@ -191,6 +206,11 @@ def main(args: dict):
     try:
         tiaf_class = SUPPORTED_RUNTIMES[args.pop("runtime_type")]
         tiaf = tiaf_class(args)
+
+        if not tiaf.enabled:
+            logger.info("TIAF has been disabled for this runtime type.")
+            sys.exit(0)
+
         tiaf_result = tiaf.run()
         if args.get('mars_index_prefix'):
             logger.info("Transmitting report to MARS...")
@@ -205,9 +225,7 @@ def main(args: dict):
         # Non-gating will be removed from this script and handled at the job level in SPEC-7413
         logger.error(f"Exception caught by TIAF driver: '{e}'.")
         traceback.print_exc()
-    finally:
-        # This will not gate the AR run - replace with result.return_code if you wish to enable gating.
-        sys.exit(0)
+        sys.exit(1)
 
 
 if __name__ == "__main__":
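
A short, hypothetical usage sketch of the deduplicating argparse action added above, assuming it sorts alphabetically as its comment states; the class here mirrors the hunk so the snippet is self-contained:

import argparse

class PruneAndSortMultiValues(argparse.Action):  # mirrors the action added in the hunk above
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, sorted(set(values)))

parser = argparse.ArgumentParser()
parser.add_argument('--suites', nargs='+', action=PruneAndSortMultiValues, required=True)
parsed = parser.parse_args(['--suites', 'sandbox', 'main', 'main'])
print(parsed.suites)  # ['main', 'sandbox'] -- duplicates dropped, order normalised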

+ 2 - 1
scripts/build/TestImpactAnalysis/tiaf_report_constants.py

@@ -13,7 +13,8 @@ DST_COMMIT_KEY = "dst_commit"
 COMMIT_DISTANCE_KEY = "commit_distance"
 SRC_BRANCH_KEY = "src_branch"
 DST_BRANCH_KEY = "dst_branch"
-SUITE_KEY = "suite"
+SUITES_KEY = "suites"
+LABEL_EXCLUDES_KEY = "label_excludes"
 SOURCE_OF_TRUTH_BRANCH_KEY = "source_of_truth_branch"
 IS_SOURCE_OF_TRUTH_BRANCH_KEY = "is_source_of_truth_branch"
 USE_TEST_IMPACT_ANALYSIS_KEY = "use_test_impact_analysis"
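
For reference, a hedged sketch of how the renamed keys surface in the generated result dictionary; only keys whose string values appear in this file are shown, and all values are placeholders:

result_sketch = {
    "src_branch": "development",
    "dst_branch": "development",
    "suites": ["main", "sandbox"],        # replaces the single "suite" entry
    "label_excludes": ["REQUIRES_gpu"],   # new with this change
    "source_of_truth_branch": "development",
    "is_source_of_truth_branch": True,
    "use_test_impact_analysis": True,
}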