  1. """
  2. Copyright (c) Contributors to the Open 3D Engine Project.
  3. For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. SPDX-License-Identifier: Apache-2.0 OR MIT
  5. """
  6. import logging
  7. import os
  8. import psutil
  9. import sys
  10. import pytest
  11. import editor_python_test_tools.hydra_test_utils as hydra
  12. from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
  13. logger = logging.getLogger(__name__)

WINDOWS = sys.platform.startswith('win')

pytestmark = pytest.mark.skipif(
    not WINDOWS,
    reason="TestSuite_Benchmark_GPU.py currently only runs on Windows")


def filebeat_service_running():
    """
    Checks if the filebeat service is currently running on the OS.
    :return: True if the filebeat service is detected and running, False otherwise.
    """
    if not WINDOWS:
        # psutil only exposes win_service_get() on Windows.
        return False
    try:
        filebeat_service = psutil.win_service_get('filebeat')
        filebeat_service_info = filebeat_service.as_dict()
        return filebeat_service_info['status'] == 'running'
    except psutil.NoSuchProcess:
        return False
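
# A quick manual check of the same condition (a sketch; assumes a Windows
# machine with psutil installed and a service named 'filebeat' registered):
#
#   >>> import psutil
#   >>> psutil.win_service_get('filebeat').status()
#   'running'
#
# win_service_get() raises psutil.NoSuchProcess when no service with that
# name exists, which is why the helper above reports False in that case.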


@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):

    @pytest.mark.parametrize('rhi', ['-rhi=dx12'])
    def test_AtomFeatureIntegrationBenchmarkTest_GatherBenchmarkMetrics_DX12(
            self, request, editor, workspace, rhi, project, launcher_platform, level):
        """
        Gathers benchmark metrics for the DX12 RHI by running the benchmark
        level in the Editor. Please review the hydra script run by this test
        for more specific test info.
        """
        expected_lines = [
            "Benchmark metadata captured.",
            "Pass timestamps captured.",
            "CPU frame time captured.",
            "Captured data successfully.",
            "Exited game mode"
        ]
        unexpected_lines = [
            "Failed to capture data.",
            "Failed to capture pass timestamps.",
            "Failed to capture CPU frame time.",
            "Failed to capture benchmark metadata."
        ]
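        # launch_and_validate_results launches the Editor, runs the hydra
        # script from the local "tests" directory, and fails the test if an
        # unexpected line appears (halt_on_unexpected) or the expected lines
        # are not all seen before the timeout.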
        hydra.launch_and_validate_results(
            request,
            os.path.join(os.path.dirname(__file__), "tests"),
            editor,
            "hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
            timeout=600,
            expected_lines=expected_lines,
            unexpected_lines=unexpected_lines,
            halt_on_unexpected=True,
            cfg_args=[level, rhi],
            null_renderer=False,
        )
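
    # The "send" tests below are skipped unless the filebeat service is
    # running; each one uploads the metrics persisted by the matching
    # "gather" test above.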
    @pytest.mark.skipif(not filebeat_service_running(), reason="filebeat service not running")
    def test_AtomFeatureIntegrationBenchmarkTest_SendBenchmarkMetrics_DX12(
            self, request, editor, workspace, project, launcher_platform, level):
        """
        Aggregates the gathered DX12 benchmark metrics and uses filebeat to
        send the metrics data.
        """
        aggregator = BenchmarkDataAggregator(workspace, logger, 'main_gpu')
        aggregator.upload_metrics('dx12')

    @pytest.mark.parametrize('rhi', ['-rhi=Vulkan'])
    def test_AtomFeatureIntegrationBenchmarkTest_GatherBenchmarkMetrics_Vulkan(
            self, request, editor, workspace, rhi, project, launcher_platform, level):
        """
        Gathers benchmark metrics for the Vulkan RHI by running the benchmark
        level in the Editor. Please review the hydra script run by this test
        for more specific test info.
        """
        expected_lines = [
            "Benchmark metadata captured.",
            "Pass timestamps captured.",
            "CPU frame time captured.",
            "Captured data successfully.",
            "Exited game mode"
        ]
        unexpected_lines = [
            "Failed to capture data.",
            "Failed to capture pass timestamps.",
            "Failed to capture CPU frame time.",
            "Failed to capture benchmark metadata."
        ]
        hydra.launch_and_validate_results(
            request,
            os.path.join(os.path.dirname(__file__), "tests"),
            editor,
            "hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
            timeout=600,
            expected_lines=expected_lines,
            unexpected_lines=unexpected_lines,
            halt_on_unexpected=True,
            cfg_args=[level, rhi],
            null_renderer=False,
        )

    @pytest.mark.skipif(not filebeat_service_running(), reason="filebeat service not running")
    def test_AtomFeatureIntegrationBenchmarkTest_SendBenchmarkMetrics_Vulkan(
            self, request, editor, workspace, project, launcher_platform, level):
        """
        Aggregates the gathered Vulkan benchmark metrics and uses filebeat to
        send the metrics data.
        """
        aggregator = BenchmarkDataAggregator(workspace, logger, 'main_gpu')
        aggregator.upload_metrics('Vulkan')
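
# Example local invocation (a sketch, not the canonical command line: the
# editor/workspace fixtures come from ly_test_tools, and any additional CLI
# flags depend on the local O3DE workspace setup, so treat the arguments
# below as assumptions):
#
#   python -m pytest TestSuite_Benchmark_GPU.py -v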