benchmark_config.py

from toolset.utils.output_helper import QuietOutputStream
from toolset.test_types import test_types

import os
import time


class BenchmarkConfig:
    def __init__(self, args):
        '''
        Configures this BenchmarkConfig given the arguments provided.
        '''

        # Map type strings to their objects
        types = {}
        for type in test_types:
            types[type] = test_types[type](self)

        # Turn type into a map instead of a list of strings
        if 'all' in args.type:
            self.types = types
        else:
            self.types = {t: types[t] for t in args.type}

        # Check if we're running in a CI environment
        self.is_ci = os.getenv('CI')
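
        # The remaining command-line arguments are copied straight onto the
        # config object.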
        self.duration = args.duration
        self.exclude = args.exclude
        self.quiet = args.quiet
        self.reverse_order = args.reverse_order
        self.server_host = args.server_host
        self.database_host = args.database_host
        self.client_host = args.client_host
        self.audit = args.audit
        self.new = args.new
        self.mode = args.mode
        self.list_tests = args.list_tests
        self.list_tag = args.list_tag
        self.max_concurrency = max(args.concurrency_levels)
        self.concurrency_levels = args.concurrency_levels
        self.cached_query_levels = args.cached_query_levels
        self.pipeline_concurrency_levels = args.pipeline_concurrency_levels
        self.query_levels = args.query_levels
        self.parse = args.parse
        self.results_environment = args.results_environment
        self.results_name = args.results_name
        self.results_upload_uri = args.results_upload_uri
        self.test = args.test
        self.test_dir = args.test_dir
        self.test_lang = args.test_lang
        self.tag = args.tag
        self.network_mode = args.network_mode
        self.server_docker_host = None
        self.database_docker_host = None
        self.client_docker_host = None
        self.network = None
        self.cpuset_cpus = args.cpuset_cpus
        self.test_container_memory = args.test_container_memory
        self.extra_docker_runtime_args = args.extra_docker_runtime_args
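
        # No explicit network_mode means a single-machine run: use the 'tfb'
        # Docker network and the local Docker socket for the server, database,
        # and client roles.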
        if self.network_mode is None:
            self.network = 'tfb'
            self.server_docker_host = "unix://var/run/docker.sock"
            self.database_docker_host = "unix://var/run/docker.sock"
            self.client_docker_host = "unix://var/run/docker.sock"
        else:
            self.network = None
            # The only other supported network_mode is 'host', and that means
            # that we have a tri-machine setup, so we need to use tcp to
            # communicate with docker.
            self.server_docker_host = "tcp://%s:2375" % self.server_host
            self.database_docker_host = "tcp://%s:2375" % self.database_host
            self.client_docker_host = "tcp://%s:2375" % self.client_host
        self.quiet_out = QuietOutputStream(self.quiet)

        self.start_time = time.time()

        # Remember directories
        self.fw_root = os.getenv('FWROOT')
        self.db_root = os.path.join(self.fw_root, "toolset", "databases")
        self.lang_root = os.path.join(self.fw_root, "frameworks")
        self.results_root = os.path.join(self.fw_root, "results")
        self.wrk_root = os.path.join(self.fw_root, "toolset", "wrk")
        self.scaffold_root = os.path.join(self.fw_root, "toolset", "scaffolding")
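
        # Reuse the timestamp of the run being parsed, if any; otherwise stamp
        # this run with the current local time.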
        if hasattr(self, 'parse') and self.parse is not None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        self.run_test_timeout_seconds = 7200
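

# Illustrative construction (a sketch, not part of the toolset): every field
# below is an attribute that __init__ reads from `args`. Real runs build this
# namespace with the toolset's argparse parser; the values here are only
# placeholders, and FWROOT is assumed to point at the framework checkout.
#
#   from argparse import Namespace
#
#   config = BenchmarkConfig(Namespace(
#       type=['all'], duration=15, exclude=[], quiet=False,
#       reverse_order=False, server_host='localhost',
#       database_host='localhost', client_host='localhost',
#       audit=False, new=False, mode='benchmark', list_tests=False,
#       list_tag='', concurrency_levels=[16, 32, 64, 128, 256, 512],
#       cached_query_levels=[1, 10, 20, 50, 100],
#       pipeline_concurrency_levels=[256, 1024, 4096, 16384],
#       query_levels=[1, 5, 10, 15, 20], parse=None,
#       results_environment='', results_name='', results_upload_uri=None,
#       test=None, test_dir=None, test_lang=None, tag=None,
#       network_mode=None, cpuset_cpus=None, test_container_memory=None,
#       extra_docker_runtime_args=None))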