
Move script generation into test type classes (#3376)

Mike Smith 7 years ago
parent
commit
be60078bc5

+ 3 - 3
deployment/vagrant/bootstrap.sh

@@ -34,11 +34,11 @@ cat <<EOF > benchmark.cfg
 [Defaults]
 # Available Keys:
 os=linux
-server_host=TFB-server
-client_host=TFB-client
+server_host=127.0.0.1
+client_host=127.0.0.1
 client_identity_file=/home/vagrant/.ssh/id_rsa
 client_user=vagrant
-database_host=TFB-database
+database_host=127.0.0.1
 database_identity_file=/home/vagrant/.ssh/id_rsa
 database_os=linux
 database_user=vagrant
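
The Vagrant bootstrap now points server_host, client_host and database_host at 127.0.0.1, since the single Vagrant VM plays all three roles. For orientation only, a minimal sketch of reading such a [Defaults] section with Python's standard configparser (the toolset's actual config loader is not part of this diff):

# Minimal sketch: consuming the [Defaults] section written by bootstrap.sh.
# Assumes plain configparser semantics; the toolset's real loader is not shown here.
from configparser import ConfigParser

parser = ConfigParser()
parser.read('benchmark.cfg')
defaults = parser['Defaults']

server_host = defaults.get('server_host', '127.0.0.1')
client_host = defaults.get('client_host', '127.0.0.1')
database_host = defaults.get('database_host', '127.0.0.1')
print(server_host, client_host, database_host)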

+ 4 - 19
toolset/benchmark/benchmarker.py

@@ -1,6 +1,5 @@
 from toolset.utils.output_helper import header
 from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
-from toolset.utils.remote_script_helper import generate_concurrency_script, generate_pipeline_script, generate_query_script
 from toolset.utils import docker_helper
 
 import os
@@ -96,24 +95,10 @@ class Benchmarker:
                         pass
 
                 if not test.failed:
-                    if test_type == 'plaintext':  # One special case
-                        remote_script = generate_pipeline_script(
-                            self.config, test.name, test.get_url(),
-                            framework_test.port, test.accept_header)
-                    elif test_type == 'query' or test_type == 'update':
-                        remote_script = generate_query_script(
-                            self.config, test.name, test.get_url(),
-                            framework_test.port, test.accept_header,
-                            self.config.query_levels)
-                    elif test_type == 'cached_query':
-                        remote_script = generate_query_script(
-                            self.config, test.name, test.get_url(),
-                            framework_test.port, test.accept_header,
-                            self.config.cached_query_levels)
-                    else:
-                        remote_script = generate_concurrency_script(
-                            self.config, test.name, test.get_url(),
-                            framework_test.port, test.accept_header)
+                    remote_script = self.config.types[
+                        test_type].get_remote_script(self.config, test.name,
+                                                     test.get_url(),
+                                                     framework_test.port)
 
                     # Begin resource usage metrics collection
                     self.__begin_logging(framework_test, test_type)
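
The hard-coded if/elif chain keyed on test_type is gone: each entry in config.types is now a test type object that knows how to build its own remote script, so the benchmarker makes a single polymorphic call. A rough sketch of that dispatch pattern (the classes below are illustrative stand-ins, not the toolset's real test types):

# Illustrative sketch of the registry-plus-polymorphism pattern used above;
# ConcurrencyType and PipelineType are stand-ins, not the real test types.
class TestType:
    def get_remote_script(self, config, name, url, port):
        raise NotImplementedError("Subclasses must provide get_remote_script")


class ConcurrencyType(TestType):
    def get_remote_script(self, config, name, url, port):
        return "run-concurrency %s http://server:%d%s" % (name, port, url)


class PipelineType(TestType):
    def get_remote_script(self, config, name, url, port):
        return "run-pipeline %s http://server:%d%s" % (name, port, url)


types = {'json': ConcurrencyType(), 'plaintext': PipelineType()}

# The caller no longer branches on the test type name:
print(types['plaintext'].get_remote_script(None, 'example', '/plaintext', 8080))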

+ 9 - 0
toolset/benchmark/test_types/cached_query_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import verify_query_cases
+from toolset.utils.remote_script_helper import generate_query_script
 
 
 class CachedQueryTestType(FrameworkTestType):
@@ -35,3 +36,11 @@ class CachedQueryTestType(FrameworkTestType):
             return [('pass', '', url + case) for case, _ in cases]
         else:
             return problems
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_query_script(self.config, name, url, port,
+                                     self.accept_header,
+                                     self.config.cached_query_levels)
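
cached_query reuses the same generate_query_script helper as the query and update types, only with config.cached_query_levels. The helper itself is outside this diff; the sketch below merely illustrates the kind of per-query-level command list such a generator might emit (the wrk flags, host name, and loop are assumptions, not the real remote_script_helper):

# Hypothetical sketch of a query-style script generator.  The real
# generate_query_script is not shown in this diff, so every name and flag
# here is an assumption made for illustration.
def sketch_query_script(name, url, port, accept_header, query_levels,
                        duration=15, connections=256, threads=8):
    lines = ["echo 'Running %s'" % name]
    for level in query_levels:
        target = "http://tfb-server:%d%s%s" % (port, url, level)
        lines.append("wrk -H 'Accept: %s' -d %d -c %d -t %d %s"
                     % (accept_header, duration, connections, threads, target))
    return "\n".join(lines)


print(sketch_query_script('example', '/queries?queries=', 8080,
                          'application/json', [1, 5, 10, 15, 20]))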

+ 8 - 0
toolset/benchmark/test_types/db_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_randomnumber_object
+from toolset.utils.remote_script_helper import generate_concurrency_script
 
 
 class DBTestType(FrameworkTestType):
@@ -56,3 +57,10 @@ class DBTestType(FrameworkTestType):
             return [('pass', '', url)]
         else:
             return problems
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_concurrency_script(self.config, name, url, port,
+                                           self.accept_header)

+ 8 - 0
toolset/benchmark/test_types/fortune_type.py

@@ -1,6 +1,7 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.fortune_html_parser import FortuneHTMLParser
 from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers
+from toolset.utils.remote_script_helper import generate_concurrency_script
 
 
 class FortuneTestType(FrameworkTestType):
@@ -86,3 +87,10 @@ class FortuneTestType(FrameworkTestType):
             # If there were errors reading the diff, then no diff information
             pass
         return problems
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_concurrency_script(self.config, name, url, port,
+                                           self.accept_header)

+ 9 - 1
toolset/benchmark/test_types/framework_test_type.py

@@ -125,11 +125,19 @@ class FrameworkTestType:
         raise NotImplementedError("Subclasses must provide verify")
 
     def get_url(self):
-        '''Returns the URL for this test, like '/json'''
+        '''
+        Returns the URL for this test, like '/json'
+        '''
         # This is a method because each test type uses a different key
         # for their URL so the base class can't know which arg is the URL
         raise NotImplementedError("Subclasses must provide get_url")
 
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script for running the benchmarking process.
+        '''
+        raise NotImplementedError("Subclasses must provide get_remote_script")
+
     def copy(self):
         '''
         Returns a copy that can be safely modified.
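
With get_remote_script added next to verify and get_url, FrameworkTestType now spells out the full contract a test type must satisfy. A minimal, self-contained illustration of a class honouring that contract (it deliberately does not import the real base class or helpers):

# Minimal illustration of the FrameworkTestType contract
# (verify, get_url, get_remote_script); standalone, not the real base class.
class EchoTestType:
    def __init__(self, config):
        self.config = config
        self.accept_header = 'text/plain'
        self.echo_url = '/echo'

    def verify(self, base_url):
        # A real implementation would issue the request and inspect the body.
        return [('pass', '', base_url + self.echo_url)]

    def get_url(self):
        return self.echo_url

    def get_remote_script(self, config, name, url, port):
        # A real implementation would delegate to a remote_script_helper function.
        return "wrk -d 15 -c 256 -t 8 http://tfb-server:%d%s" % (port, url)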

+ 8 - 0
toolset/benchmark/test_types/json_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_helloworld_object
+from toolset.utils.remote_script_helper import generate_concurrency_script
 
 
 class JsonTestType(FrameworkTestType):
@@ -38,3 +39,10 @@ class JsonTestType(FrameworkTestType):
             return problems
         else:
             return [('pass', '', url)]
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_concurrency_script(self.config, name, url, port,
+                                           self.accept_header)

+ 8 - 1
toolset/benchmark/test_types/plaintext_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers
+from toolset.utils.remote_script_helper import generate_pipeline_script
 
 
 class PlaintextTestType(FrameworkTestType):
@@ -23,7 +24,6 @@ class PlaintextTestType(FrameworkTestType):
             return problems
 
         # Case insensitive
-        orig = body
         body = body.lower()
         expected = "hello, world!"
         extra_bytes = len(body) - len(expected)
@@ -48,3 +48,10 @@ class PlaintextTestType(FrameworkTestType):
 
     def get_url(self):
         return self.plaintext_url
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_pipeline_script(self.config, name, url, port,
+                                        self.accept_header)
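
Plaintext stays the one special case: it asks for a pipelined script instead of a plain concurrency script, but that choice now lives in the class rather than in the benchmarker. The real generate_pipeline_script is not part of this diff; as an assumed sketch, the usual difference is an extra wrk Lua script plus a pipeline depth argument:

# Assumed sketch only: how a pipelined wrk invocation typically differs from
# the plain concurrency case.  'pipeline.lua' and the depth argument are
# assumptions for illustration, not taken from remote_script_helper.
def sketch_pipeline_command(url, port, accept_header, depth=16):
    return ("wrk -H 'Accept: %s' -d 15 -c 256 -t 8 "
            "-s pipeline.lua http://tfb-server:%d%s -- %d"
            % (accept_header, port, url, depth))


print(sketch_pipeline_command('/plaintext', 8080, 'text/plain'))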

+ 9 - 0
toolset/benchmark/test_types/query_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import verify_query_cases
+from toolset.utils.remote_script_helper import generate_query_script
 
 
 class QueryTestType(FrameworkTestType):
@@ -35,3 +36,11 @@ class QueryTestType(FrameworkTestType):
             return [('pass', '', url + case) for case, _ in cases]
         else:
             return problems
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_query_script(self.config, name, url, port,
+                                     self.accept_header,
+                                     self.config.query_levels)

+ 9 - 0
toolset/benchmark/test_types/update_type.py

@@ -1,5 +1,6 @@
 from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
 from toolset.benchmark.test_types.verifications import verify_query_cases
+from toolset.utils.remote_script_helper import generate_query_script
 
 
 class UpdateTestType(FrameworkTestType):
@@ -34,3 +35,11 @@ class UpdateTestType(FrameworkTestType):
             return [('pass', '', url + case) for (case, _) in cases]
         else:
             return problems
+
+    def get_remote_script(self, config, name, url, port):
+        '''
+        Returns the remote script
+        '''
+        return generate_query_script(self.config, name, url, port,
+                                     self.accept_header,
+                                     self.config.query_levels)

+ 4 - 0
toolset/continuous/run-continuously.sh

@@ -33,6 +33,10 @@ do
   git clone -b $TFB_REPOBRANCH $TFB_REPOURI $TFB_REPOPARENT/$TFB_REPONAME
   # Replace the benchmark.cfg
   cp $TFB_REPOPARENT/benchmark.cfg $TFB_REPOPARENT/$TFB_REPONAME/benchmark.cfg 2>/dev/null
+  # Set the hosts correctly in the benchmark.cfg
+  sed -i 's|server_host=.*|server_host='"$TFB_SERVER"'|g' $TFB_REPOPARENT/$TFB_REPONAME/benchmark.cfg 2>/dev/null
+  sed -i 's|database_host=.*|database_host='"$TFB_DATABASE"'|g' $TFB_REPOPARENT/$TFB_REPONAME/benchmark.cfg 2>/dev/null
+  sed -i 's|client_host=.*|client_host='"$TFB_CLIENT"'|g' $TFB_REPOPARENT/$TFB_REPONAME/benchmark.cfg 2>/dev/null
 
   echo Running continuous tasks
   $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/run-tasks.sh