CachedQueryTestType Scaffolding (#2824)

* CachedQueryTestType Scaffolding

* Add different query levels for the cached type

* cached query test for default nodejs
Nate, 8 years ago
parent commit 3f1a08eff4

+ 2 - 1
benchmark.cfg.example

@@ -17,7 +17,8 @@ install_strategy=unified
 install_only=False
 list_tests=False
 concurrency_levels=[16, 32, 64, 128, 256, 512]
-query_levels=[1, 5,10,15,20]
+query_levels=[1,5,10,15,20]
+cached_query_levels=[1,10,20,50,100]
 mode=benchmark
 sleep=60
 test=None
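
The new `cached_query_levels` option mirrors `query_levels`, with larger counts for the cached path. As a minimal sketch (not part of this commit), a bracketed list line like the one added here can be parsed into a Python list as shown below; the toolset's real config loader is not part of this diff, so the helper name is purely illustrative.

# Minimal sketch (not part of this commit): parsing a bracketed level list
# from a config line such as the one added above. The toolset has its own
# config loader, which is not shown in this diff; parse_level_list is an
# illustrative name only.
def parse_level_list(line):
    _, _, value = line.partition('=')
    return [int(item) for item in value.strip().strip('[]').split(',') if item.strip()]

print(parse_level_list("cached_query_levels=[1,10,20,50,100]"))   # [1, 10, 20, 50, 100]
print(parse_level_list("query_levels=[1,5,10,15,20]"))            # [1, 5, 10, 15, 20]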

+ 1 - 0
deployment/vagrant/bootstrap.sh

@@ -56,6 +56,7 @@ install_only=False
 list_tests=False
 concurrency_levels=[8, 16, 32, 64, 128, 256]
 query_levels=[1, 5,10,15,20]
+cached_query_levels=[1,10,20,50,100]
 threads=8
 mode=benchmark
 sleep=60

+ 1 - 0
frameworks/JavaScript/nodejs/benchmark_config.json

@@ -7,6 +7,7 @@
       "plaintext_url": "/plaintext",
       "db_url": "/mysql/db",
       "query_url": "/mysql/queries?queries=",
+      "cached_query_url": "/mysql/cached?queries=",
       "update_url": "/mysql/updates?queries=",
       "fortune_url": "/mysql/fortunes",
       "port": 8080,

+ 29 - 1
frameworks/JavaScript/nodejs/handlers/mysql-raw.js

@@ -7,12 +7,17 @@ const connection = mysql.createConnection({
   password : 'benchmarkdbpass',
   database : 'hello_world'
 });
+const NodeCache = require( "node-cache" );
+const myCache = new NodeCache( { stdTTL: 0, checkperiod: 0 } );
+
+let cachePopulated = false;
 
 connection.connect();
 
 const queries = {
   GET_RANDOM_WORLD: () => "SELECT * FROM world WHERE id = " + h.randomTfbNumber(),
   ALL_FORTUNES: "SELECT * FROM fortune",
+  ALL_WORLDS: "SELECT * FROM world",
   UPDATE_WORLD: (rows) => {
     return [
       "UPDATE world SET randomNumber = ", rows[0].randomNumber,
@@ -21,6 +26,16 @@ const queries = {
   }
 };
 
+const populateCache = (callback) => {
+  if (cachePopulated) return callback();
+  connection.query(queries.ALL_WORLDS, (err, rows) => {
+    rows.forEach(r =>
+      myCache.set(r.id, { id: r.id, randomNumber: r.randomNumber }));
+    cachePopulated = true;
+    callback();
+  });
+};
+
 const mysqlRandomWorld = (callback) =>
   connection.query(queries.GET_RANDOM_WORLD(), (err, rows, fields) => {
     callback(err, rows[0]);
@@ -65,6 +80,19 @@ module.exports = {
     });
   },
 
+  CachedQueries: (queries, req, res) => {
+    populateCache(() => {
+      let worlds = [];
+      for (let i = 0; i < queries; i++) {
+        const key = h.randomTfbNumber() + '';
+        worlds.push(myCache.get(key));
+      }
+
+      h.addTfbHeaders(res, 'json');
+      res.end(JSON.stringify(worlds));
+    });
+  },
+
   Fortunes: (req, res) => {
     mysqlGetAllFortunes((err, fortunes) => {
       if (err) { return process.exit(1); }
@@ -87,6 +115,6 @@ module.exports = {
       h.addTfbHeaders(res, 'json');
       res.end(JSON.stringify(results));
     });
-  } 
+  }
 
 };
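
The new `CachedQueries` handler lazily fills a `node-cache` instance from the `world` table on the first request and then serves random rows straight from the cache. A minimal sketch (not part of this commit) of a manual check against a locally running nodejs test follows; it assumes the `requests` library is available on the client, and the host, port and `/mysql/cached` route match the values registered in benchmark_config.json and routing.js in this commit.

# Minimal sketch (not part of this commit): a quick manual check of the new
# cached-query endpoint on a locally running nodejs test. Host and port are
# assumptions for a local run.
import requests

resp = requests.get("http://localhost:8080/mysql/cached", params={"queries": 5})
worlds = resp.json()

# Expect a JSON array of 5 objects shaped like {"id": ..., "randomNumber": ...}
assert isinstance(worlds, list) and len(worlds) == 5
assert all("id" in world and "randomNumber" in world for world in worlds)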

+ 2 - 1
frameworks/JavaScript/nodejs/package.json

@@ -11,7 +11,8 @@
     "parseurl": "1.3.1",
     "pg": "6.1.2",
     "pg-hstore": "2.3.2",
-    "sequelize": "3.30.2"
+    "sequelize": "3.30.2",
+    "node-cache": "4.1.1"
   },
   "main": "app.js"
 }

+ 1 - 0
frameworks/JavaScript/nodejs/routing.js

@@ -54,6 +54,7 @@ module.exports.QueryHandler = ((() => {
 
     '/mysql/queries':     MySQLRawHandler.MultipleQueries,
     '/mysql/updates':     MySQLRawHandler.Updates,
+    '/mysql/cached' :     MySQLRawHandler.CachedQueries,
 
     '/sequelize-pg/queries': SequelizePgHandler.MultipleQueries,
     '/sequelize-pg/updates': SequelizePgHandler.Updates

+ 6 - 1
toolset/benchmark/benchmarker.py

@@ -895,7 +895,7 @@ class Benchmarker:
                             color = Fore.YELLOW
                         else:
                             color = Fore.RED
-                        print prefix + "|       " + test_type.ljust(11) + ' : ' + color + result.upper()
+                        print prefix + "|       " + test_type.ljust(13) + ' : ' + color + result.upper()
                 else:
                     print prefix + "|      " + Fore.RED + "NO RESULTS (Did framework launch?)"
             print prefix + header('', top='', bottom='=') + Style.RESET_ALL
@@ -925,6 +925,7 @@ class Benchmarker:
         types['fortune'] = FortuneTestType()
         types['update'] = UpdateTestType()
         types['plaintext'] = PlaintextTestType()
+        types['cached_query'] = CachedQueryTestType()
 
         # Turn type into a map instead of a string
         if args['type'] == 'all':
@@ -990,6 +991,7 @@ class Benchmarker:
             self.results['completionTime'] = None
             self.results['concurrencyLevels'] = self.concurrency_levels
             self.results['queryIntervals'] = self.query_levels
+            self.results['cachedQueryIntervals'] = self.cached_query_levels
             self.results['frameworks'] = [t.name for t in self.__gather_tests]
             self.results['duration'] = self.duration
             self.results['rawData'] = dict()
@@ -999,6 +1001,7 @@ class Benchmarker:
             self.results['rawData']['fortune'] = dict()
             self.results['rawData']['update'] = dict()
             self.results['rawData']['plaintext'] = dict()
+            self.results['rawData']['cached_query'] = dict()
             self.results['completed'] = dict()
             self.results['succeeded'] = dict()
             self.results['succeeded']['json'] = []
@@ -1007,6 +1010,7 @@ class Benchmarker:
             self.results['succeeded']['fortune'] = []
             self.results['succeeded']['update'] = []
             self.results['succeeded']['plaintext'] = []
+            self.results['succeeded']['cached_query'] = []
             self.results['failed'] = dict()
             self.results['failed']['json'] = []
             self.results['failed']['db'] = []
@@ -1014,6 +1018,7 @@ class Benchmarker:
             self.results['failed']['fortune'] = []
             self.results['failed']['update'] = []
             self.results['failed']['plaintext'] = []
+            self.results['failed']['cached_query'] = []
             self.results['verify'] = dict()
         else:
             #for x in self.__gather_tests():
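
These additions give `cached_query` the same bookkeeping as the existing test types. For illustration only (not part of this commit), the slice of the results dict touched above ends up shaped roughly as follows, assuming the default level lists from benchmark.cfg.example at the top of this commit.

# Illustration only (not part of this commit): the relevant slice of
# self.results after the additions above, using the example config defaults.
results = {
    'concurrencyLevels': [16, 32, 64, 128, 256, 512],
    'queryIntervals': [1, 5, 10, 15, 20],
    'cachedQueryIntervals': [1, 10, 20, 50, 100],
    'rawData':   dict((t, dict()) for t in ('json', 'db', 'query', 'fortune',
                                            'update', 'plaintext', 'cached_query')),
    'succeeded': dict((t, []) for t in ('json', 'db', 'query', 'fortune',
                                        'update', 'plaintext', 'cached_query')),
    'failed':    dict((t, []) for t in ('json', 'db', 'query', 'fortune',
                                        'update', 'plaintext', 'cached_query')),
}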

+ 14 - 10
toolset/benchmark/framework_test.py

@@ -484,7 +484,9 @@ class FrameworkTest:
           if test_type == 'plaintext': # One special case
             remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
           elif test_type == 'query' or test_type == 'update':
-            remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
+            remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header, self.benchmarker.query_levels)
+          elif test_type == 'cached_query':
+            remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header, self.benchmarker.cached_query_levels)
           else:
             remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)
 
@@ -667,11 +669,11 @@ class FrameworkTest:
   # be run on the client to benchmark a single test. This
   # specifically works for the variable query tests (Query)
   ############################################################
-  def __generate_query_script(self, url, port, accept_header):
+  def __generate_query_script(self, url, port, accept_header, query_levels):
     headers = self.headers_template.format(accept=accept_header)
     return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
       name=self.name, duration=self.benchmarker.duration,
-      levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
+      levels=" ".join("{}".format(item) for item in query_levels),
       server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
 
   ############################################################
@@ -892,14 +894,16 @@ def validate_urls(test_name, test_keys):
   the suggested url specifications, although those suggestions are presented if a url fails validation here.
   """
   example_urls = {
-    "json_url":      "/json",
-    "db_url":        "/mysql/db",
-    "query_url":     "/mysql/queries?queries=  or  /mysql/queries/",
-    "fortune_url":   "/mysql/fortunes",
-    "update_url":    "/mysql/updates?queries=  or  /mysql/updates/",
-    "plaintext_url": "/plaintext"
+    "json_url":         "/json",
+    "db_url":           "/mysql/db",
+    "query_url":        "/mysql/queries?queries=  or  /mysql/queries/",
+    "fortune_url":      "/mysql/fortunes",
+    "update_url":       "/mysql/updates?queries=  or  /mysql/updates/",
+    "plaintext_url":    "/plaintext",
+    "cached_query_url": "/mysql/cached_queries?queries=  or /mysql/cached_queries"
   }
-  for test_url in ["json_url","db_url","query_url","fortune_url","update_url","plaintext_url"]:
+
+  for test_url in ["json_url","db_url","query_url","fortune_url","update_url","plaintext_url","cached_query_url"]:
     key_value = test_keys.get(test_url, None)
     if key_value != None and not key_value.startswith('/'):
       errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n

+ 2 - 1
toolset/benchmark/test_types/__init__.py

@@ -5,4 +5,5 @@ from plaintext_type import PlaintextTestType
 from db_type import DBTestType
 from query_type import QueryTestType
 from update_type import UpdateTestType
-from fortune_type import FortuneTestType
+from fortune_type import FortuneTestType
+from cached_query_type import CachedQueryTestType

+ 46 - 0
toolset/benchmark/test_types/cached_query_type.py

@@ -0,0 +1,46 @@
+from benchmark.test_types.framework_test_type import FrameworkTestType
+from benchmark.test_types.verifications import (
+    verify_headers,
+    verify_randomnumber_list,
+    verify_query_cases
+)
+
+
+class CachedQueryTestType(FrameworkTestType):
+
+    def __init__(self):
+        kwargs = {
+            'name': 'cached_query',
+            'accept_header': self.accept('json'),
+            'requires_db': True,
+            'args': ['cached_query_url']
+        }
+        FrameworkTestType.__init__(self, **kwargs)
+
+    def get_url(self):
+        return self.cached_query_url
+
+    def verify(self, base_url):
+        '''
+        Validates the response is a JSON array of
+        the proper length, each JSON Object in the array
+        has keys 'id' and 'randomNumber', and these keys
+        map to integers. Case insensitive and
+        quoting style is ignored
+        '''
+
+        url = base_url + self.cached_query_url
+        cases = [
+            ('2',   'fail'),
+            ('0',   'fail'),
+            ('foo', 'fail'),
+            ('501', 'warn'),
+            ('',    'fail')
+        ]
+
+        problems = verify_query_cases(self, cases, url)
+
+        if len(problems) == 0:
+            return [('pass', '', url + case) for case, _ in cases]
+        else:
+            return problems
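
The cases reuse `verify_query_cases` from the existing query-type verification, where the second element of each pair is the severity applied if that case's response does not validate. As a minimal sketch (not part of this commit), here are the concrete URLs the cases expand to, assuming a local verify run and the nodejs `cached_query_url` added in benchmark_config.json above.

# Minimal sketch (not part of this commit): the URLs the verify cases above
# expand to. base_url and the route are assumptions for a local nodejs run.
base_url = "http://localhost:8080"
url = base_url + "/mysql/cached?queries="

cases = [('2', 'fail'), ('0', 'fail'), ('foo', 'fail'), ('501', 'warn'), ('', 'fail')]
for case, severity in cases:
    print(url + case)
# e.g. http://localhost:8080/mysql/cached?queries=2 ... ?queries=501 ... ?queries=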

+ 1 - 1
toolset/run-tests.py

@@ -144,7 +144,7 @@ def main(argv=None):
     parser.add_argument('--test', nargs='+', help='names of tests to run')
     parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
     parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
-    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
+    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
     parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
     parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
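
With `cached_query` added to the `--type` choices, a single framework's cached-query implementation can be exercised on its own by combining the flags defined above, for example (the test name is assumed to be the default nodejs test):

    toolset/run-tests.py --mode verify --test nodejs --type cached_query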