
WIP: Started work on a scaffolding init process (#3176)

* Started work on a scaffolding init process

* Built out file structures

* Changes based on feedback.

Also, updated the main README.md to give instructions.

* Added A LOT more instructions.

Also, cut down the typing needed to interact with the tool.

* Added more help to the wizard

Wizard now checks that your name isn't already taken and
that your language exists.

* Fixed a bug and titlified the language name (new)

* Renamed the flag to --new

* Fixed a bug and added the note about .travis.yml

* Typos

* Better imports
Mike Smith · 7 years ago · commit cdcd2ffa65

+ 8 - 0
README.md

@@ -45,6 +45,14 @@ required.
 
         vagrant@TFB-all:~/FrameworkBenchmarks$ tfb --mode verify --test beego
 
+## Add a New Test
+
+Once you open an SSH connection to your vagrant box, start the new test initialization wizard.
+
+        vagrant@TFB-all:~/FrameworkBenchmarks$ tfb --new
+
+This will walk you through the entire process of creating a new test to include in the suite.
+
 
 ## Official Documentation
 

+ 0 - 0
toolset/__init__.py


+ 19 - 1
toolset/benchmark/utils.py

@@ -6,6 +6,21 @@ import socket
 
 from ast import literal_eval
 
+def gather_languages():
+    '''
+    Gathers all the known languages in the suite via the folder names
+    beneath FWROOT.
+    '''
+    # Avoid setting up a circular import
+    from setup.linux import setup_util
+
+    lang_dir = os.path.join(setup_util.get_fwroot(), "frameworks")
+    langs = []
+    for path in glob.glob(os.path.join(lang_dir, "*")):
+        langs.append(os.path.basename(path))
+    return langs
+
+
 def gather_tests(include = [], exclude=[], benchmarker=None):
     '''
     Given test names as strings, returns a list of FrameworkTest objects.
@@ -41,7 +56,6 @@ def gather_tests(include = [], exclude=[], benchmarker=None):
 
     # Setup default Benchmarker using example configuration
     if benchmarker is None:
-        print "Creating Benchmarker from benchmark.cfg"
         default_config = setup_util.get_fwroot() + "/benchmark.cfg"
         config = ConfigParser.SafeConfigParser()
         config.readfp(open(default_config))
@@ -56,6 +70,10 @@ def gather_tests(include = [], exclude=[], benchmarker=None):
 
         # Ensure we only run the __init__ method of Benchmarker
         defaults['install'] = None
+        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
+        defaults['results_environment'] = "My Server Environment"
+        defaults['test_dir'] = None
+        defaults['quiet'] = True
 
         benchmarker = Benchmarker(defaults)
 

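For orientation, a minimal usage sketch of the new `gather_languages` helper (not part of the diff); it assumes the toolset package is importable and that FWROOT resolves to a checkout whose `frameworks/` directory holds one folder per language:

        # Hypothetical usage; prints the language folder names found under FWROOT.
        from benchmark.utils import gather_languages

        print(sorted(gather_languages()))  # e.g. ['C', 'Go', 'Java', 'PHP', ...]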
+ 6 - 0
toolset/run-tests.py

@@ -12,6 +12,7 @@ import copy
 from benchmark.benchmarker import Benchmarker
 from setup.linux.unbuffered import Unbuffered
 from setup.linux import setup_util
+from scaffolding import Scaffolding
 from ast import literal_eval
 
 # Enable cross-platform colored output
@@ -139,6 +140,7 @@ def main(argv=None):
     # Install options
     parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
     parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
+    parser.add_argument('--new', action='store_true', default=False, help='Initialize a new framework test')
 
     # Test options
     parser.add_argument('--test', nargs='+', help='names of tests to run')
@@ -162,6 +164,10 @@ def main(argv=None):
     parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
     args = parser.parse_args(remaining_argv)
 
+    if args.new:
+        Scaffolding()
+        return 0
+
     benchmarker = Benchmarker(vars(args))
 
     # Run the benchmarker in the specified mode

+ 348 - 0
toolset/scaffolding.py

@@ -0,0 +1,348 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import imp
+from shutil import copytree
+from setup.linux.setup_util import replace_text
+from benchmark.utils import gather_frameworks, gather_languages
+
+class Scaffolding:
+  def __init__(self):
+    print("""
+-------------------------------------------------------------------------------
+    This wizard is intended to help build the scaffolding required for a new 
+    test to be benchmarked.
+
+    From here, you will be prompted for values related to the test you
+    wish to add.
+-------------------------------------------------------------------------------""")
+
+    try:
+      self.__gather_display_name()
+      self.__gather_language()
+      self.__gather_approach()
+      self.__gather_classification()
+      self.__gather_orm()
+      self.__gather_webserver()
+      self.__gather_versus()
+      self.__confirm_values()
+      self.__print_success()
+    except:
+      print("")
+
+  def __gather_display_name(self):
+    print("""
+  The name of your test as you wish it to be displayed on the results page.
+
+  Example: Gemini, Gin, Express
+    """)
+    self.__prompt_display_name()
+    while not self.display_name:
+      self.__prompt_display_name()
+    self.name = self.display_name.lower()
+
+  def __prompt_display_name(self):
+    self.display_name = raw_input("Name: ").strip()
+
+    found = False
+    for framework in gather_frameworks():
+      if framework.lower() == self.display_name.lower():
+        found = True
+
+    if found:
+      print("""
+  It appears that there is already a '%s' framework in the test suite. You will
+  have to pick a different name.
+      """ % self.display_name)
+      self.display_name = None
+
+  def __gather_language(self):
+    print("""
+  The language in which your test implementation is written.
+
+  Example: Java, Go, PHP
+    """)
+    self.language = None
+    while not self.language:
+      self.__prompt_language()
+
+  def __prompt_language(self):
+    self.language = raw_input("Language: ").strip()
+
+    known_languages = gather_languages()
+    language = None
+    for lang in known_languages:
+      if lang.lower() == self.language.lower():
+        language = lang
+
+    if not language:
+      similar = []
+      for lang in known_languages:
+        if lang.lower()[:1] == self.language.lower()[:1]:
+          similar.append(lang)
+      similar = ', '.join(similar)
+
+      print("""
+  That language is not currently in our list of known languages.
+  
+  Here is a list of similar languages present in our benchmark suite that you
+  may have meant:
+
+  %s
+      
+  Did you mean to add the new language, '%s', to the benchmark suite?
+      """ % (similar, self.language))
+      valid = self.__prompt_confirm_new_language(known_languages)
+      while not valid:
+        valid = self.__prompt_confirm_new_language(known_languages)
+
+      if self.confirm_new_lang == 'n':
+        self.language = None
+      else:
+        self.language = self.language.title()
+
+    return self.language
+
+  def __prompt_confirm_new_language(self, known_languages):
+    self.confirm_new_lang = raw_input("Create New Language '%s' (y/n): " % self.language).strip().lower()
+    return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
+
+  def __gather_approach(self):
+    print("""
+  The approach of your test implementation.
+
+  1) Realistic: Uses the framework with most out-of-the-box functionality 
+                enabled. We consider this realistic because most applications 
+                built with the framework will leave these features enabled.
+  2) Stripped:  Removes or outright avoids implementing features that are
+                unnecessary for the particulars of the benchmark exercise. This
+                might illuminate the marginal improvement available in fine-
+                tuning a framework to your application's use-case.
+
+  Note: If you are unsure, then your approach is probably Realistic. The
+        Stripped approach is seldom used and will not have results displayed
+        by default on the results website.
+    """)
+    valid = self.__prompt_approach()
+    while not valid:
+      valid = self.__prompt_approach()
+
+  def __prompt_approach(self):
+    self.approach = raw_input("Approach [1/2]: ").strip()
+    if self.approach == '1':
+      self.approach = 'Realistic'
+    if self.approach == '2':
+      self.approach = 'Stripped'
+    return self.approach == 'Realistic' or self.approach == 'Stripped'
+
+  def __gather_classification(self):
+    print("""
+  The classification of your test implementation.
+
+  1) Fullstack: Robust framework expected to provide high-level functionality
+                for serving a web application; for example, the ability to
+                compose views, respond with several data types (JSON, HTML,
+                etc.), connect to a database, handle form processing, and so on.
+  2) Micro:     Simple framework expected to provide enough middleware (such as
+                request routing and some simple plumbing) to build a robust web
+                application, but it may not include built-in functionality such
+                as server-composed views.
+  3) Platform:  Barebones infrastructure for servicing HTTP requests, but does
+                not include a framework at all.
+    """)
+    valid = self.__prompt_classification()
+    while not valid:
+      valid = self.__prompt_classification()
+    if self.classification == 'Platform':
+      self.platform = 'None'
+      self.framework = 'None'
+    else:
+      self.framework = self.display_name
+      self.__gather_platform()
+
+  def __prompt_classification(self):
+    self.classification = raw_input("Classification [1/2/3]: ").strip()
+    if self.classification == '1':
+      self.classification = 'Fullstack'
+    if self.classification == '2':
+      self.classification = 'Micro'
+    if self.classification == '3':
+      self.classification = 'Platform'
+    return self.classification == 'Fullstack' or \
+           self.classification == 'Micro' or \
+           self.classification == 'Platform'
+
+  def __gather_platform(self):
+    print("""
+  The platform of your test implementation.
+
+  The platform is the low-level software or API used to host web applications 
+  for the framework; the platform provides an implementation of the HTTP
+  fundamentals.
+
+  Not all frameworks have a platform; if your programming language already
+  provides most of what we define as a platform, leave this blank.
+
+  Example: Servlet, Wai, .NET
+    """)
+    self.__prompt_platform()
+    
+  def __prompt_platform(self):
+    self.platform = raw_input("Platform (optional): ").strip()
+    if self.platform == '':
+      self.platform = 'None'
+
+  def __gather_orm(self):
+    print("""
+  How would you classify the ORM (object-relational mapper) of your test?
+
+  1) Full:  A feature-rich ORM which provides functionality for interacting
+            with a database without writing queries in all but the most extreme
+            edge cases.
+  2) Micro: An ORM which provides functionality for interacting with a database
+            for many trivial operations (querying, updating), but not for more
+            complex cases (for example, gathering relations).
+  3) Raw:   No ORM; raw database access.
+    """)
+    valid = self.__prompt_orm()
+    while not valid:
+      valid = self.__prompt_orm()
+
+  def __prompt_orm(self):
+    self.orm = raw_input("ORM [1/2/3]: ").strip()
+    if self.orm == '1':
+      self.orm = 'Full'
+    if self.orm == '2':
+      self.orm = 'Micro'
+    if self.orm == '3':
+      self.orm = 'Raw'
+    return self.orm == 'Full' or \
+           self.orm == 'Micro' or \
+           self.orm == 'Raw'
+
+  def __gather_webserver(self):
+    print("""
+  The name of the web server (if any) sitting in front of your test
+  implementation.
+
+  Your test implementation may act as its own web server; in that case, you can
+  leave this blank.
+
+  Example: nginx, Meinheld, lighttpd
+    """)
+    self.__prompt_webserver()
+
+  def __prompt_webserver(self):
+    self.webserver = raw_input("Webserver (optional): ").strip()
+    if self.webserver == '':
+      self.webserver = 'None'
+
+  def __gather_versus(self):
+    print("""
+  The name of another test (elsewhere in this project) that is a subset of this
+  framework.
+  This allows the results website to generate the framework efficiency chart.
+  For example, Compojure is compared to "servlet" since Compojure is built on 
+  the Servlet platform.
+
+  Example: Servlet, Wai, Undertow
+    """)
+    self.__prompt_versus()
+
+  def __prompt_versus(self):
+    self.versus = raw_input("Versus (optional): ").strip()
+    if self.versus == '':
+      self.versus = 'None'
+
+  def __confirm_values(self):
+    print("""
+    Name: %s
+    Language: %s
+    Approach: %s
+    Classification: %s
+    Platform: %s
+    ORM: %s
+    Webserver: %s
+    Versus: %s
+
+  Finalize the initialization of your test given the above values?
+
+  Note: once you have initialized your test, you can change these values later.
+    """ % (self.display_name, 
+           self.language, 
+           self.approach, 
+           self.classification, 
+           self.platform,
+           self.orm, 
+           self.webserver, 
+           self.versus))
+
+    valid = self.__prompt_confirmation()
+    while not valid:
+      valid = self.__prompt_confirmation()
+
+    if self.confirmation == 'y':
+      self.__build_scaffolding()
+    else:
+      print('Aborting')
+
+  def __prompt_confirmation(self):
+    self.confirmation = raw_input("Initialize [y/n]: ").strip().lower()
+    return self.confirmation == 'y' or self.confirmation == 'n'
+
+  def __build_scaffolding(self):
+    if self.__create_test_folder():
+      self.__copy_scaffold_files()
+      self.__edit_scaffold_files()
+
+  def __create_test_folder(self):
+    self.language_dir = os.path.join("frameworks", self.language)
+    self.test_dir = os.path.join(self.language_dir, self.name)
+
+    if os.path.exists(self.test_dir):
+      print("Test '%s' already exists; aborting." % self.name)
+      return False
+
+    return True
+
+  def __copy_scaffold_files(self):
+    self.scaffold_dir = os.path.join("toolset","setup","scaffolding")
+    copytree(self.scaffold_dir, self.test_dir)
+
+  def __edit_scaffold_files(self):
+    for file in os.listdir(os.path.join(self.test_dir)):
+      replace_text(os.path.join(self.test_dir, file), "\$NAME", self.name)
+      replace_text(os.path.join(self.test_dir, file), "\$DISPLAY_NAME", self.display_name)
+      replace_text(os.path.join(self.test_dir, file), "\$APPROACH", self.approach)
+      replace_text(os.path.join(self.test_dir, file), "\$CLASSIFICATION", self.classification)
+      replace_text(os.path.join(self.test_dir, file), "\$FRAMEWORK", self.framework)
+      replace_text(os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
+      replace_text(os.path.join(self.test_dir, file), "\$ORM", self.orm)
+      replace_text(os.path.join(self.test_dir, file), "\$PLATFORM", self.platform)
+      replace_text(os.path.join(self.test_dir, file), "\$WEBSERVER", self.webserver)
+      replace_text(os.path.join(self.test_dir, file), "\$VERSUS", self.versus)
+
+  def __print_success(self):
+    print("""
+-------------------------------------------------------------------------------
+  Success!
+
+  Your new test structure has been built to the specifications of the suite.
+  Here is a brief run-down of what has been built:
+
+    frameworks
+        └─── %s
+              └─── %s
+                    ├─── .gitignore
+                    ├─── benchmark_config.json
+                    ├─── README.md
+                    ├─── setup.sh
+                    ├─── setup_mysql.sh
+                    └─── source_code
+
+  The next step is to read through your README.md and follow the instructions
+  provided therein.
+-------------------------------------------------------------------------------"""
+    % (self.language, self.name))
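An editorial note on the file-editing step above: `__edit_scaffold_files` leans on `replace_text` from `setup_util`, which is assumed here to behave like a whole-file regex substitution. A minimal standalone sketch of that assumed behaviour, with an illustrative path and token:

        import re

        def fill_placeholder(path, token, value):
            # Read the scaffold file, substitute the $TOKEN, and write it back.
            with open(path) as f:
                text = f.read()
            with open(path, 'w') as f:
                f.write(re.sub(token, value, text))

        # Illustrative call, mirroring replace_text(file, "\$NAME", self.name)
        fill_placeholder('frameworks/Go/my-test/benchmark_config.json', r'\$NAME', 'my-test')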

+ 3 - 0
toolset/setup/scaffolding/.gitignore

@@ -0,0 +1,3 @@
+# Add any files that are created at build/run-time
+#
+# Example: *.class, *.pyc, bin/

+ 103 - 0
toolset/setup/scaffolding/README.md

@@ -0,0 +1,103 @@
+# Congratulations!
+
+You have successfully built a new test in the suite!
+
+There are some remaining tasks to do before you are ready to open a pull request, however.
+
+## Next Steps
+
+1. Gather your source code.
+
+You will need to ensure that your source code is beneath this directory. The most common solution is to include a `src` directory and place your source code there.
+
+2. Edit `source_code`
+
+A metric we capture, in addition to the actual benchmark numbers, is the significant lines of code required to run your application. To help our suite identify your source code, we require you to list the directories containing it in `source_code`.
+
+3. Edit `.gitignore`
+
+It is very important that any files created by building or running your application are included in your `.gitignore`. The repository **must** contain only source files plus the files the suite requires to start your test application.
+
+4. Edit `benchmark_config.json`
+
+The initialization process made some assumptions about your test implementation that may or may not be true. For example, it laid out two separate tests: the non-database tests and the database tests. You, on the other hand, may only want to implement the `JSON` test, so you will need to alter `benchmark_config.json`.
+
+Additionally, `benchmark_config.json` has, for each test, a key called "setup_file". This value refers to the next bullet.
+
+5. Edit `setup.sh`
+
+This is the script that is executed when a benchmark test is run. Specifically, this file tells the suite how to build and start your test application.
+
+In this file, there are detailed instructions on what is expected and what is available to help you start your test application.
+
+6. Test your application
+
+        $ tfb --mode verify --test $NAME
+
+This will run the suite in `verify` mode for your test. No benchmarks will be captured; instead, the suite checks that it can hit the endpoints of your implementation specified by `benchmark_config.json` and that the responses are correct.
+
+Once you are able to successfully run your test through our suite in this way **and** your test passes our validation, you may move on to the next step.
+
+7. Add your test to `.travis.yml`
+
+Edit `.travis.yml` to ensure that Travis-CI will automatically run our verification tests against your new test. This file is kept in alphabetical order, so find where `TESTDIR=$LANGUAGE/$NAME` should be inserted under `env > matrix` and put it there.
+
+8. Fix this `README.md` and open a pull request
+
+Starting on line 59 is your actual `README.md` that will sit with your test implementation. Update all the dummy values to their correct values so that when people visit your test in our GitHub repository, they will be greeted with information on how your test implementation works and where to look for useful source code.
+
+After you have the real `README.md` file in place, delete everything above line 59 and you are ready to open a pull request.
+
+Thanks and Cheers!
+
+
+
+
+
+
+
+# $DISPLAY_NAME Benchmarking Test
+
+### Test Type Implementation Source Code
+
+* [JSON](Relative/Path/To/Your/Source/File)
+* [PLAINTEXT](Relative/Path/To/Your/Source/File)
+* [DB](Relative/Path/To/Your/Source/File)
+* [QUERY](Relative/Path/To/Your/Source/File)
+* [CACHED QUERY](Relative/Path/To/Your/Source/File)
+* [UPDATE](Relative/Path/To/Your/Source/File)
+* [FORTUNES](Relative/Path/To/Your/Source/File)
+
+## Important Libraries
+The tests were run with:
+* [Software](https://www.example1.com/)
+* [Example](http://www.example2.com/)
+
+## Test URLs
+### JSON
+
+http://localhost:8080/json
+
+### PLAINTEXT
+
+http://localhost:8080/plaintext
+
+### DB
+
+http://localhost:8080/db
+
+### QUERY
+
+http://localhost:8080/query?queries=
+
+### CACHED QUERY
+
+http://localhost:8080/cached_query?queries=
+
+### UPDATE
+
+http://localhost:8080/update?queries=
+
+### FORTUNES
+
+http://localhost:8080/fortunes
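As an editorial aside to the verification step in the scaffold README above (step 6): a hypothetical local smoke test you could run while developing, assuming your application is already listening on port 8080; the authoritative check remains `tfb --mode verify --test <name>`.

        import json
        import urllib2

        body = urllib2.urlopen('http://localhost:8080/json').read()
        # The JSON test is expected to return exactly this message object.
        assert json.loads(body) == {'message': 'Hello, World!'}, body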

+ 50 - 0
toolset/setup/scaffolding/benchmark_config.json

@@ -0,0 +1,50 @@
+{
+  "framework": "$NAME",
+  "tests": [
+    {
+      "default": {
+        "setup_file": "setup",
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "port": 8080,
+        "approach": "$APPROACH",
+        "classification": "$CLASSIFICATION",
+        "database": "None",
+        "framework": "$FRAMEWORK",
+        "language": "$LANGUAGE",
+        "flavor": "None",
+        "orm": "$ORM",
+        "platform": "$PLATFORM",
+        "webserver": "$WEBSERVER",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "$DISPLAY_NAME",
+        "notes": "",
+        "versus": "$VERSUS"
+      },
+      "mysql": {
+        "setup_file": "setup_mysql",
+        "db_url": "/db",
+        "query_url": "/query?queries=",
+        "cached_query_url": "/cached_query?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/update?queries=",
+        "port": 8080,
+        "approach": "$APPROACH",
+        "classification": "$CLASSIFICATION",
+        "database": "MySQL",
+        "framework": "$FRAMEWORK",
+        "language": "$LANGUAGE",
+        "flavor": "None",
+        "orm": "$ORM",
+        "platform": "$PLATFORM",
+        "webserver": "$WEBSERVER",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "$DISPLAY_NAME",
+        "notes": "",
+        "versus": "$VERSUS"
+      }
+    }
+  ]
+}
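Relating to step 4 of the scaffold README, a quick sanity check (sketch, with an illustrative path) that the configuration still parses as JSON after you trim it down and fill in the placeholders:

        import json

        with open('frameworks/Go/my-test/benchmark_config.json') as f:
            cfg = json.load(f)
        # At minimum, a framework name and a non-empty list of tests are expected.
        assert cfg.get('framework') and cfg.get('tests'), cfg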

+ 56 - 0
toolset/setup/scaffolding/setup.sh

@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# This file describes how to gather the prerequisites of your test implementation, set
+# them up, and finally execute your test application(s) and exit in a state where the
+# test is ready to respond to HTTP requests to the port/urls described in your
+# benchmark_config.json
+
+
+# fw_depends will search toolset/setup/linux/** for named shell files and execute them.
+# These files will set up the sandboxed runtime to have the software required when your
+# test is ready to go. For example:
+#
+#   fw_depends Java
+# 
+# If you are adding a new piece of software, ensure that you first create the setup 
+# script in the appropriate place and that it follows the same paradigms illustrated
+# in the existing scripts.
+
+
+# Three HOST entries are provided to this script:
+#
+#   TFB-database = The IP address of the database 
+#   TFB-client   = The IP address of the client making the HTTP requests
+#   TFB-server   = The IP address of this machine servicing HTTP requests
+#
+# This is the preferred way of resolving these IP addresses. However, some applications
+# do not support internal name resolution and will bypass the system's HOST file and
+# attempt to resolve externally (which will fail). In those cases, use the $DBHOST
+# environment variable described below.
+
+
+# Very often, you will need variables to be set up in order to run your application
+# implementation. The suite provides several variables to this shell script to be used
+# for just this reason.
+#
+#   $DBHOST = the IP address of the database machine
+#   $TROOT  = the test's root directory (the directory in which this file resides)
+#   $FWROOT = the framework benchmark root (the root of this repository)
+#   $IROOT  = the sandbox installation root directory (your installed software as well
+#             as anything installed via fw_depends is inside this dir)
+#   $MAX_CONCURRENCY
+#           = the concurrency levels set in the suite configuration file
+#
+# Below is an example of how to replace a connect string in an application config file
+# so that the application will start up with the correct IP:
+#
+#   sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' app.conf
+
+
+# Lastly, you will need to start your test implementation in a daemon or
+# detached mode. For example:
+#
+#   go run hello.go &
+
+
+# Note: all comments except for the first line of this file can be deleted.

+ 56 - 0
toolset/setup/scaffolding/setup_mysql.sh

@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# This file describes how to gather the prerequisites of your test implementation, set
+# them up, and finally execute your test application(s) and exit in a state where the
+# test is ready to respond to HTTP requests to the port/urls described in your
+# benchmark_config.json
+
+
+# fw_depends will search toolset/setup/linux/** for named shell files and execute them.
+# These files will set up the sandboxed runtime to have the software required when your
+# test is ready to go. For example:
+#
+#   fw_depends Java
+# 
+# If you are adding a new piece of software, ensure that you first create the setup 
+# script in the appropriate place and that it follows the same paradigms illustrated
+# in the existing scripts.
+
+
+# Three HOST entries are provided to this script:
+#
+#   TFB-database = The IP address of the database 
+#   TFB-client   = The IP address of the client making the HTTP requests
+#   TFB-server   = The IP address of this machine servicing HTTP requests
+#
+# This is the preferred way of resolving these IP addresses. However, some applications
+# do not support internal name resolution and will bypass the system's HOST file and
+# attempt to resolve externally (which will fail). In those cases, use the $DBHOST
+# environment variable described below.
+
+
+# Very often, you will need variables to be set up in order to run your application
+# implementation. The suite provides several variables to this shell script to be used
+# for just this reason.
+#
+#   $DBHOST = the IP address of the database machine
+#   $TROOT  = the test's root directory (the directory in which this file resides)
+#   $FWROOT = the framework benchmark root (the root of this repository)
+#   $IROOT  = the sandbox installation root directory (your installed software as well
+#             as anything installed via fw_depends is inside this dir)
+#   $MAX_CONCURRENCY
+#           = the concurrency levels set in the suite configuration file
+#
+# Below is an example of how to replace a connect string in an application config file
+# so that the application will start up with the correct IP:
+#
+#   sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' app.conf
+
+
+# Lastly, you will need to start your test implementation in a daemon or
+# detached mode. For example:
+#
+#   go run hello.go &
+
+
+# Note: all comments except for the first line of this file can be deleted.

+ 12 - 0
toolset/setup/scaffolding/source_code

@@ -0,0 +1,12 @@
+# This file should list the directories that contain the actual source code
+# for this implementation.  This file is used as the "--list-file" argument of the
+# "cloc" tool:
+#
+# https://github.com/AlDanial/cloc#Options
+#
+# For example, for a Java application that uses Maven, the content of this file
+# might be:
+#
+# ./src/main/java
+#
+# (remove the '#' character in the line above or else cloc treats it as a comment)
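For reference, a sketch of how this file is assumed to be consumed: the suite passes it to cloc via `--list-file` to count significant lines of code (path illustrative).

        import subprocess

        # cloc reads the directories listed in source_code and reports the SLOC.
        subprocess.check_call([
            'cloc', '--list-file=frameworks/Go/my-test/source_code', '--yaml'
        ])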