Browse Source

Merge branch 'master' of https://github.com/TechEmpower/FrameworkBenchmarks

jamming 8 years ago
parent
commit
384890f81e
89 changed files with 685 additions and 2705 deletions
  1. benchmark.cfg.example (+0 -2)
  2. config/cassandra/cassandra.init (+0 -172)
  3. config/cassandra/cassandra.init.env (+0 -6)
  4. config/cassandra/cassandra.yaml (+0 -704)
  5. config/cassandra/cleanup-keyspace.cql (+0 -1)
  6. config/cassandra/create-keyspace.cql (+0 -9)
  7. config/cassandra/db-data-gen.py (+0 -6)
  8. config/cassandra/log4j-server.properties (+0 -44)
  9. config/elasticsearch/elasticsearch (+0 -24)
  10. config/elasticsearch/elasticsearch.yml (+0 -389)
  11. config/elasticsearch/es-create-index.sh (+0 -21)
  12. config/elasticsearch/es-db-data-gen.py (+0 -5)
  13. config/php-fpm.conf (+2 -2)
  14. config/travis_setup.sh (+0 -18)
  15. deployment/vagrant-common/bootstrap.sh (+1 -29)
  16. deployment/vagrant-common/core.rb (+4 -6)
  17. frameworks/C++/cpoll_cppsp/setup.sh (+1 -1)
  18. frameworks/C++/silicon/setup_lwan_mysql.sh (+1 -1)
  19. frameworks/C/h2o/CMakeLists.txt (+3 -3)
  20. frameworks/C/h2o/setup.sh (+12 -1)
  21. frameworks/CSharp/revenj/benchmark_config.json (+1 -1)
  22. frameworks/CSharp/revenj/setup.sh (+0 -1)
  23. frameworks/Crystal/crystal/server.cr (+1 -1)
  24. frameworks/Go/go-std/setup_mongo.sh (+1 -1)
  25. frameworks/Java/beyondj/setup.sh (+0 -1)
  26. frameworks/Java/gemini/start.sh (+1 -1)
  27. frameworks/Java/gemini/start_postgres.sh (+1 -1)
  28. frameworks/Java/revenj/benchmark_config.json (+1 -1)
  29. frameworks/Java/servlet3-cass/README.md (+0 -14)
  30. frameworks/Java/servlet3-cass/benchmark_config.json (+0 -45)
  31. frameworks/Java/servlet3-cass/pom.xml (+0 -93)
  32. frameworks/Java/servlet3-cass/setup.sh (+0 -10)
  33. frameworks/Java/servlet3-cass/source_code (+0 -22)
  34. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseBaseServlet.java (+0 -105)
  35. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueriesServlet.java (+0 -33)
  36. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueryServlet.java (+0 -34)
  37. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseUpdatesServlet.java (+0 -86)
  38. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/HelloMessage.java (+0 -12)
  39. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/JsonSerializationServlet.java (+0 -33)
  40. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAO.java (+0 -14)
  41. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAOCassImpl.java (+0 -93)
  42. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/PlaintextServlet.java (+0 -32)
  43. frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/World.java (+0 -27)
  44. frameworks/Java/servlet3-cass/src/main/resources/application.properties (+0 -2)
  45. frameworks/Java/servlet3-cass/src/main/resources/logback.xml (+0 -16)
  46. frameworks/Java/servlet3-cass/src/main/webapp/jsp/error.jsp (+0 -17)
  47. frameworks/Java/undertow/setup.sh (+1 -1)
  48. frameworks/PHP/README.md (+0 -3)
  49. frameworks/PHP/phalcon/.gitignore (+1 -0)
  50. frameworks/PHP/phalcon/app/controllers/MongobenchController.php (+3 -3)
  51. frameworks/PHP/phalcon/app/models/MongoFortunesCollection.php (+14 -0)
  52. frameworks/PHP/phalcon/app/models/MongoWorldsCollection.php (+14 -0)
  53. frameworks/PHP/phalcon/benchmark_config.json (+1 -1)
  54. frameworks/PHP/phalcon/composer.json (+6 -0)
  55. frameworks/PHP/phalcon/public/index.php (+5 -4)
  56. frameworks/PHP/phalcon/setup.sh (+1 -1)
  57. frameworks/Ruby/ngx_mruby/setup.sh (+1 -1)
  58. frameworks/Rust/iron/Cargo.toml (+17 -5)
  59. frameworks/Rust/iron/benchmark_config.json (+5 -1)
  60. frameworks/Rust/iron/build.rs (+13 -0)
  61. frameworks/Rust/iron/setup.sh (+216 -20)
  62. frameworks/Rust/iron/src/main_types.in.rs (+11 -0)
  63. toolset/benchmark/benchmarker.py (+50 -33)
  64. toolset/benchmark/framework_test.py (+32 -36)
  65. toolset/run-tests.py (+4 -20)
  66. toolset/setup/linux/TFBReaper.c (+62 -0)
  67. toolset/setup/linux/bash_functions.sh (+6 -2)
  68. toolset/setup/linux/client.sh (+2 -0)
  69. toolset/setup/linux/database.sh (+0 -281)
  70. toolset/setup/linux/databases/databases.sh (+21 -0)
  71. toolset/setup/linux/databases/mongodb.sh (+37 -0)
  72. toolset/setup/linux/databases/mysql.sh (+48 -0)
  73. toolset/setup/linux/databases/postgresql.sh (+56 -0)
  74. toolset/setup/linux/frameworks/ffead-cpp-apache.sh (+1 -1)
  75. toolset/setup/linux/frameworks/ffead-cpp-nginx.sh (+1 -1)
  76. toolset/setup/linux/frameworks/ffead-cpp.sh (+2 -5)
  77. toolset/setup/linux/frameworks/jester.sh (+1 -1)
  78. toolset/setup/linux/frameworks/lwan.sh (+2 -0)
  79. toolset/setup/linux/frameworks/phalcon.sh (+7 -12)
  80. toolset/setup/linux/frameworks/silicon.sh (+1 -1)
  81. toolset/setup/linux/installer.py (+0 -113)
  82. toolset/setup/linux/languages/dlang.sh (+4 -2)
  83. toolset/setup/linux/languages/php5.sh (+2 -2)
  84. toolset/setup/linux/languages/php7.sh (+2 -2)
  85. toolset/setup/linux/languages/ruby-2.0.sh (+1 -1)
  86. toolset/setup/linux/languages/ruby-2.1.sh (+1 -1)
  87. toolset/setup/linux/languages/ruby-2.2.sh (+1 -1)
  88. toolset/setup/linux/languages/rust.sh (+1 -1)
  89. toolset/setup/linux/prerequisites.sh (+1 -9)

benchmark.cfg.example (+0 -2)

@@ -3,7 +3,6 @@
 client_host=127.0.0.1
 client_identity_file=None
 client_user=techempower
-runner_user=testrunner
 database_host=127.0.0.1
 database_identity_file=None
 database_os=linux
@@ -21,7 +20,6 @@ query_levels=[1, 5,10,15,20]
 threads=8
 mode=benchmark
 os=linux
-password_prompt=False
 server_host=127.0.0.1
 sleep=60
 test=None

config/cassandra/cassandra.init (+0 -172)

@@ -1,172 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides:          cassandra
-# Required-Start:    $remote_fs $network $named $time
-# Required-Stop:     $remote_fs $network $named $time
-# Should-Start:      ntp mdadm
-# Should-Stop:       ntp mdadm
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: distributed storage system for structured data
-# Description:       Cassandra is a distributed (peer-to-peer) system for
-#                    the management and storage of structured data.
-### END INIT INFO
-
-# Author: Eric Evans <[email protected]>
-
-DESC="Cassandra"
-NAME=cassandra
-PIDFILE=/var/run/$NAME/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-CASSANDRA_CONF=/etc/cassandra
-WAIT_FOR_START=10
-CASSANDRA_HOME=/usr/share/cassandra
-CASSANDRA_LIB=$CASSANDRA_HOME
-CASSANDRA_PROG=/usr/sbin/cassandra
-FD_LIMIT=100000
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-valid_chome=`find $CASSANDRA_LIB/apache-cassandra*.jar 2> /dev/null`
-[ -n "$valid_chome" ] || exit 0
-[ -e $CASSANDRA_CONF/cassandra.yaml ] || exit 0
-[ -e $CASSANDRA_CONF/cassandra-env.sh ] || exit 0
-
-# Read Cassandra environment file.
-. $CASSANDRA_CONF/cassandra-env.sh
-
-if [ -z "$JVM_OPTS" ]; then
-    echo "Initialization failed; \$JVM_OPTS not set!" >&2
-    exit 3
-fi
-
-export JVM_OPTS
-
-# Export JAVA_HOME, if set.
-[ -n "$JAVA_HOME" ] && export JAVA_HOME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
-. /lib/lsb/init-functions
-
-#
-# Function that returns 0 if process is running, or nonzero if not.
-#
-# The nonzero value is 3 if the process is simply not running, and 1 if the
-# process is not running but the pidfile exists (to match the exit codes for
-# the "status" command; see LSB core spec 3.1, section 20.2)
-#
-CMD_PATT="cassandra.+CassandraDaemon"
-is_running()
-{
-    if [ -f $PIDFILE ]; then
-        pid=`cat $PIDFILE`
-        grep -Eq "$CMD_PATT" "/proc/$pid/cmdline" 2>/dev/null && return 0
-        return 1
-    fi
-    return 3
-}
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
-    # Return
-    #   0 if daemon has been started
-    #   1 if daemon was already running
-    #   2 if daemon could not be started
-
-    ulimit -l unlimited
-    ulimit -n "$FD_LIMIT"
-
-    cassandra_home=`getent passwd cassandra | awk -F ':' '{ print $6; }'`
-    heap_dump_f="$cassandra_home/java_`date +%s`.hprof"
-    error_log_f="$cassandra_home/hs_err_`date +%s`.log"
-
-    [ -e `dirname "$PIDFILE"` ] || \
-        install -d -ocassandra -gcassandra -m755 `dirname $PIDFILE`
-
-
-
-    start-stop-daemon -S -c cassandra -a $CASSANDRA_PROG -q -p "$PIDFILE" -t >/dev/null || return 1
-
-    start-stop-daemon -S -c cassandra -a $CASSANDRA_PROG -b -p "$PIDFILE" -- \
-        -p "$PIDFILE" -H "$heap_dump_f" -E "$error_log_f" >/dev/null || return 2
-
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
-    # Return
-    #   0 if daemon has been stopped
-    #   1 if daemon was already stopped
-    #   2 if daemon could not be stopped
-    #   other if a failure occurred
-    start-stop-daemon -K -p "$PIDFILE" -R TERM/30/KILL/5 >/dev/null
-    RET=$?
-    rm -f "$PIDFILE"
-    return $RET
-}
-
-case "$1" in
-  start)
-	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
-	do_start
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  stop)
-	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
-	do_stop
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  restart|force-reload)
-	log_daemon_msg "Restarting $DESC" "$NAME"
-	do_stop
-	case "$?" in
-	  0|1)
-		do_start
-		case "$?" in
-			0) log_end_msg 0 ;;
-			1) log_end_msg 1 ;; # Old process is still running
-			*) log_end_msg 1 ;; # Failed to start
-		esac
-		;;
-	  *)
-	  	# Failed to stop
-		log_end_msg 1
-		;;
-	esac
-	;;
-  status)
-    is_running
-    stat=$?
-    case "$stat" in
-      0) log_success_msg "$DESC is running" ;;
-      1) log_failure_msg "could not access pidfile for $DESC" ;;
-      *) log_success_msg "$DESC is not running" ;;
-    esac
-    exit "$stat"
-    ;;
-  *)
-	echo "Usage: $SCRIPTNAME {start|stop|restart|force-reload|status}" >&2
-	exit 3
-	;;
-esac
-
-:
-
-# vi:ai sw=4 ts=4 tw=0 et
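
The liveness check removed above (is_running) reads the pidfile, then greps /proc/<pid>/cmdline against CMD_PATT, returning the LSB status codes (0 running, 1 pidfile exists but process gone, 3 not running). A minimal Python sketch of the same check, using the pidfile path and pattern from the script:

    import re
    from pathlib import Path

    PIDFILE = Path("/var/run/cassandra/cassandra.pid")
    CMD_PATT = re.compile(rb"cassandra.+CassandraDaemon")

    def is_running() -> int:
        """0 = running, 1 = stale pidfile, 3 = not running (LSB codes)."""
        if not PIDFILE.exists():
            return 3
        pid = PIDFILE.read_text().strip()
        try:
            cmdline = Path("/proc/{}/cmdline".format(pid)).read_bytes()
        except OSError:
            return 1  # pidfile exists but the process is no longer there
        return 0 if CMD_PATT.search(cmdline) else 1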

config/cassandra/cassandra.init.env (+0 -6)

@@ -1,6 +0,0 @@
-
-CASSANDRA_HOME=/opt/cassandra
-CASSANDRA_LIB=$CASSANDRA_HOME/lib
-CASSANDRA_CONF=$CASSANDRA_HOME/conf
-CASSANDRA_PROG=$CASSANDRA_HOME/bin/cassandra
-

config/cassandra/cassandra.yaml (+0 -704)

@@ -1,704 +0,0 @@
-# Cassandra storage config YAML 
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'TFB Cluster'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting on the node's initial start,
-# on subsequent starts, this setting will apply even if initial token is set.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to 
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: 256
-
-# initial_token allows you to specify tokens manually.  While you can use # it with
-# vnodes (num_tokens > 1, above) -- in which case you should provide a 
-# comma-separated list -- it's primarily used when adding nodes # to legacy clusters 
-# that do not have vnodes enabled.
-# initial_token:
-
-# May either be "true" or "false" to enable globally, or contain a list
-# of data centers to enable per-datacenter.
-# hinted_handoff_enabled: DC1,DC2
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-# Maximum throttle in KBs per second, per delivery thread.  This will be
-# reduced proportionally to the number of nodes in the cluster.  (If there
-# are two nodes in the cluster, each delivery thread will use the maximum
-# rate; if there are three, each will throttle to half of the maximum,
-# since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# Maximum throttle in KBs per second, total. This will be
-# reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
-
-# Authentication backend, implementing IAuthenticator; used to identify users
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
-# PasswordAuthenticator}.
-#
-# - AllowAllAuthenticator performs no checks - set it to disable authentication.
-# - PasswordAuthenticator relies on username/password pairs to authenticate
-#   users. It keeps usernames and hashed passwords in system_auth.credentials table.
-#   Please increase system_auth keyspace replication factor if you use this authenticator.
-authenticator: AllowAllAuthenticator
-
-# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
-# CassandraAuthorizer}.
-#
-# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
-# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
-#   increase system_auth keyspace replication factor if you use this authorizer.
-authorizer: AllowAllAuthorizer
-
-# Validity period for permissions cache (fetching permissions can be an
-# expensive operation depending on the authorizer, CassandraAuthorizer is
-# one example). Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
-
-# Refresh interval for permissions cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If permissions_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as permissions_validity_in_ms.
-# permissions_update_interval_in_ms: 1000
-
-# The partitioner is responsible for distributing groups of rows (by
-# partition key) across nodes in the cluster.  You should leave this
-# alone for new clusters.  The partitioner can NOT be changed without
-# reloading all data, so when upgrading you should set this to the
-# same partitioner you were already using.
-#
-# Besides Murmur3Partitioner, partitioners included for backwards
-# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
-# OrderPreservingPartitioner.
-#
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# Directories where Cassandra should store data on disk.  Cassandra
-# will spread data evenly across them, subject to the granularity of
-# the configured compaction strategy.
-data_file_directories:
-    - /ssd/cassandra/data
-
-# commit log
-commitlog_directory: /ssd/cassandra/commitlog
-
-# policy for data disk failures:
-# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# policy for commit disk failures:
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# stop_commit: shutdown the commit log, letting writes collect but 
-#              continuing to service reads, as in pre-2.0.5 Cassandra
-# ignore: ignore fatal errors and let the batches fail
-commit_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must contain the entire row,
-# so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# safe the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The off-heap memory allocator.  Affects storage engine metadata as
-# well as caches.  Experiments show that JEMAlloc saves some memory
-# than the native GCC allocator (i.e., JEMalloc is more
-# fragmentation-resistant).
-# 
-# Supported values are: NativeAllocator, JEMallocAllocator
-#
-# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
-# modify cassandra-env.sh as directed in the file.
-#
-# Defaults to NativeAllocator
-# memory_allocator: NativeAllocator
-
-# saved caches
-saved_caches_directory: /ssd/cassandra/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch." 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.  By default this allows 1024*(CPU cores) pending
-# entries on the commitlog queue.  If you are writing very large blobs,
-# you should reduce that; 16*cores works reasonably well for 1MB blobs.
-# It should be at least as large as the concurrent_writes setting.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-# commitlog_periodic_queue_size:
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "127.0.0.1"
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-
-# Total memory to use for sstable-reading buffers.  Defaults to
-# the smaller of 1/4 of heap or 512MB.
-# file_cache_size_in_mb: 512
-
-# Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/4 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs.  Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.  So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread.  At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSDs; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: 7000
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-ssl_storage_port: 7001
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-# 
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing _if_ the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address: 127.0.0.1
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-# broadcast_address: 1.2.3.4
-
-# Internode authentication backend, implementing IInternodeAuthenticator;
-# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-# Whether to start the native transport server.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-native_transport_port: 9042
-# The maximum threads for handling requests when the native transport is used.
-# This is similar to rpc_max_threads though the default differs slightly (and
-# there is no native_transport_min_threads, idle threads will always be stopped
-# after 30 seconds).
-# native_transport_max_threads: 128
-#
-# The maximum size of allowed frame. Frame (requests) larger than this will
-# be rejected as invalid. The default is 256MB.
-# native_transport_max_frame_size_in_mb: 256
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-
-# The address to bind the Thrift RPC service and native transport
-# server -- clients connect here.
-#
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-#
-# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
-# here if you want to listen on all interfaces, but that will break clients 
-# that rely on node auto-discovery.
-rpc_address: 127.0.0.1
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# enable or disable keepalive on rpc/native connections
-rpc_keepalive: true
-
-# Cassandra provides two out-of-the-box options for the RPC Server:
-#
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the amount
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request). If hsha is selected then it is essential
-#          that rpc_max_threads is changed from the default value of unlimited.
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively,  can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# See:
-# /proc/sys/net/core/wmem_max
-# /proc/sys/net/core/rmem_max
-# /proc/sys/net/ipv4/tcp_wmem
-# /proc/sys/net/ipv4/tcp_wmem
-# and: man tcp
-# internode_send_buff_size_in_bytes:
-# internode_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum message length).
-thrift_framed_transport_size_in_mb: 15
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# When executing a scan, within or across a partition, we need to keep the
-# tombstones seen in memory so we can return them to the coordinator, which
-# will use them to make sure other replicas also know about the deleted rows.
-# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exaust the server heap.
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-# Adjust the thresholds here if you understand the dangers and want to
-# scan more tombstones anyway.  These thresholds may also be adjusted at runtime
-# using the StorageService mbean.
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-
-# Granularity of the collation index of rows within a partition.
-# Increase if your rows are large, or if you have a very large
-# number of rows per partition.  The competing goals are these:
-#   1) a smaller granularity means more index entries are generated
-#      and looking up rows withing the partition by collation column
-#      is faster
-#   2) but, Cassandra will keep the collation index in memory for hot
-#      rows (as part of the key cache), so a larger granularity means
-#      you can cache more hot rows
-column_index_size_in_kb: 64
-
-
-# Log WARN on any batch size exceeding this value. 5kb per batch by default.
-# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
-
-# Size limit for rows being compacted in memory.  Larger rows will spill
-# over to disk and use a slower two-pass compaction process.  A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise, 
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this account for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable.  Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
-
-# Throttles all streaming file transfer between the datacenters,
-# this setting allows users to throttle inter dc stream throughput in addition
-# to throttling all network stream traffic as configured with
-# stream_throughput_outbound_megabits_per_sec
-# inter_dc_stream_throughput_outbound_megabits_per_sec:
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 5000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 2000
-# How long a coordinator should continue to retry a CAS operation
-# that contends with other proposals for the same row
-cas_contention_timeout_in_ms: 1000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts.  If disabled, replicas will assume that requests
-# were forwarded to them instantly by the coordinator, which means that
-# under overload conditions we will waste that much extra time processing 
-# already-timed-out requests.
-#
-# Warning: before enabling this property make sure to ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This _can_ involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never timeout streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This can improve cache
-#    locality when disabling read repair.  Only appropriate for
-#    single-datacenter deployments.
-#  - GossipingPropertyFileSnitch
-#    This should be your go-to snitch for production use.  The rack
-#    and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via
-#    gossip.  If cassandra-topology.properties exists, it is used as a
-#    fallback, allowing migration from the PropertyFileSnitch.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region. Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's IP
-#    address, respectively.  Unless this happens to match your
-#    deployment conventions, this is best used as an example of
-#    writing a custom Snitch class and is provided in that spirit.
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-    # require_client_auth: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # require_client_auth: false
-    # Set trustore and truststore_password if require_client_auth is true
-    # truststore: conf/.truststore
-    # truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
-internode_compression: all
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: false
-
-# Enable or disable kernel page cache preheating from contents of the key cache after compaction.
-# When enabled it would preheat only first "page" (4KB) of each row to optimize
-# for sequential access. Note: This could be harmful for fat rows, see CASSANDRA-4937
-# for further details on that topic.
-preheat_kernel_page_cache: false

config/cassandra/cleanup-keyspace.cql (+0 -1)

@@ -1 +0,0 @@
-DROP KEYSPACE tfb;

config/cassandra/create-keyspace.cql (+0 -9)

@@ -1,9 +0,0 @@
-
-CREATE KEYSPACE tfb WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1 };
-USE tfb;
-
-CREATE TABLE  World (
-  id int,
-  randomNumber int,
-  PRIMARY KEY (id)
-);
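
For illustration, the same keyspace and table can also be created programmatically; a minimal sketch, assuming the DataStax cassandra-driver package and a single node on 127.0.0.1 (not how the toolset itself applied this file):

    from cassandra.cluster import Cluster  # pip install cassandra-driver

    cluster = Cluster(["127.0.0.1"])  # CQL native transport, port 9042 by default
    session = cluster.connect()
    session.execute(
        "CREATE KEYSPACE tfb WITH replication = "
        "{'class': 'SimpleStrategy', 'replication_factor': 1}")
    session.execute(
        "CREATE TABLE tfb.World (id int, randomNumber int, PRIMARY KEY (id))")
    cluster.shutdown()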

config/cassandra/db-data-gen.py (+0 -6)

@@ -1,6 +0,0 @@
-from random import randint
-
-print "USE tfb;"
-
-for i in range(1, 10001):
-  print "INSERT INTO world (id, randomnumber) VALUES (%d, %d);" % (i, randint(1, 10000))

config/cassandra/log4j-server.properties (+0 -44)

@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# for production, you should probably set pattern to %c instead of %l.  
-# (%l is slower.)
-
-# output messages into a rolling log file as well as stdout
-log4j.rootLogger=INFO,stdout,R
-
-# stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
-
-# rolling log file
-log4j.appender.R=org.apache.log4j.RollingFileAppender
-log4j.appender.R.maxFileSize=20MB
-log4j.appender.R.maxBackupIndex=50
-log4j.appender.R.layout=org.apache.log4j.PatternLayout
-log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
-# Edit the next line to point to your logs directory
-log4j.appender.R.File=/ssd/log/cassandra/system.log
-
-# Application logging options
-#log4j.logger.org.apache.cassandra=DEBUG
-#log4j.logger.org.apache.cassandra.db=DEBUG
-#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
-
-# Adding this to avoid thrift logging disconnect errors.
-log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
-

config/elasticsearch/elasticsearch (+0 -24)

@@ -1,24 +0,0 @@
-#!/bin/bash
-
-start() {
-  /opt/elasticsearch/bin/elasticsearch -d -p /ssd/elasticsearch/es.pid
-}
-
-stop() {
-  kill -HUP `cat /ssd/elasticsearch/es.pid`
-}
-
-case "$1" in
-  start)
-    start
-    ;;
-  stop)
-    stop
-    ;;
-  restart)
-    stop
-    sleep 10
-    start
-    ;;
-esac
-

config/elasticsearch/elasticsearch.yml (+0 -389)

@@ -1,389 +0,0 @@
-##################### Elasticsearch Configuration Example #####################
-
-# This file contains an overview of various configuration settings,
-# targeted at operations staff. Application developers should
-# consult the guide at <http://elasticsearch.org/guide>.
-#
-# The installation procedure is covered at
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
-#
-# Elasticsearch comes with reasonable defaults for most settings,
-# so you can try it out without bothering with configuration.
-#
-# Most of the time, these defaults are just fine for running a production
-# cluster. If you're fine-tuning your cluster, or wondering about the
-# effect of certain configuration option, please _do ask_ on the
-# mailing list or IRC channel [http://elasticsearch.org/community].
-
-# Any element in the configuration can be replaced with environment variables
-# by placing them in ${...} notation. For example:
-#
-#node.rack: ${RACK_ENV_VAR}
-
-# For information on supported formats and syntax for the config file, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
-
-
-################################### Cluster ###################################
-
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names.
-#
-#cluster.name: elasticsearch
-
-
-#################################### Node #####################################
-
-# Node names are generated dynamically on startup, so you're relieved
-# from configuring them manually. You can tie this node to a specific name:
-#
-#node.name: "Franz Kafka"
-
-# Every node can be configured to allow or deny being eligible as the master,
-# and to allow or deny to store the data.
-#
-# Allow this node to be eligible as a master node (enabled by default):
-#
-#node.master: true
-#
-# Allow this node to store data (enabled by default):
-#
-#node.data: true
-
-# You can exploit these settings to design advanced cluster topologies.
-#
-# 1. You want this node to never become a master node, only to hold data.
-#    This will be the "workhorse" of your cluster.
-#
-#node.master: false
-#node.data: true
-#
-# 2. You want this node to only serve as a master: to not store any data and
-#    to have free resources. This will be the "coordinator" of your cluster.
-#
-#node.master: true
-#node.data: false
-#
-# 3. You want this node to be neither master nor data node, but
-#    to act as a "search load balancer" (fetching data from nodes,
-#    aggregating results, etc.)
-#
-#node.master: false
-#node.data: false
-
-# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
-# Node Info API [http://localhost:9200/_nodes] or GUI tools
-# such as <http://www.elasticsearch.org/overview/marvel/>,
-# <http://github.com/karmi/elasticsearch-paramedic>,
-# <http://github.com/lukas-vlcek/bigdesk> and
-# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
-
-# A node can have generic attributes associated with it, which can later be used
-# for customized shard allocation filtering, or allocation awareness. An attribute
-# is a simple key value pair, similar to node.key: value, here is an example:
-#
-#node.rack: rack314
-
-# By default, multiple nodes are allowed to start from the same installation location
-# to disable it, set the following:
-#node.max_local_storage_nodes: 1
-
-
-#################################### Index ####################################
-
-# You can set a number of options (such as shard/replica options, mapping
-# or analyzer definitions, translog settings, ...) for indices globally,
-# in this file.
-#
-# Note, that it makes more sense to configure index settings specifically for
-# a certain index, either when creating it or by using the index templates API.
-#
-# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
-# for more information.
-
-# Set the number of shards (splits) of an index (5 by default):
-#
-#index.number_of_shards: 5
-
-# Set the number of replicas (additional copies) of an index (1 by default):
-#
-#index.number_of_replicas: 1
-
-# Note, that for development on a local machine, with small indices, it usually
-# makes sense to "disable" the distributed features:
-#
-#index.number_of_shards: 1
-#index.number_of_replicas: 0
-
-# These settings directly affect the performance of index and search operations
-# in your cluster. Assuming you have enough machines to hold shards and
-# replicas, the rule of thumb is:
-#
-# 1. Having more *shards* enhances the _indexing_ performance and allows to
-#    _distribute_ a big index across machines.
-# 2. Having more *replicas* enhances the _search_ performance and improves the
-#    cluster _availability_.
-#
-# The "number_of_shards" is a one-time setting for an index.
-#
-# The "number_of_replicas" can be increased or decreased anytime,
-# by using the Index Update Settings API.
-#
-# Elasticsearch takes care about load balancing, relocating, gathering the
-# results from nodes, etc. Experiment with different settings to fine-tune
-# your setup.
-
-# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
-# the index status.
-
-
-#################################### Paths ####################################
-
-# Path to directory containing configuration (this file and logging.yml):
-#
-#path.conf: /path/to/conf
-
-# Path to directory where to store index data allocated for this node.
-#
-#path.data: /path/to/data
-path.data: /ssd/elasticsearch/data
-
-#
-# Can optionally include more than one location, causing data to be striped across
-# the locations (a la RAID 0) on a file level, favouring locations with most free
-# space on creation. For example:
-#
-#path.data: /path/to/data1,/path/to/data2
-
-# Path to temporary files:
-#
-#path.work: /path/to/work
-path.work: /ssd/elasticsearch/work
-
-# Path to log files:
-#
-#path.logs: /path/to/logs
-path.logs: /ssd/log/elasticsearch
-
-# Path to where plugins are installed:
-#
-#path.plugins: /path/to/plugins
-
-
-#################################### Plugin ###################################
-
-# If a plugin listed here is not installed for current node, the node will not start.
-#
-#plugin.mandatory: mapper-attachments,lang-groovy
-
-
-################################### Memory ####################################
-
-# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
-# it _never_ swaps.
-#
-# Set this property to true to lock the memory:
-#
-#bootstrap.mlockall: true
-
-# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
-# to the same value, and that the machine has enough memory to allocate
-# for Elasticsearch, leaving enough memory for the operating system itself.
-#
-# You should also make sure that the Elasticsearch process is allowed to lock
-# the memory, eg. by using `ulimit -l unlimited`.
-
-
-############################## Network And HTTP ###############################
-
-# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
-# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
-# communication. (the range means that if the port is busy, it will automatically
-# try the next port).
-
-# Set the bind address specifically (IPv4 or IPv6):
-#
-#network.bind_host: 192.168.0.1
-
-# Set the address other nodes will use to communicate with this node. If not
-# set, it is automatically derived. It must point to an actual IP address.
-#
-#network.publish_host: 192.168.0.1
-
-# Set both 'bind_host' and 'publish_host':
-#
-#network.host: 192.168.0.1
-
-# Set a custom port for the node to node communication (9300 by default):
-#
-#transport.tcp.port: 9300
-
-# Enable compression for all communication between nodes (disabled by default):
-#
-#transport.tcp.compress: true
-
-# Set a custom port to listen for HTTP traffic:
-#
-#http.port: 9200
-
-# Set a custom allowed content length:
-#
-#http.max_content_length: 100mb
-
-# Disable HTTP completely:
-#
-#http.enabled: false
-
-
-################################### Gateway ###################################
-
-# The gateway allows for persisting the cluster state between full cluster
-# restarts. Every change to the state (such as adding an index) will be stored
-# in the gateway, and when the cluster starts up for the first time,
-# it will read its state from the gateway.
-
-# There are several types of gateway implementations. For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
-
-# The default gateway type is the "local" gateway (recommended):
-#
-#gateway.type: local
-
-# Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible when using shared
-# gateway).
-
-# Allow recovery process after N nodes in a cluster are up:
-#
-#gateway.recover_after_nodes: 1
-
-# Set the timeout to initiate the recovery process, once the N nodes
-# from previous setting are up (accepts time value):
-#
-#gateway.recover_after_time: 5m
-
-# Set how many nodes are expected in this cluster. Once these N nodes
-# are up (and recover_after_nodes is met), begin recovery process immediately
-# (without waiting for recover_after_time to expire):
-#
-#gateway.expected_nodes: 2
-
-
-############################# Recovery Throttling #############################
-
-# These settings allow to control the process of shards allocation between
-# nodes during initial recovery, replica allocation, rebalancing,
-# or when adding and removing nodes.
-
-# Set the number of concurrent recoveries happening on a node:
-#
-# 1. During the initial recovery
-#
-#cluster.routing.allocation.node_initial_primaries_recoveries: 4
-#
-# 2. During adding/removing nodes, rebalancing, etc
-#
-#cluster.routing.allocation.node_concurrent_recoveries: 2
-
-# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
-#
-#indices.recovery.max_bytes_per_sec: 20mb
-
-# Set to limit the number of open concurrent streams when
-# recovering a shard from a peer:
-#
-#indices.recovery.concurrent_streams: 5
-
-
-################################## Discovery ##################################
-
-# Discovery infrastructure ensures nodes can be found within a cluster
-# and master node is elected. Multicast discovery is the default.
-
-# Set to ensure a node sees N other master eligible nodes to be considered
-# operational within the cluster. This should be set to a quorum/majority of 
-# the master-eligible nodes in the cluster.
-#
-#discovery.zen.minimum_master_nodes: 1
-
-# Set the time to wait for ping responses from other nodes when discovering.
-# Set this option to a higher value on a slow or congested network
-# to minimize discovery failures:
-#
-#discovery.zen.ping.timeout: 3s
-
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
-
-# Unicast discovery allows to explicitly control which nodes will be used
-# to discover the cluster. It can be used when multicast is not present,
-# or to restrict the cluster communication-wise.
-#
-# 1. Disable multicast discovery (enabled by default):
-#
-#discovery.zen.ping.multicast.enabled: false
-#
-# 2. Configure an initial list of master nodes in the cluster
-#    to perform discovery when new nodes (master or data) are started:
-#
-#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
-
-# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
-#
-# You have to install the cloud-aws plugin for enabling the EC2 discovery.
-#
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
-#
-# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
-# for a step-by-step tutorial.
-
-# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
-#
-# You have to install the cloud-gce plugin for enabling the GCE discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
-
-# Azure discovery allows to use Azure API in order to perform discovery.
-#
-# You have to install the cloud-azure plugin for enabling the Azure discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
-
-################################## Slow Log ##################################
-
-# Shard level query and fetch threshold logging.
-
-#index.search.slowlog.threshold.query.warn: 10s
-#index.search.slowlog.threshold.query.info: 5s
-#index.search.slowlog.threshold.query.debug: 2s
-#index.search.slowlog.threshold.query.trace: 500ms
-
-#index.search.slowlog.threshold.fetch.warn: 1s
-#index.search.slowlog.threshold.fetch.info: 800ms
-#index.search.slowlog.threshold.fetch.debug: 500ms
-#index.search.slowlog.threshold.fetch.trace: 200ms
-
-#index.indexing.slowlog.threshold.index.warn: 10s
-#index.indexing.slowlog.threshold.index.info: 5s
-#index.indexing.slowlog.threshold.index.debug: 2s
-#index.indexing.slowlog.threshold.index.trace: 500ms
-
-################################## GC Logging ################################
-
-#monitor.jvm.gc.young.warn: 1000ms
-#monitor.jvm.gc.young.info: 700ms
-#monitor.jvm.gc.young.debug: 400ms
-
-#monitor.jvm.gc.old.warn: 10s
-#monitor.jvm.gc.old.info: 5s
-#monitor.jvm.gc.old.debug: 2s
-
-################################## Security ################################
-
-# Uncomment if you want to enable JSONP as a valid return transport on the
-# http server. With this enabled, it may pose a security risk, so disabling
-# it unless you need it is recommended (it is disabled by default).
-#
-#http.jsonp.enable: true
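
The removed discovery section above points at the quorum rule for `discovery.zen.minimum_master_nodes`. As a quick reference (a sketch of the standard guidance, not part of the deleted file), the majority of N master-eligible nodes works out as:

    def minimum_master_nodes(master_eligible: int) -> int:
        # Majority of master-eligible nodes; the usual guard against split-brain.
        return master_eligible // 2 + 1

    assert minimum_master_nodes(3) == 2
    assert minimum_master_nodes(5) == 3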

+ 0 - 21
config/elasticsearch/es-create-index.sh

@@ -1,21 +0,0 @@
-#!/bin/bash
-
-curl -XDELETE http://localhost:9200/tfb
-
-curl -XPUT 'http://localhost:9200/tfb' -d '
-{
-  "settings": {
-    "index": {
-      "number_of_shards": 1,
-      "number_of_replicas": 1
-    }
-  },
-  "mappings": {
-    "world": {
-      "properties": {
-        "randomNumber": { "type" : "integer", "index" : "not_analyzed" }
-      }
-    }
-  }
-}
-'
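
The deleted script rebuilt the `tfb` index over Elasticsearch's HTTP API: drop the index, then recreate it with one shard, one replica, and an integer `randomNumber` field. A standard-library Python sketch of the same two requests (assuming a local Elasticsearch on port 9200, as the script did):

    import json
    import urllib.error
    import urllib.request

    body = json.dumps({
        "settings": {"index": {"number_of_shards": 1, "number_of_replicas": 1}},
        "mappings": {"world": {"properties": {
            "randomNumber": {"type": "integer", "index": "not_analyzed"},
        }}},
    }).encode()

    # Drop any stale index first; a 404 on the first run is expected.
    try:
        urllib.request.urlopen(
            urllib.request.Request("http://localhost:9200/tfb", method="DELETE"))
    except urllib.error.HTTPError:
        pass

    print(urllib.request.urlopen(
        urllib.request.Request("http://localhost:9200/tfb", data=body, method="PUT")).read())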

+ 0 - 5
config/elasticsearch/es-db-data-gen.py

@@ -1,5 +0,0 @@
-from random import randint
-
-for i in range(1, 10001):
-  print """{ "index" : { "_id" : "%s" } }
-{ "randomNumber" : %s }""" % (i, randint(1, 10000))

+ 2 - 2
config/php-fpm.conf

@@ -139,8 +139,8 @@ events.mechanism = epoll
 ;       will be used.
 ; Note: TFB does not run php-fpm as root, and therefore these directives are 
 ;       ignored. Commenting them out avoids spurious log messages
-; user = testrunner
-; group = testrunner
+; user = 
+; group = 
 
 ; The address on which to accept FastCGI requests.
 ; Valid syntaxes are:

+ 0 - 18
config/travis_setup.sh

@@ -39,26 +39,8 @@ echo "database_host=127.0.0.1"                         >> benchmark.cfg
 echo "server_host=127.0.0.1"                           >> benchmark.cfg
 echo "server_host=127.0.0.1"                           >> benchmark.cfg
 echo "client_user=travis"                              >> benchmark.cfg
 echo "client_user=travis"                              >> benchmark.cfg
 echo "database_user=travis"                            >> benchmark.cfg
 echo "database_user=travis"                            >> benchmark.cfg
-echo "runner_user=testrunner"                          >> benchmark.cfg
-
-# Create the new testrunner user
-sudo useradd testrunner
-# Give him a home dir
-sudo mkdir /home/testrunner
-# Make testrunner the owner of his home dir
-sudo chown testrunner:testrunner /home/testrunner
-# Add the testrunner user to every group that the travis user is in
-sudo sed -i 's|:travis|:travis,testrunner,benchmarkdbuser|g' /etc/group
-# Maybe unneeded - add the travis user to the testrunner group
-sudo sed -i 's|testrunner:x:\(.*\):|testrunner:x:\1:travis|g' /etc/group
-# Need to add testrunner to the sudoers group AND default him to a sudoers
-# because the travis user isn't in the sudo group - he's a sudoer.
-echo "testrunner ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers
-# Set the default shell for testrunner to /bin/bash
-sudo sed -i 's|/home/testrunner:/bin/sh|/home/testrunner:/bin/bash|g' /etc/passwd
 
 
 mkdir installs
-sudo chown testrunner:testrunner installs
 
 # =============Setup Databases===========================
 # NOTE: Do not run `--install database` in travis-ci! 

+ 1 - 29
deployment/vagrant-common/bootstrap.sh

@@ -59,7 +59,6 @@ if [ ! -e "~/.firstboot" ]; then
   echo "export TFB_DATABASE_HOST=$DATABA_IP" >> ~/.bash_profile
   echo "export TFB_DATABASE_HOST=$DATABA_IP" >> ~/.bash_profile
   echo "export TFB_CLIENT_USER=$USER" >> ~/.bash_profile
   echo "export TFB_CLIENT_USER=$USER" >> ~/.bash_profile
   echo "export TFB_DATABASE_USER=$USER" >> ~/.bash_profile
   echo "export TFB_DATABASE_USER=$USER" >> ~/.bash_profile
-  echo "export TFB_RUNNER_USER=testrunner" >> ~/.bash_profile
   echo "export FWROOT=$HOME/FrameworkBenchmarks" >> ~/.bash_profile 
   echo "export FWROOT=$HOME/FrameworkBenchmarks" >> ~/.bash_profile 
   source ~/.bash_profile
   source ~/.bash_profile
 
 
@@ -75,12 +74,6 @@ if [ ! -e "~/.firstboot" ]; then
   echo $CLIENT_IP TFB-client   | sudo tee --append /etc/hosts
   echo $SERVER_IP TFB-server   | sudo tee --append /etc/hosts
 
-  # Add user to run tests
-  sudo adduser --disabled-password --gecos "" testrunner
-  # WARN: testrunner will NOT have sudo access by round 11
-  #       please begin migrating scripts to not rely on sudo.
-  sudo bash -c "echo 'testrunner ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/90-tfb-testrunner"
-
   # Update hostname to reflect our current role
   if [ "$ROLE" != "all" ]; then
     echo "Updating hostname"
@@ -111,8 +104,7 @@ if [ ! -e "~/.firstboot" ]; then
 
 
     # vboxfs does not support chown or chmod, which we need. 
     # We therefore bind-mount a normal linux directory so we can
-    # use these operations. This enables us to 
-    # use `chown -R testrunner:testrunner $FWROOT/installs` later
+    # use these operations.
     #echo "Mounting over your installs folder"
     #echo "Mounting over your installs folder"
     #mkdir -p /tmp/TFB_installs
     #mkdir -p /tmp/TFB_installs
     #mkdir -p /FrameworkBenchmarks/installs
     #mkdir -p /FrameworkBenchmarks/installs
@@ -125,26 +117,6 @@ if [ ! -e "~/.firstboot" ]; then
     source ~/FrameworkBenchmarks/toolset/setup/linux/prerequisites.sh
   #fi
 
-  # Everyone gets SSH access to localhost
-  echo "Setting up SSH access to localhost"
-  ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
-  cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
-  sudo -u testrunner mkdir -p /home/testrunner/.ssh
-  sudo -u testrunner ssh-keygen -t rsa -N '' -f /home/testrunner/.ssh/id_rsa
-  sudo -u testrunner bash -c "cat /home/testrunner/.ssh/id_rsa.pub >> /home/testrunner/.ssh/authorized_keys"
-  sudo -u testrunner bash -c "cat /home/vagrant/.ssh/authorized_keys >> /home/testrunner/.ssh/authorized_keys"
-  chmod 600 ~/.ssh/authorized_keys
-  sudo -u testrunner chmod 600 /home/testrunner/.ssh/authorized_keys
-  
-  export RUNNER=testrunner
-  export ME=$(id -u -n)
-  sudo chown $RUNNER:$RUNNER /home/$RUNNER
-  sudo sed -i 's|:'"$ME"'|:'"$ME"','"$RUNNER"'|g' /etc/group
-  sudo sed -i 's|'"$ME"':x:\(.*\):|'"$ME"':x:\1:'"$RUNNER"'|g' /etc/group
-  sudo sed -i 's|'"$RUNNER"':x:\(.*\):|'"$RUNNER"':x:\1:'"$ME"'|g' /etc/group
-  echo "$RUNNER ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers
-  sudo sed -i 's|/home/'"$RUNNER"':.*|/home/'"$RUNNER"':/bin/bash|g' /etc/passwd
-
   # Enable remote SSH access if we are running production environment
   # Note : this is always copied from the local working copy using a
   #        file provisioner. While they exist in the git clone we just 

+ 4 - 6
deployment/vagrant-common/core.rb

@@ -106,12 +106,10 @@ def provider_virtualbox(config, role, ip_address='172.16.0.16')
     # The VirtualBox file system for shared folders (vboxfs)
     # does not support posix's chown/chmod - these can only 
     # be set at mount time, and they are uniform for the entire
-    # shared directory. We require chown, because we have the 
-    # testrunner user account, so this is a problem. To mitigate
-    # the effects, we set the folders and files to 777 permissions. 
-    # Even though we cannot chown them to testrunner, with 777 and 
-    # owner vagrant *most* of the software works ok. Occasional 
-    # issues are still possible. 
+    # shared directory. To mitigate the effects, we set the 
+    # folders and files to 777 permissions. 
+    # With 777 and owner vagrant *most* of the software works ok. 
+    # Occasional issues are still possible. 
     #
     # See mitchellh/vagrant#4997
     # See http://superuser.com/a/640028/136050

+ 1 - 1
frameworks/C++/cpoll_cppsp/setup.sh

@@ -2,7 +2,7 @@
 
 
 sed -i 's|#define BENCHMARK_DB_HOST ".*"|#define BENCHMARK_DB_HOST "'"$DBHOST"'"|g' www/connectioninfo.H
 
-fw_depends cppsp
+fw_depends postgresql-server-dev-9.3 cppsp
 
 make clean
 make

+ 1 - 1
frameworks/C++/silicon/setup_lwan_mysql.sh

@@ -1,5 +1,5 @@
 #! /bin/bash
-
+ 
 fw_depends silicon lwan
 
 rm -rf build

+ 3 - 3
frameworks/C/h2o/CMakeLists.txt

@@ -8,9 +8,9 @@ find_path(MUSTACHE_C_INCLUDE mustache.h)
 find_path(YAJL_INCLUDE yajl/yajl_gen.h)
 set(COMMON_OPTIONS -flto -pthread)
 add_compile_options(-std=gnu11 -pedantic -Wall -Wextra ${COMMON_OPTIONS})
-set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fstack-protector-all -D_FORTIFY_SOURCE=2")
-set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Ofast")
-set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -Ofast")
+set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -D_FORTIFY_SOURCE=2")
+set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3")
+set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3")
 add_definitions(-DH2O_USE_LIBUV=0)
 include_directories(src ${H2O_INCLUDE} ${MUSTACHE_C_INCLUDE} ${YAJL_INCLUDE})
 file(GLOB SOURCES "src/*.c")

+ 12 - 1
frameworks/C/h2o/setup.sh

@@ -7,6 +7,16 @@ BUILD_DIR="${H2O_APP_HOME}_build"
 H2O_APP_PROFILE_PORT="54321"
 H2O_APP_PROFILE_URL="http://127.0.0.1:$H2O_APP_PROFILE_PORT"
 
+# A hacky way to detect whether we are running on physical hardware or in the cloud environment.
+if [[ $(nproc) -gt 16 ]]; then
+	# In the physical hardware environment the application server has more CPU cores than the
+	# database server, so we need to reduce the maximum number of database connections per
+	# thread accordingly.
+	DB_CONN=2
+else
+	DB_CONN=8
+fi
+
 build_h2o_app()
 {
 	cmake -DCMAKE_INSTALL_PREFIX="$H2O_APP_HOME" -DCMAKE_BUILD_TYPE=Release \
@@ -25,7 +35,7 @@ run_curl()
 
 
 run_h2o_app()
 {
-	"$1/h2o_app" -a1 -f "$2/template/fortunes.mustache" -m5 "$3" "$4" \
+	"$1/h2o_app" -a1 -f "$2/template/fortunes.mustache" -m "$DB_CONN" "$3" "$4" \
 		-d "host=$DBHOST dbname=hello_world user=benchmarkdbuser password=benchmarkdbpass" &
 }
 
@@ -54,4 +64,5 @@ build_h2o_app "-fprofile-use"
 make -j "$(nproc)" install
 popd
 rm -rf "$BUILD_DIR"
+echo "Maximum database connections per thread: $DB_CONN"
 run_h2o_app "${H2O_APP_HOME}/bin" "${H2O_APP_HOME}/share/h2o_app"
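
The `DB_CONN` branch above caps connections per server thread, not in total. To see the worst-case PostgreSQL connection count the heuristic implies, here is a rough restatement (it assumes h2o_app runs one thread per core, which is an assumption about the app's defaults, not something this script sets):

    import os

    threads = os.cpu_count() or 1       # stand-in for $(nproc)
    db_conn = 2 if threads > 16 else 8  # mirrors the branch in setup.sh
    print("worst-case database connections:", threads * db_conn)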

+ 1 - 1
frameworks/CSharp/revenj/benchmark_config.json

@@ -1,5 +1,5 @@
 {
-  "framework": "Revenj",
+  "framework": "revenj",
   "tests": [{
     "windows": {
       "setup_file": "setup",

+ 0 - 1
frameworks/CSharp/revenj/setup.sh

@@ -33,4 +33,3 @@ cat $TROOT/Revenj.Http.exe.config | sed 's|\(ConnectionString.*server=\)localhos
 
 
 echo "Running the Revenj instance"
 echo "Running the Revenj instance"
 mono $TROOT/exe/Revenj.Http.exe
 mono $TROOT/exe/Revenj.Http.exe
-sleep 5

+ 1 - 1
frameworks/Crystal/crystal/server.cr

@@ -1,7 +1,7 @@
 require "http/server"
 require "http/server"
 require "json"
 require "json"
 
 
-server = HTTP::Server.new(8080) do |context|
+server = HTTP::Server.new("0.0.0.0", 8080) do |context|
   response = context.response
   response = context.response
   response.headers["Server"] = "Crystal"
   response.headers["Server"] = "Crystal"
   response.headers["Date"] = Time.utc_now.to_s
   response.headers["Date"] = Time.utc_now.to_s

+ 1 - 1
frameworks/Go/go-std/setup_mongo.sh

@@ -2,7 +2,7 @@
 
 
 sed -i 's|connectionString = "localhost"|connectionString = "'"${DBHOST}"'"|g' hello_mongo.go
 
-fw_depends go
+fw_depends go libsasl2-dev 
 
 go get gopkg.in/mgo.v2
 

+ 0 - 1
frameworks/Java/beyondj/setup.sh

@@ -14,4 +14,3 @@ cd ../../
 
 
 echo "Launching BeyondJ from location:$PWD"
 echo "Launching BeyondJ from location:$PWD"
 java -jar beyondj-launcher/deploy/beyondj-launcher-1.0-SNAPSHOT.jar system.platform.dbserver=${DBHOST} numInstances=10
 java -jar beyondj-launcher/deploy/beyondj-launcher-1.0-SNAPSHOT.jar system.platform.dbserver=${DBHOST} numInstances=10
-

+ 1 - 1
frameworks/Java/gemini/start.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends java resin maven ant
+fw_depends mysql java resin maven ant
 
 sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' Docroot/WEB-INF/GeminiHello.conf
 sed -i 's|root-directory=".*/FrameworkBenchmarks/frameworks/Java/gemini|root-directory="'"$TROOT"'|g' Docroot/WEB-INF/resin.xml

+ 1 - 1
frameworks/Java/gemini/start_postgres.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends java resin maven
+fw_depends postgresql java ant resin maven
 
 sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':5432/|g' Docroot/WEB-INF/GeminiHello.conf
 sed -i 's|root-directory=".*/FrameworkBenchmarks/frameworks/Java/gemini|root-directory="'"$TROOT"'|g' Docroot/WEB-INF/resin.xml

+ 1 - 1
frameworks/Java/revenj/benchmark_config.json

@@ -1,5 +1,5 @@
 {
-  "framework": "Revenj.JVM",
+  "framework": "revenj",
   "tests": [{
     "default": {
       "setup_file": "setup",

+ 0 - 14
frameworks/Java/servlet3-cass/README.md

@@ -1,14 +0,0 @@
-
-# Servlet3 API benchmarking test
-
-Framework permutation based on the following technology stack
-
-* Java
-* Resin
-* Servlet 3 with asynchronous processing
-* Apache Cassandra database
-* Jackson 2 for JSON processing
-
-Currently implements test types 1, 2, 3, 5 and 6.
-
-

+ 0 - 45
frameworks/Java/servlet3-cass/benchmark_config.json

@@ -1,45 +0,0 @@
-{
-  "framework": "servlet3-cass",
-  "tests": [{
-    "default": {
-      "setup_file": "setup",
-      "json_url": "/servlet3-cass/json",
-      "plaintext_url": "/servlet3-cass/plaintext",
-      "port": 8080,
-      "approach": "Stripped",
-      "classification": "Platform",
-      "database": "None",
-      "framework": "None",
-      "language": "Java",
-      "orm": "Raw",
-      "platform": "Servlet",
-      "webserver": "Resin",
-      "os": "Linux",
-      "database_os": "Linux",
-      "display_name": "servlet3-cass",
-      "notes": "",
-      "versus": "servlet3-cass"
-    },
-    "raw": {
-      "setup_file": "setup",
-      "db_url": "/servlet3-cass/db",
-      "query_url": "/servlet3-cass/queries?queries=",
-      "update_url": "/servlet3-cass/updates?queries=",
-      "port": 8080,
-      "approach": "Stripped",
-      "classification": "Platform",
-      "database": "Cassandra",
-      "framework": "None",
-      "language": "Java",
-      "flavor": "None",
-      "orm": "Raw",
-      "platform": "Servlet",
-      "webserver": "Resin",
-      "os": "Linux",
-      "database_os": "Linux",
-      "display_name": "servlet3-cass",
-      "notes": "",
-      "versus": "servlet3-cass"
-    }
-  }]
-}

+ 0 - 93
frameworks/Java/servlet3-cass/pom.xml

@@ -1,93 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>fi.markoa.tfb.servlet3</groupId>
-    <artifactId>servlet3-cass</artifactId>
-    <version>0.0.1-SNAPSHOT</version>
-    <packaging>war</packaging>
-    <name>servlet3-cass</name>
-
-    <properties>
-        <sourceEncoding>UTF-8</sourceEncoding>
-        <java.version>1.7</java.version>
-
-        <slf4j.version>1.7.10</slf4j.version>
-        <logback.version>1.1.2</logback.version>
-    </properties>
-
-    <build>
-        <finalName>servlet3-cass</finalName>
-
-        <plugins>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.2</version>
-                <configuration>
-                    <source>${java.version}</source>
-                    <target>${java.version}</target>
-                    <encoding>${sourceEncoding}</encoding>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-war-plugin</artifactId>
-                <version>2.6</version>
-                <configuration>
-                    <failOnMissingWebXml>false</failOnMissingWebXml>
-                </configuration>
-            </plugin>
-
-        </plugins>
-
-    </build>
-
-    <dependencies>
-
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-            <version>3.1.0</version>
-            <scope>provided</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>jstl</artifactId>
-            <version>1.2</version>
-            <scope>runtime</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>2.5.1</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.datastax.cassandra</groupId>
-            <artifactId>cassandra-driver-core</artifactId>
-            <version>2.1.4</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <version>${slf4j.version}</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <!--
-        <dependency>
-            <groupId>ch.qos.logback</groupId>
-            <artifactId>logback-classic</artifactId>
-            <version>${logback.version}</version>
-            <scope>runtime</scope>
-        </dependency>
-        -->
-
-    </dependencies>
-
-</project>

+ 0 - 10
frameworks/Java/servlet3-cass/setup.sh

@@ -1,10 +0,0 @@
-#!/bin/bash
-
-sed -i 's|localhost|'"${DBHOST}"'|g' src/main/resources/application.properties
-
-fw_depends java resin maven
-
-mvn clean compile war:war
-rm -rf $RESIN_HOME/webapps/*
-cp target/servlet3-cass.war $RESIN_HOME/webapps
-$RESIN_HOME/bin/resinctl start

+ 0 - 22
frameworks/Java/servlet3-cass/source_code

@@ -1,22 +0,0 @@
-./servlet3-cass/src/main
-./servlet3-cass/src/main/java
-./servlet3-cass/src/main/java/fi
-./servlet3-cass/src/main/java/fi/markoa
-./servlet3-cass/src/main/java/fi/markoa/tfb
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseBaseServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueriesServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueryServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseUpdatesServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/HelloMessage.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/JsonSerializationServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAO.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAOCassImpl.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/PlaintextServlet.java
-./servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/World.java
-./servlet3-cass/src/main/resources
-./servlet3-cass/src/main/resources/application.properties
-./servlet3-cass/src/main/resources/logback.xml
-./servlet3-cass/src/main/webapp
-./servlet3-cass/src/main/webapp/jsp
-./servlet3-cass/src/main/webapp/jsp/error.jsp

+ 0 - 105
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseBaseServlet.java

@@ -1,105 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.util.concurrent.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.AsyncContext;
-import javax.servlet.ServletConfig;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Executor;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Base class for Web Framework Benchmarks database test type implementations.
- *
- * @author marko asplund
- */
-public abstract class DatabaseBaseServlet extends HttpServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseBaseServlet.class);
-  protected static final ObjectMapper mapper = new ObjectMapper();
-  protected static final String MEDIATYPE_APPLICATION_JSON = "application/json";
-  protected static final int WORLD_LEAST_VALUE = 1;
-  protected static final int WORLD_BOUND_VALUE = 10000;
-
-  protected static final ListeningExecutorService executorService =
-    MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
-  protected MessageDAOCassImpl dao;
-
-  @Override
-  public void init(ServletConfig config) throws ServletException {
-    dao = new MessageDAOCassImpl();
-    dao.init(executorService);
-  }
-
-  /**
-   * callback for sending the response back to the client
-   *
-   * @param asyncContext Servlet asynchronous context
-   * @param future ListenableFuture holding the backend response
-   * @param executor ExecutorService instance for executing the ListenableFuture
-   */
-  protected void addResponseCallback(final AsyncContext asyncContext, ListenableFuture<?> future, Executor executor) {
-    Futures.addCallback(future, new FutureCallback<Object>() {
-      @Override
-      public void onSuccess(Object world) {
-        try {
-          mapper.writeValue(asyncContext.getResponse().getOutputStream(), world);
-        } catch (IOException ex) {
-          LOGGER.error("failed to get output stream", ex);
-        }
-        asyncContext.complete();
-      }
-
-      @Override
-      public void onFailure(Throwable th) {
-        LOGGER.error("failed to read data", th);
-        errorDispatch(asyncContext, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "failed to read data: "+th.getMessage());
-      }
-    }, executor);
-  }
-
-  protected void errorDispatch(AsyncContext asyncContext, int statusCode, String message) {
-    asyncContext.getRequest().setAttribute("statusCode", statusCode);
-    asyncContext.getRequest().setAttribute("message", message);
-    asyncContext.dispatch("/jsp/error.jsp");
-  }
-
-  protected int getQueries(String queries) {
-    int q;
-    if(queries == null) {
-      return 1;
-    }
-    try {
-      q = Integer.parseInt(queries);
-    } catch (NumberFormatException ex) {
-      return 1;
-    }
-    if(q > 500)
-      return 500;
-    else if(q < 1)
-      return 1;
-
-    return q;
-  }
-
-  protected List<Integer> generateRandomNumbers(int count, int least, int bound) {
-    List<Integer> ids = new ArrayList<>();
-    for(int cnt = 0; cnt < count; cnt++)
-      ids.add(ThreadLocalRandom.current().nextInt(least, bound));
-    return ids;
-  }
-
-  @Override
-  public void destroy() {
-    dao.destroy();
-    executorService.shutdown();
-  }
-}

+ 0 - 33
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueriesServlet.java

@@ -1,33 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.google.common.util.concurrent.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.AsyncContext;
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-/**
- * Web Framework Benchmarks
- * Test type 3: Multiple database queries
- *
- * @author marko asplund
- */
-@WebServlet(urlPatterns={"/queries"}, asyncSupported=true)
-public class DatabaseQueriesServlet extends DatabaseBaseServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseQueriesServlet.class);
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
-    resp.setContentType(MEDIATYPE_APPLICATION_JSON);
-    final AsyncContext asyncContext = req.startAsync();
-    ListenableFuture<?> future = dao.read(generateRandomNumbers(getQueries(req.getParameter("queries")),
-      WORLD_LEAST_VALUE, WORLD_BOUND_VALUE+1));
-    addResponseCallback(asyncContext, future, executorService);
-  }
-
-}

+ 0 - 34
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseQueryServlet.java

@@ -1,34 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.google.common.util.concurrent.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.AsyncContext;
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Web Framework Benchmarks
- * Test type 2: Single database query
- *
- * @author marko asplund
- */
-@WebServlet(urlPatterns={"/db"}, asyncSupported=true)
-public class DatabaseQueryServlet extends DatabaseBaseServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseQueryServlet.class);
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
-    resp.setContentType(MEDIATYPE_APPLICATION_JSON);
-    AsyncContext asyncContext = req.startAsync();
-    int randId = ThreadLocalRandom.current().nextInt(WORLD_LEAST_VALUE, WORLD_BOUND_VALUE+1);
-    ListenableFuture<?> future = dao.read(randId);
-    addResponseCallback(asyncContext, future, executorService);
-  }
-
-}

+ 0 - 86
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/DatabaseUpdatesServlet.java

@@ -1,86 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.AsyncContext;
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-/**
- * Web Framework Benchmarks
- * Test type 5: Database updates
- *
- * @author marko asplund
- */
-
-@WebServlet(urlPatterns={"/updates"}, asyncSupported=true)
-public class DatabaseUpdatesServlet extends DatabaseBaseServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseUpdatesServlet.class);
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
-    resp.setContentType(MEDIATYPE_APPLICATION_JSON);
-    final int queries = getQueries(req.getParameter("queries"));
-    final AsyncContext asyncContext = req.startAsync();
-    ListenableFuture<List<World>> readFuture = dao.read(generateRandomNumbers(queries,
-      WORLD_LEAST_VALUE, WORLD_BOUND_VALUE+1));
-    final ListenableFuture<List<Integer>> newRandomsFuture = generateRandomNumbersFuture(queries,
-      WORLD_LEAST_VALUE, WORLD_BOUND_VALUE+1);
-
-    Futures.addCallback(readFuture, new FutureCallback<List<World>>() {
-      @Override
-      public void onSuccess(List<World> worlds) {
-        List<Integer> newRandoms;
-        try {
-          newRandoms = newRandomsFuture.get();
-        } catch (InterruptedException | ExecutionException ex) {
-          LOGGER.error("failed to generate random numbers", ex);
-          errorDispatch(asyncContext, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "failed to generate random numbers"+ex.getMessage());
-          return;
-        }
-        List<World> newWorlds = new ArrayList<>();
-        for(int i = 0; i < worlds.size(); i++)
-          newWorlds.add(new World(worlds.get(i).getId(), newRandoms.get(i)));
-        dao.update(newWorlds);
-
-        try {
-          mapper.writeValue(asyncContext.getResponse().getOutputStream(), newWorlds);
-        } catch (IOException ex) {
-          LOGGER.error("failed to get output stream", ex);
-        }
-        asyncContext.complete();
-
-        LOGGER.debug("update done");
-      }
-
-      @Override
-      public void onFailure(Throwable th) {
-        LOGGER.error("update failed", th);
-        errorDispatch(asyncContext, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "update failed: "+th.getMessage());
-      }
-    }, executorService);
-
-  }
-
-  protected ListenableFuture<List<Integer>> generateRandomNumbersFuture(final int count, final int least, final int bound) {
-    return executorService.submit(new Callable<List<Integer>>() {
-      @Override
-      public List<Integer> call() throws Exception {
-        return generateRandomNumbers(count, least, bound);
-      }
-    });
-  }
-
-}

+ 0 - 12
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/HelloMessage.java

@@ -1,12 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-public class HelloMessage {
-  private String message = "Hello, World!";
-
-  public HelloMessage() {
-  }
-
-  public String getMessage() {
-    return message;
-  }
-}

+ 0 - 33
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/JsonSerializationServlet.java

@@ -1,33 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-/**
- * Web Framework Benchmarks
- * Test type 1: JSON serialization
- *
- * @author marko asplund
- */
-@WebServlet("/json")
-public class JsonSerializationServlet extends HttpServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(JsonSerializationServlet.class);
-  private static final ObjectMapper mapper = new ObjectMapper();
-  private static final String MEDIATYPE_APPLICATION_JSON = "application/json";
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
-    LOGGER.debug("doGet");
-    resp.setContentType(MEDIATYPE_APPLICATION_JSON);
-    mapper.writeValue(resp.getOutputStream(), new HelloMessage());
-  }
-
-}

+ 0 - 14
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAO.java

@@ -1,14 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
-import java.util.List;
-
-public interface MessageDAO {
-  void init(ListeningExecutorService executorService);
-  ListenableFuture<World> read(int id);
-  ListenableFuture<List<World>> read(List<Integer> ids);
-  ListenableFuture<Void> update(List<World> worlds);
-  void destroy();
-}

+ 0 - 93
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/MessageDAOCassImpl.java

@@ -1,93 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import com.datastax.driver.core.*;
-import com.google.common.base.Function;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.*;
-
-/**
- * Cassandra data access implementation class for the "World" domain model.
- *
- * @author marko asplund
- */
-public class MessageDAOCassImpl implements MessageDAO {
-  private static final Logger LOGGER = LoggerFactory.getLogger(MessageDAOCassImpl.class);
-  private static final String CONFIG_FILE_NAME= "/application.properties";
-  private Cluster cluster;
-  private Session session;
-  private Map<String, PreparedStatement> statements;
-
-  @Override
-  public void init(ListeningExecutorService executorService) {
-    LOGGER.debug("init()");
-
-    Properties conf;
-    try (InputStream is = this.getClass().getClassLoader().getResourceAsStream(CONFIG_FILE_NAME)) {
-      if(is == null)
-        throw new IOException("file not found: "+CONFIG_FILE_NAME);
-      conf = new Properties();
-      conf.load(is);
-    } catch (IOException ex) {
-      LOGGER.error("failed to open config file", ex);
-      throw new RuntimeException(ex);
-    }
-
-    cluster = Cluster.builder()
-      .addContactPoint(conf.getProperty("cassandra.host"))
-//      .withCredentials(conf.getProperty("cassandra.user"), conf.getProperty("cassandra.pwd"))
-      .build();
-    session = cluster.connect(conf.getProperty("cassandra.keyspace"));
-
-    Map<String, PreparedStatement> stmts = new HashMap<>();
-    stmts.put("get_by_id", session.prepare("SELECT randomnumber FROM world WHERE id=?"));
-    stmts.put("update_by_id", session.prepare("UPDATE world SET randomnumber=? WHERE id=?"));
-    statements = Collections.unmodifiableMap(stmts);
-  }
-
-  @Override
-  public ListenableFuture<World> read(final int id) {
-    Function<ResultSet, World> transformation = new Function<ResultSet, World>() {
-      @Override
-      public World apply(ResultSet results) {
-        Row r = results.one();
-        return new World(id, r.getInt("randomnumber"));
-      }
-    };
-    return Futures.transform(session.executeAsync(statements.get("get_by_id").bind(id)), transformation);
-  }
-
-  public ListenableFuture<List<World>> read(List<Integer> ids) {
-    List<ListenableFuture<World>> futures = new ArrayList<>();
-    for(Integer id : ids)
-      futures.add(read(id));
-    return Futures.allAsList(futures);
-  }
-
-  public ListenableFuture<Void> update(List<World> worlds) {
-    Function<ResultSet, Void> transformation = new Function<ResultSet, Void>() {
-      @Override
-      public Void apply(ResultSet rows) {
-        return null;
-      }
-    };
-    BatchStatement bs = new BatchStatement(BatchStatement.Type.UNLOGGED);
-    for(World w : worlds)
-      bs.add(statements.get("update_by_id").bind(w.getId(), w.getRandomNumber()));
-    return Futures.transform(session.executeAsync(bs), transformation);
-  }
-
-  @Override
-  public void destroy() {
-    LOGGER.debug("destroy()");
-    session.close();
-    cluster.close();
-  }
-
-}

+ 0 - 32
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/PlaintextServlet.java

@@ -1,32 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-/**
- * Web Framework Benchmarks
- * Test type 6: Plaintext
- *
- * @author marko asplund
- */
-@WebServlet("/plaintext")
-public class PlaintextServlet extends HttpServlet {
-  private static final Logger LOGGER = LoggerFactory.getLogger(PlaintextServlet.class);
-  private static final String MEDIATYPE_TEXT_PLAIN = "text/plain";
-  private static final byte[] CONTENT = "Hello, World!".getBytes();
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
-    LOGGER.debug("doGet");
-    resp.setContentType(MEDIATYPE_TEXT_PLAIN);
-    resp.getOutputStream().write(CONTENT);
-  }
-
-}

+ 0 - 27
frameworks/Java/servlet3-cass/src/main/java/fi/markoa/tfb/servlet3/World.java

@@ -1,27 +0,0 @@
-package fi.markoa.tfb.servlet3;
-
-public class World {
-  private int id;
-  private int randomNumber;
-
-  public World(int id, int randomNumber) {
-    this.id = id;
-    this.randomNumber = randomNumber;
-  }
-
-  public int getId() {
-    return id;
-  }
-
-  @Override
-  public String toString() {
-    return "World{" +
-      "id=" + id +
-      ", randomNumber=" + randomNumber +
-      '}';
-  }
-
-  public int getRandomNumber() {
-    return randomNumber;
-  }
-}

+ 0 - 2
frameworks/Java/servlet3-cass/src/main/resources/application.properties

@@ -1,2 +0,0 @@
-cassandra.host=localhost
-cassandra.keyspace=tfb

+ 0 - 16
frameworks/Java/servlet3-cass/src/main/resources/logback.xml

@@ -1,16 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<configuration>
-    <appender name="FILE" class="ch.qos.logback.core.FileAppender">
-        <file>servlet3-cass.log</file>
-        <append>true</append>
-        <encoder>
-            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{35} - %msg%n</pattern>
-        </encoder>
-    </appender>
-
-    <logger name="fi.markoa.tfb.servlet3" level="DEBUG"/>
-
-    <root level="INFO">
-        <appender-ref ref="FILE" />
-    </root>
-</configuration>

+ 0 - 17
frameworks/Java/servlet3-cass/src/main/webapp/jsp/error.jsp

@@ -1,17 +0,0 @@
-<%@ taglib prefix="c" uri="http://java.sun.com/jsp/jstl/core" %>
-<%@ page session="false" %>
-<%
-    if(request.getAttribute("statusCode") != null)
-        response.setStatus((Integer)request.getAttribute("statusCode"));
-    else
-        response.setStatus(500);
-%>
-<html>
-<head>
-<title>error</title>
-</head>
-<body>
-<h1>error</h1>
-${message}
-</body>
-</html>

+ 1 - 1
frameworks/Java/undertow/setup.sh

@@ -2,7 +2,7 @@
 
 
 sed -i 's|DATABASE_HOST|'"${DBHOST}"'|g' src/main/resources/hello/server.properties
 
-fw_depends java maven
+fw_depends mongodb postgresql mysql java maven
 
 mvn clean compile assembly:single
 cd target

+ 0 - 3
frameworks/PHP/README.md

@@ -136,9 +136,6 @@ because some gitignore files in this repo contain `*.lock` to avoid Ruby's lock
 If you are prompted for input during the `run-tests.py` script above, then you
 need to generate your lock file manually so that you may answer the input 
 queries as they are shown. Use these steps
-
-    # Switch to the user that runs tests
-    sudo su testrunner
     
     # Define the environment variables you need (modify as needed)
     export IROOT=/home/you/FrameworkBenchmarks/installs

+ 1 - 0
frameworks/PHP/phalcon/.gitignore

@@ -2,6 +2,7 @@
 /app/logs
 /app/compiled-templates/*.compiled
 /bin
+/vendor
 /vendors
 /build
 /dist

+ 3 - 3
frameworks/PHP/phalcon/app/controllers/MongobenchController.php

@@ -8,18 +8,18 @@ class MongobenchController extends BenchController
 
 
     protected function getRandomWorld()
     {
-        return WorldsCollection::findFirst(array(array('_id' => mt_rand(1, 10000))));
+        return MongoWorldsCollection::findFirst(array(array('_id' => mt_rand(1, 10000))));
     }
 
     protected function getFortunesArray()
     {
-        return FortunesCollection::find();
+        return MongoFortunesCollection::find();
     }
 
     protected function buildFortune()
     {
         $fortune = parent::buildFortune();
-        $newFortune = new FortunesCollection();
+        $newFortune = new MongoFortunesCollection();
         $newFortune->_id = $fortune['id'];
         $newFortune->message = $fortune['message'];
         return $newFortune;

+ 14 - 0
frameworks/PHP/phalcon/app/models/MongoFortunesCollection.php

@@ -0,0 +1,14 @@
+<?php
+
+class MongoFortunesCollection extends \Phalcon\Mvc\MongoCollection
+{
+
+    public $_id;
+    public $message;
+
+    public function getSource()
+    {
+        return "fortune";
+    }
+
+}

+ 14 - 0
frameworks/PHP/phalcon/app/models/MongoWorldsCollection.php

@@ -0,0 +1,14 @@
+<?php
+
+class MongoWorldsCollection extends \Phalcon\Mvc\MongoCollection
+{
+
+    public $_id;
+    public $randomNumber;
+
+    public function getSource()
+    {
+        return "world";
+    }
+
+}

+ 1 - 1
frameworks/PHP/phalcon/benchmark_config.json

@@ -1,5 +1,5 @@
 {
-  "framework": "php-phalcon",
+  "framework": "phalcon",
   "tests": [{
     "default": {
       "setup_file": "setup",

+ 6 - 0
frameworks/PHP/phalcon/composer.json

@@ -0,0 +1,6 @@
+{
+   "require": {
+   	"mongodb/mongodb" : "1.0.2",
+   	"phalcon/incubator": "3.0.2"
+   }
+}

+ 5 - 4
frameworks/PHP/phalcon/public/index.php

@@ -1,6 +1,7 @@
 <?php
 
 define('APP_PATH', realpath('..'));
+require APP_PATH . "/vendor/autoload.php";
 
 try {
 
@@ -75,11 +76,11 @@ try {
     // Setting up the mongodb connection
     $di->set('mongo', function() use ($config) {
         $mongodbConfig = $config->mongodb;
-        
-        $mongo = new \MongoClient($mongodbConfig->url);
-        return $mongo->{$mongodbConfig->db};
+
+        $mongo = new \Phalcon\Db\Adapter\MongoDB\Client($mongodbConfig->url);
+        return $mongo->selectDatabase($mongodbConfig->db);
     });
-    
+
     //Registering the collectionManager service
     $di->set('collectionManager', function() {
         // Setting a default EventsManager

+ 1 - 1
frameworks/PHP/phalcon/setup.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends php5 phalcon nginx
+fw_depends php7 phalcon nginx composer
 
 sed -i 's|mongodb://localhost|mongodb://'"${DBHOST}"'|g' app/config/config.php
 sed -i 's|localhost|'"${DBHOST}"'|g' app/config/config.php

+ 1 - 1
frameworks/Ruby/ngx_mruby/setup.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends rvm nginx
+fw_depends rvm nginx libhiredis-dev
 
 # We assume single-user installation as 
 # done in our rvm.sh script and 

+ 17 - 5
frameworks/Rust/iron/Cargo.toml

@@ -1,9 +1,21 @@
 [package]
-
 name = "iron"
-version = "0.0.1"
+version = "0.0.2"
+build = "build.rs"
+
+[build-dependencies]
+serde_codegen = "0.8"
 
 [dependencies]
-rustc-serialize = "0.3.19"
-iron = "0.3.0"
-router = "0.1.1"
+serde = "0.8.19"
+serde_json = "0.8.3"
+iron = "0.4.0"
+router = "0.4.0"
+persistent = "0.2.1"
+hyper = "0.9.13"
+rand = "0.3"
+postgres = "0.13.3"
+r2d2 = "0.7.1"
+r2d2_postgres = "0.11.0"
+mustache = "0.8.0"
+rustc-serialize = "0.3"

+ 5 - 1
frameworks/Rust/iron/benchmark_config.json

@@ -4,11 +4,15 @@
     "default": {
     "default": {
       "setup_file": "setup",
       "setup_file": "setup",
       "json_url": "/json",
       "json_url": "/json",
+      "db_url": "/db",
+      "fortune_url": "/fortune",
+      "query_url": "/queries?queries=",
+      "update_url": "/updates?queries=",
       "plaintext_url": "/plaintext",
       "plaintext_url": "/plaintext",
       "port": 8080,
       "port": 8080,
       "approach": "Realistic",
       "approach": "Realistic",
       "classification": "Micro",
       "classification": "Micro",
-      "database": "None",
+      "database": "Postgres",
       "framework": "iron",
       "framework": "iron",
       "language": "rust",
       "language": "rust",
       "orm": "raw",
       "orm": "raw",

+ 13 - 0
frameworks/Rust/iron/build.rs

@@ -0,0 +1,13 @@
+extern crate serde_codegen;
+
+use std::env;
+use std::path::Path;
+
+fn main() {
+    let out_dir = env::var_os("OUT_DIR").unwrap();
+
+    let src = Path::new("src/main_types.in.rs");
+    let dst = Path::new(&out_dir).join("main_types.rs");
+
+    serde_codegen::expand(&src, &dst).unwrap();
+}

+ 216 - 20
frameworks/Rust/iron/src/main.rs

@@ -1,36 +1,232 @@
 extern crate iron;
-extern crate router;
+extern crate persistent;
+#[macro_use] extern crate router;
+extern crate serde;
+extern crate serde_json;
+extern crate hyper;
+extern crate rand;
+extern crate r2d2;
+extern crate postgres;
+extern crate r2d2_postgres;
+extern crate mustache;
 extern crate rustc_serialize;
 
-use iron::{Iron, Request, Response, IronResult};
+use iron::prelude::*;
 use iron::status;
-use router::Router;
-use rustc_serialize::json;
-use iron::mime::Mime;
-use iron::headers::Server;
 use iron::modifiers::Header;
+use iron::typemap::Key;
+use hyper::header::{Server, ContentType};
+use rand::distributions::{Range, IndependentSample};
+use r2d2_postgres::{PostgresConnectionManager, TlsMode};
+use persistent::{Read};
+use r2d2::Pool;
 
-#[derive(RustcDecodable, RustcEncodable)]
-struct Message {
-    message: String,
+include!(concat!(env!("OUT_DIR"),"/main_types.rs"));
+
+pub type PostgresPool = Pool<PostgresConnectionManager>;
+
+struct DbPool;
+impl Key for DbPool { type Value = PostgresPool; }
+
+struct FortuneTemplate;
+impl Key for FortuneTemplate { type Value = mustache::Template; }
+
+#[derive(RustcEncodable)]
+struct FortuneRow {
+    id: i32,
+    message: String
 }
 
 fn main() {
-    let mut router = Router::new();
-    router.get("/json", json_handler);
-    router.get("/plaintext", plaintext_handler);
-
-    Iron::new(router).http("0.0.0.0:8080").unwrap();
+    let dbhost = match option_env!("DBHOST") {
+        Some(it) => it,
+        _ => "localhost"
+    };
+    let r2d2_config = r2d2::Config::default();
+    let pg_conn_manager = PostgresConnectionManager::new(
+        format!("postgres://benchmarkdbuser:benchmarkdbpass@{dbhost}/hello_world", dbhost=dbhost),
+        TlsMode::None).unwrap();
+    let pool = r2d2::Pool::new(r2d2_config, pg_conn_manager).unwrap();
+    let template = mustache::compile_str("<!DOCTYPE html>
+    <html> <head><title>Fortunes</title></head>
+    <body> <table> 
+    <tr><th>id</th><th>message</th></tr> 
+    {{#.}} <tr><td>{{id}}</td><td>{{message}}</td></tr> 
+    {{/.}} 
+    </table> </body> </html>").unwrap();
+    let app = router!(
+            json: get "/json" => json_handler,
+            single_db_query: get "/db" => single_db_query_handler,
+            plaintext: get "/plaintext" => plaintext_handler,
+            queries: get "/queries" => queries_handler,
+            fortune: get "/fortune" => fortune_handler,
+            updates: get "/updates" => updates_handler
+        );
+    let mut middleware = Chain::new(app);
+    middleware.link(Read::<DbPool>::both(pool));
+    middleware.link(Read::<FortuneTemplate>::both(template));
+    println!("Starting server...");
+    Iron::new(middleware).http("0.0.0.0:8080").unwrap();
 }
 
 fn json_handler(_: &mut Request) -> IronResult<Response> {
-    let message: Message = Message { message: "Hello, World!".to_string() };
-    let mime: Mime = "application/json".parse().unwrap();
-    let server = Header(Server(String::from("Iron")));
-    Ok(Response::with((status::Ok, json::encode(&message).unwrap(), mime, server)))
+    let message: Message = Message { 
+        message: "Hello, World!".to_owned() 
+    };
+    let content_type = Header(ContentType::json());
+    let server = Header(Server("Iron".to_owned()));
+    Ok(Response::with(
+        (status::Ok,
+        serde_json::to_string(&message).unwrap(),
+        content_type,
+        server
+        )))
 }
 
 fn plaintext_handler(_: &mut Request) -> IronResult<Response> {
-    let server = Header(Server(String::from("Iron")));
-    Ok(Response::with((status::Ok, "Hello, World!", server)))
+    let server = Header(Server("Iron".to_owned()));
+    Ok(Response::with((
+        status::Ok, 
+        "Hello, World!", 
+        server)))
+}
+
+fn single_db_query_handler(req: &mut Request) -> IronResult<Response> {
+    let content_type = Header(ContentType::json());
+    let server = Header(Server("Iron".to_owned()));
+    let pool = req.get::<Read<DbPool>>().unwrap();
+    let conn = pool.get().unwrap();
+    let row = random_row(conn);
+    Ok(Response::with((
+        status::Ok,
+        serde_json::to_string(&row).unwrap(),
+        server,
+        content_type
+        )))
+}
+
+fn queries_handler(req: &mut Request) -> IronResult<Response> {
+    let content_type = Header(ContentType::json());
+    let server = Header(Server("Iron".to_owned()));
+    let pool = req.get::<Read<DbPool>>().unwrap();
+    let query = req.url.query().unwrap();
+    let param = match get_param(query, "queries") {
+        Some(n) => match n.parse::<usize>() {
+            Ok(m) => match m {
+                e @ 1...500 => e,
+                e if e > 500 => 500,
+                _ => 1
+            },
+            _ => 1
+        },
+        _ => 1
+    };
+    let mut res: Vec<DatabaseRow> = Vec::with_capacity(param);
+    for _ in 0..param {
+        let conn = pool.get().unwrap();
+        res.push(random_row(conn))
+    };
+    Ok(
+        Response::with((
+            status::Ok, 
+            serde_json::to_string(&res).unwrap(),
+            server,
+            content_type
+    )))
+}
+
+fn fortune_handler(req: &mut Request) -> IronResult<Response> {
+    let content_type = Header(ContentType::html());
+    let server = Header(Server("Iron".to_owned()));
+    let template = req.get::<Read<FortuneTemplate>>().unwrap();
+    let pool = req.get::<Read<DbPool>>().unwrap();
+    let conn = pool.get().unwrap();
+    let query_res = &conn.query("SELECT id, message FROM Fortune",&[]).unwrap();
+    let query_res_iter = query_res.iter();
+    let mut rows: Vec<FortuneRow> = query_res_iter.map(|row| FortuneRow {
+        id: row.get(0),
+        message: row.get(1)
+    }).collect();
+    rows.push(FortuneRow {
+        id: 0,
+        message: "Additional fortune added at request time.".to_string()
+    });
+    rows.sort_by(|it, next| it.message.cmp(&next.message));
+    let mut res = vec![];
+    template.render(&mut res, &rows).unwrap();
+    Ok(
+        Response::with((
+            status::Ok,
+            res,
+            server,
+            content_type
+    )))
+}
+
+fn updates_handler(req: &mut Request) -> IronResult<Response> {
+    let mut rng = rand::thread_rng();
+    let between = Range::new(1,10000);
+    let content_type = Header(ContentType::json());
+    let server = Header(Server("Iron".to_owned()));
+    let pool = req.get::<Read<DbPool>>().unwrap();
+    let query = req.url.query().unwrap();
+    let param = match get_param(query, "queries") {
+        Some(n) => match n.parse::<usize>() {
+            Ok(m) => match m {
+                e @ 1...500 => e,
+                e if e > 500 => 500,
+                _ => 1
+            },
+            _ => 1
+        },
+        _ => 1
+    };
+    let mut dbres: Vec<DatabaseRow> = Vec::with_capacity(param);
+    for _ in 0..param {
+        let conn = pool.get().unwrap();
+        dbres.push(random_row(conn))
+    };
+    let conn = pool.get().unwrap();
+    let trans = conn.transaction().unwrap();
+    // Sorting guarantees no deadlocks between multiple concurrent threads
+    dbres.sort_by_key(|it| it.id );
+    let mut res: Vec<DatabaseRow> = Vec::with_capacity(param);
+    for row in dbres {
+        let num = between.ind_sample(&mut rng);
+        trans.execute("UPDATE World SET randomnumber = $1 WHERE id = $2", &[&num, &row.id]).unwrap();
+        res.push(DatabaseRow {
+            id: row.id,
+            randomNumber: num 
+        })
+    }
+    trans.commit().unwrap();
+    Ok(
+        Response::with((
+            status::Ok,
+            serde_json::to_string(&res).unwrap(),
+            server,
+            content_type
+    )))
+}
+
+fn random_row(conn: r2d2::PooledConnection<PostgresConnectionManager>) -> DatabaseRow {
+    let mut rng = rand::thread_rng();
+    let between = Range::new(1,10000);
+    let num = between.ind_sample(&mut rng);
+    let rows = &conn.query("SELECT id, randomnumber FROM World WHERE id = $1",&[&num]).unwrap();
+    let row = rows.get(0);
+    DatabaseRow {
+        id: row.get(0),
+        randomNumber: row.get(1)
+    }
+}
+
+fn get_param<'a>(querystring: &'a str, param: &'a str) -> Option<&'a str> {
+    let n = querystring.split("&").find(
+        |&it| !(it.find(param).is_none())
+    ); 
+    match n {
+        Some(n) => n.split("=").nth(1),
+        _ => n
+    }
 }
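Note on the queries parameter: the match ladders in queries_handler and updates_handler implement the benchmark's required clamping rule. A minimal Python sketch of the equivalent logic (the function name is illustrative, not part of this change):

def clamp_queries(raw):
    # None or an unparsable value falls back to 1; valid values are
    # clamped into the 1..500 range, mirroring the Rust match above.
    try:
        n = int(raw)
    except (TypeError, ValueError):
        return 1
    return max(1, min(n, 500))

assert clamp_queries(None) == 1
assert clamp_queries("abc") == 1
assert clamp_queries("250") == 250
assert clamp_queries("9999") == 500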

+ 11 - 0
frameworks/Rust/iron/src/main_types.in.rs

@@ -0,0 +1,11 @@
+#[derive(Serialize, Deserialize)]
+struct Message {
+    message: String,
+}
+
+#[allow(non_snake_case)]
+#[derive(Serialize, Deserialize)]
+struct DatabaseRow {
+    id: i32,
+    randomNumber: i32
+}
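The #[allow(non_snake_case)] attribute lets the field be spelled randomNumber, so serde serializes that exact key, which the benchmark verifier expects. A hedged illustration of the resulting JSON shape (values are made up):

import json
row = json.loads('{"id": 3217, "randomNumber": 2149}')
assert sorted(row) == ["id", "randomNumber"]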

+ 50 - 33
toolset/benchmark/benchmarker.py

@@ -1,4 +1,3 @@
-from setup.linux.installer import Installer
 from setup.linux import setup_util

 from benchmark import framework_test
@@ -546,7 +545,6 @@ class Benchmarker:
           p.communicate("""
             sudo restart mysql
             sudo restart mongod
-            sudo service redis-server restart
             sudo service postgresql restart
             sudo service cassandra restart
             /opt/elasticsearch/elasticsearch restart
@@ -556,7 +554,6 @@ class Benchmarker:
           st = verify_database_connections([
             ("mysql", self.database_host, 3306),
             ("mongodb", self.database_host, 27017),
-            ("redis", self.database_host, 6379),
             ("postgresql", self.database_host, 5432),
             ("cassandra", self.database_host, 9160),
             ("elasticsearch", self.database_host, 9200)
@@ -566,21 +563,16 @@ class Benchmarker:
         self.__cleanup_leftover_processes_before_test();

         if self.__is_port_bound(test.port):
-          # This can happen sometimes - let's try again
-          self.__stop_test(out)
+          # We gave it our all
+          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
+          out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
           out.flush()
-          time.sleep(15)
-          if self.__is_port_bound(test.port):
-            # We gave it our all
-            self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
-            out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
-            out.flush()
-            print "Error: Unable to recover port, cannot start test"
-            return exit_with_code(1)
+          print "Error: Unable to recover port, cannot start test"
+          return exit_with_code(1)

-        result = test.start(out)
+        result, process = test.start(out)
         if result != 0:
-          self.__stop_test(out)
+          self.__stop_test(out, process)
           time.sleep(5)
           out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
           out.flush()
@@ -618,15 +610,15 @@ class Benchmarker:
         ##########################
         out.write(header("Stopping %s" % test.name))
         out.flush()
-        self.__stop_test(out)
+        self.__stop_test(out, process)
         out.flush()
-        time.sleep(15)
+        time.sleep(5)

         if self.__is_port_bound(test.port):
           # This can happen sometimes - let's try again
-          self.__stop_test(out)
+          self.__stop_test(out, process)
           out.flush()
-          time.sleep(15)
+          time.sleep(5)
           if self.__is_port_bound(test.port):
             # We gave it our all
             self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
@@ -636,7 +628,6 @@ class Benchmarker:

         out.write(header("Stopped %s" % test.name))
         out.flush()
-        time.sleep(5)

         ##########################################################
         # Remove contents of  /tmp folder
@@ -669,7 +660,7 @@ class Benchmarker:
         traceback.print_exc(file=out)
         out.flush()
         try:
-          self.__stop_test(out)
+          self.__stop_test(out, process)
         except (subprocess.CalledProcessError) as e:
           self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
           out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
@@ -680,7 +671,7 @@ class Benchmarker:
       # TODO - subprocess should not catch this exception!
       # Parent process should catch it and cleanup/exit
       except (KeyboardInterrupt) as e:
-        self.__stop_test(out)
+        self.__stop_test(out, process)
         out.write(header("Cleaning up..."))
         out.write(header("Cleaning up..."))
         out.flush()
         out.flush()
         self.__finish()
         self.__finish()
@@ -697,16 +688,46 @@ class Benchmarker:
   # __stop_test(benchmarker)
   # Stops all running tests
   ############################################################
-  def __stop_test(self, out):
+  def __stop_test(self, out, process):
+    if process is not None and process.poll() is None:
+      # Freeze the process tree first so it cannot spawn
+      # replacements, then terminate it
+      pids = self.__find_child_processes(process.pid)
+      if pids:
+        stop = ['kill', '-STOP'] + pids
+        subprocess.call(stop, stderr=out, stdout=out)
+      pids = self.__find_child_processes(process.pid)
+      if pids:
+        term = ['kill', '-TERM'] + pids
+        subprocess.call(term, stderr=out, stdout=out)
+      # Okay, if there are any more PIDs, kill them harder
+      pids = self.__find_child_processes(process.pid)
+      if pids:
+        kill = ['kill', '-KILL'] + pids
+        subprocess.call(kill, stderr=out, stdout=out)
+      process.terminate()
+  ############################################################
+  # End __stop_test
+  ############################################################
+
+  ############################################################
+  # __find_child_processes
+  # Recursively finds all child processes for the given PID.
+  ############################################################
+  def __find_child_processes(self, pid):
+    toRet = []
     try:
-      subprocess.check_call('sudo killall -s 9 -u %s' % self.runner_user, shell=True, stderr=out, stdout=out)
-      retcode = 0
-    except Exception:
-      retcode = 1
+      pids = subprocess.check_output(['pgrep', '-P', str(pid)]).split()
+      toRet.extend(pids)
+      for aPid in pids:
+        toRet.extend(self.__find_child_processes(aPid))
+    except subprocess.CalledProcessError:
+      # pgrep returns a non-zero status code if no process
+      # has a PPID equal to the given PID.
+      pass

-    return retcode
+    return toRet
   ############################################################
-  # End __stop_test
+  # End __find_child_processes
   ############################################################

   def is_port_bound(self, port):
@@ -1034,10 +1055,6 @@ class Benchmarker:
     if self.client_identity_file != None:
       self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

-    if self.install is not None:
-      install = Installer(self, self.install_strategy)
-      install.install_software()
-
   ############################################################
   # End __init__
   ############################################################
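For context, __stop_test now escalates kill -STOP, then -TERM, then -KILL over the process tree that __find_child_processes discovers with pgrep. A hedged standalone sketch of that discovery step (assuming a Linux host with pgrep on the PATH; names are illustrative):

import subprocess

def process_tree(pid):
    # pgrep -P lists direct children; recurse to collect all descendants.
    try:
        children = subprocess.check_output(['pgrep', '-P', str(pid)]).split()
    except subprocess.CalledProcessError:
        return []  # pgrep exits non-zero when pid has no children
    tree = []
    for child in children:
        tree.append(child)
        tree.extend(process_tree(child))
    return tree

Stopping the tree before terminating it matters: a SIGSTOPped parent cannot fork replacements while its children are being killed.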

+ 32 - 36
toolset/benchmark/framework_test.py

@@ -187,12 +187,31 @@ class FrameworkTest:
                 self.benchmarker.threads,
                 max(self.benchmarker.concurrency_levels)))

-    # Always ensure that IROOT belongs to the runner_user
+    # Always ensure that IROOT exists
     if not os.path.exists(self.install_root):
       os.mkdir(self.install_root)
-    chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
-      self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root))
-    subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash')
+
+    if not os.path.exists(os.path.join(self.install_root,"TFBReaper")):
+      subprocess.check_call(['gcc', 
+        '-std=c99', 
+        '-o%s/TFBReaper' % self.install_root, 
+        os.path.join(self.fwroot,'toolset/setup/linux/TFBReaper.c')  ],
+        stderr=out, stdout=out)
+
+    # Check that the client is setup
+    if not os.path.exists(os.path.join(self.install_root, 'client.installed')):
+      print("\nINSTALL: Installing client software\n")    
+      # TODO: hax; should dynamically know where this file is
+      with open (self.fwroot + "/toolset/setup/linux/client.sh", "r") as myfile:
+        remote_script=myfile.read()
+        print("\nINSTALL: %s" % self.benchmarker.client_ssh_string)
+        p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" ") + ["bash"], stdin=subprocess.PIPE)
+        p.communicate(remote_script)
+        returncode = p.returncode
+        if returncode != 0:
+          self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.client_ssh_string))
+      print("\nINSTALL: Finished installing client software\n")
+      subprocess.check_call('touch client.installed', shell=True, cwd=self.install_root, executable='/bin/bash')

     # Run the module start inside parent of TROOT
     #  - we use the parent as a historical accident, a number of tests
@@ -201,31 +220,7 @@ class FrameworkTest:
     os.chdir(os.path.dirname(self.troot))
     logging.info("Running setup module start (cwd=%s)", self.directory)

-    # Run the start script for the test as the "testrunner" user
-    #
-    # `sudo` - Switching user requires superuser privs
-    #   -u [username] The username
-    #   -E Preserves the current environment variables
-    #   -H Forces the home var (~) to be reset to the user specified
-    # `stdbuf` - Disable buffering, send output to python ASAP
-    #   -o0 zero-sized buffer for stdout
-    #   -e0 zero-sized buffer for stderr
-    # `bash` - Run the setup.sh script using bash
-    #   -e Force bash to exit on first error
-    #   -x Turn on bash tracing e.g. print commands before running
-    #
-    # Most servers do not output to stdout/stderr while serving
-    # requests so there is no performance hit from disabling
-    # output buffering. This disabling is necessary to
-    # a) allow TFB to show output in real time and b) avoid loosing
-    # output in the buffer when the testrunner processes are forcibly
-    # killed
-    #
-    # See http://www.pixelbeat.org/programming/stdio_buffering/
-    # See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
-    # See http://eyalarubas.com/python-subproc-nonblock.html
-    command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
-      self.benchmarker.runner_user,
+    command = 'bash -exc "source %s && source %s.sh"' % (
       bash_functions_path,
       os.path.join(self.troot, self.setup_file))
@@ -238,7 +233,7 @@ class FrameworkTest:
       export MAX_THREADS=%s     &&  \\
       export MAX_CONCURRENCY=%s && \\
       cd %s && \\
-      %s''' % (self.fwroot,
+      %s/TFBReaper "bash -exc \\\"source %s && source %s.sh\\\"''' % (self.fwroot,
         self.directory,
         self.install_root,
         self.database_host,
@@ -246,7 +241,9 @@ class FrameworkTest:
         self.benchmarker.threads,
         max(self.benchmarker.concurrency_levels),
         self.directory,
-        command)
+        self.install_root,
+        bash_functions_path,
+        os.path.join(self.troot, self.setup_file))
     logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
     logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
 
 
 
 
@@ -265,8 +262,9 @@ class FrameworkTest:
       out.flush()

     # Start the setup.sh command
-    p = subprocess.Popen(command, cwd=self.directory,
-          shell=True, stdout=subprocess.PIPE,
+    p = subprocess.Popen(["%s/TFBReaper" % self.install_root,command],
+          cwd=self.directory,
+          stdout=subprocess.PIPE,
           stderr=subprocess.STDOUT)
           stderr=subprocess.STDOUT)
     nbsr = setup_util.NonBlockingStreamReader(p.stdout,
       "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))
@@ -364,7 +362,7 @@ class FrameworkTest:
     logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
     logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
     os.chdir(previousDir)
     os.chdir(previousDir)
 
 
-    return retcode
+    return retcode, p
   ############################################################
   # End start
   ############################################################
@@ -855,8 +853,6 @@ class FrameworkTest:
     logging.basicConfig(stream=sys.stderr, level=logging.INFO)

     self.install_root="%s/%s" % (self.fwroot, "installs")
-    if benchmarker.install_strategy is 'pertest':
-      self.install_root="%s/pertest/%s" % (self.install_root, name)

     # Used in setup.sh scripts for consistency with
     # the bash environment variables

+ 4 - 20
toolset/run-tests.py

@@ -126,7 +126,6 @@ def main(argv=None):
     parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
     parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
     parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
-    parser.add_argument('-r', '--runner-user', default=runnerUser, help='The user to run each test as.')
     parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                         help='The key to use for SSH to the client instance.')
     parser.add_argument('-d', '--database-host', default=databaHost,
@@ -134,18 +133,9 @@ def main(argv=None):
     parser.add_argument('--database-user', default=databaUser,
                         help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
     parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
-                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
-    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
-    
+                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.') 
     
     # Install options
-    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
-                        help='Runs installation script(s) before continuing on to execute the tests.')
-    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
-    parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified', 
-        help='''Affects : With unified, all server software is installed into a single directory. 
-        With pertest each test gets its own installs directory, but installation takes longer''')
-    parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')
     parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
     parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')

@@ -176,16 +166,10 @@ def main(argv=None):

     # Verify and massage options
     if args.client_user is None:
-      print 'Usernames (e.g. --client-user, --runner-user, and --database-user) are required!'
+      print 'Usernames (e.g. --client-user and --database-user) are required!'
       print 'The system will SSH into the client and the database for the install stage'
       print 'Aborting'
-      exit(1)
-
-    if args.runner_user is None:
-      print 'Usernames (e.g. --client-user, --runner-user, and --database-user) are required!'
-      print 'The system will run each test as the runner-user'
-      print 'Aborting'
-      exit(1)        
+      exit(1)    

     if args.database_user is None:
       args.database_user = args.client_user
@@ -208,7 +192,7 @@ def main(argv=None):
       benchmarker.run_list_test_metadata()
     elif args.parse != None:
       benchmarker.parse_timestamp()
-    elif not args.install_only:
+    else:
       return benchmarker.run()

 if __name__ == "__main__":

+ 62 - 0
toolset/setup/linux/TFBReaper.c

@@ -0,0 +1,62 @@
+#define _DEFAULT_SOURCE
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <string.h>
+
+int main(int argc, char *argv[])
+{
+  // Gather the command line arguments for the pass-through.
+  int count = argc - 1;
+  int *sizes = malloc(sizeof(int) * count);
+  int total_size = 0;
+  for( int i = 1; i < argc; i++ ) {
+    sizes[i - 1] = strlen(argv[i]);
+    total_size += sizes[i - 1];
+  }
+  char *result = malloc(total_size + count + 1); // args, separators, trailing '\0'
+  char *ptr = result;
+  for( int i = 1; i < argc; i++ ) {
+    memcpy(ptr, argv[i], sizes[i - 1]);
+    ptr[sizes[i - 1]] = ' ';
+    ptr += sizes[i - 1] + 1;
+  }
+  *ptr = '\0';
+  free(sizes);
+
+  // Here is the magic. This sets any child processes to
+  // use THIS process as a 'subreaper'. That means that
+  // even if a process uses the fork-exit technique for
+  // running a daemon (which normally orphans the process
+  // and causes init(1) to adopt it, which is problematic
+  // for TFB because we then generally cannot kill the
+  // process, it having lost all context available to us),
+  // the child process will keep THIS process as its parent,
+  // allowing us to reliably kill all the processes started
+  // by the suite.
+  //
+  // See: http://man7.org/linux/man-pages/man2/prctl.2.html
+  prctl(PR_SET_CHILD_SUBREAPER,1);
+
+  // This invokes whatever was passed as arguments to TFBReaper
+  // on the system. This program is merely a pass-through to
+  // a shell with the subreaper stuff enabled.
+  int ret = system(result);
+
+  // We need to wait forever; the suite will clean this 
+  // process up later.
+  if (ret == 0) {
+    for(;;) { 
+      // Pause to keep us from spiking CPU; whenever a signal
+      // occurs (except SIGTERM etc which will kill this process)
+      // just iterate and pause again.
+      pause(); 
+    }
+  }
+
+  // If the scripts failed, we should return that code.
+  return ret;
+}
+
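To see the subreaper mechanism in isolation, here is a hedged Python demo of the same prctl(2) call (PR_SET_CHILD_SUBREAPER is 36 in <sys/prctl.h> on Linux; this snippet is not part of the suite):

import ctypes, os, time

libc = ctypes.CDLL("libc.so.6", use_errno=True)
PR_SET_CHILD_SUBREAPER = 36  # from <sys/prctl.h>

# Mark this process as a subreaper, as TFBReaper does above.
if libc.prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) != 0:
    raise OSError(ctypes.get_errno(), "prctl failed")

pid = os.fork()
if pid == 0:
    # The middle process daemonizes a grandchild and exits immediately.
    if os.fork() == 0:
        time.sleep(30)  # the orphaned "daemon"
    os._exit(0)

os.waitpid(pid, 0)  # reap the middle process
time.sleep(0.5)
# The orphan is now re-parented to this process rather than init(1),
# so `pgrep -P <our pid>` can still find and kill it.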

+ 6 - 2
toolset/setup/linux/bash_functions.sh

@@ -84,7 +84,6 @@ fw_traperror () {
 # Jester, etc. Users should know this
 # fairly well (e.g. you can't use Yaf without PHP)
 fw_depends() {
-
   # Turn on errtrace (-E), so that our ERR
   # trap is passed on to any subshells
   set -E
@@ -131,8 +130,13 @@ fw_depends() {
       echo Installing framework: $depend in $relative_wd
       set -x
       . $FWROOT/toolset/setup/linux/frameworks/${depend}.sh
+    elif [ -f $FWROOT/toolset/setup/linux/databases/${depend}.sh ]; then
+      echo Installing database: $depend in $relative_wd
+      set -x
+      . $FWROOT/toolset/setup/linux/databases/${depend}.sh
     else
-      echo WARN: No installer found for $depend
+      echo WARN: No installer found for $depend, attempting to install with 'apt-get'...
+      sudo apt-get install -o Dpkg::Options::="--force-confold" --force-yes ${depend}
       # Return whence you came.
       popd
       continue
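In other words, fw_depends now resolves each dependency name in this order: a script in toolset/setup/linux/frameworks/, then one in toolset/setup/linux/databases/, and finally a plain apt-get install of the name itself. A hedged Python rendering of the lookup (the helper is hypothetical; only the directories visible in this hunk are modeled):

import os.path

def resolve_installer(fwroot, depend):
    for kind in ('frameworks', 'databases'):
        path = '%s/toolset/setup/linux/%s/%s.sh' % (fwroot, kind, depend)
        if os.path.exists(path):
            return path  # fw_depends sources this script
    return None  # caller falls back to: sudo apt-get install <depend>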

+ 2 - 0
toolset/setup/linux/client.sh

@@ -21,6 +21,7 @@ sudo sh -c "echo '*               -    nofile          65535' >> /etc/security/l
 # wrk
 ##############################

+rm -rf wrk-4.0.1.tar.gz wrk-4.0.1
 curl -sL -o wrk-4.0.1.tar.gz https://github.com/wg/wrk/archive/4.0.1.tar.gz
 tar xzf wrk-4.0.1.tar.gz
 cd wrk-4.0.1
@@ -31,6 +32,7 @@ cd ~
 #############################
 # pipeline.lua
 #############################
+rm -rf pipeline.lua
 cat << EOF | tee pipeline.lua
 init = function(args)
   local r = {}

+ 0 - 281
toolset/setup/linux/database.sh

@@ -1,281 +0,0 @@
-#!/bin/bash
-#
-# Configures the database server for TFB
-#
-# Note: This is not used for Travis-CI. See run-ci.py to see
-# how databases are configured for Travis.
-#
-# Note on Compatibility: TFB *only* supports Ubuntu 14.04 64bit
-# (e.g. trusty64). However, it's nice to retain 12.04 support
-# where possible, as it's still heavily used.
-#
-# Database setup is one core area where we can help ensure TFB
-# works on 12.04 with minimal frustration. In some cases we
-# manually install the DB version that's expected, instead of the
-# 12.04 default. In other cases we can use a 12.04 specific
-# configuration file. These patches are not intended to enable
-# benchmarking (e.g. there are no guarantees that
-# the databases will be tuned for performance correctly), but
-# they do allow users on 12.04 to install and run most TFB tests.
-# Some tests internally have 12.04 incompatibilities, we make no
-# concentrated effort to address these cases, but PR's for specific
-# problems are welcome
-
-set -x
-export DEBIAN_FRONTEND=noninteractive
-
-source /etc/lsb-release
-export TFB_DISTRIB_ID=$DISTRIB_ID
-export TFB_DISTRIB_RELEASE=$DISTRIB_RELEASE
-export TFB_DISTRIB_CODENAME=$DISTRIB_CODENAME
-export TFB_DISTRIB_DESCRIPTION=$DISTRIB_DESCRIPTION
-
-##############################
-# check environment
-##############################
-
-# verify that $TFB_DBHOST is set
-echo "TFB_DBHOST: $TFB_DBHOST"
-[ -z "$TFB_DBHOST" ] && echo "ERROR: TFB_DBHOST is not set!"
-
-##############################
-# Prerequisites
-##############################
-sudo apt-get -y update
-# WARNING: DONT PUT A SPACE AFTER ANY BACKSLASH OR APT WILL BREAK
-# Dpkg::Options avoid hangs on Travis-CI, don't affect clean systems
-sudo apt-get -y install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
-    build-essential \
-    git \
-    libev-dev \
-    libpq-dev \
-    libreadline6-dev \
-    postgresql        `# Installs 9.1 or 9.3, based on Ubuntu version` \
-    redis-server      `# Installs 2.4 or 2.6, based on Ubuntu version` \
-    lsb-core          `# Ensure that lsb_release can be used`
-
-sudo sh -c "echo '*               -    nofile          65535' >> /etc/security/limits.conf"
-
-# Create a user-owned directory for our databases
-sudo mkdir -p /ssd
-sudo mkdir -p /ssd/log
-sudo chown -R $USER:$USER /ssd
-
-# Additional user account (only use if required)
-sudo useradd benchmarkdbuser -p benchmarkdbpass
-
-##############################
-# MySQL
-##############################
-echo "Setting up MySQL database"
-sudo DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-server
-
-sudo stop mysql
-# disable checking of disk size
-sudo mv mysql /etc/init.d/mysql
-sudo chmod +x /etc/init.d/mysql
-sudo mv mysql.conf /etc/init/mysql.conf
-# use the my.cnf file to overwrite /etc/mysql/my.cnf
-sudo mv /etc/mysql/my.cnf /etc/mysql/my.cnf.orig
-sudo mv my.cnf /etc/mysql/my.cnf
-
-sudo rm -rf /ssd/mysql
-sudo rm -rf /ssd/log/mysql
-sudo cp -R -p /var/lib/mysql /ssd/
-sudo cp -R -p /var/log/mysql /ssd/log
-sudo cp usr.sbin.mysqld /etc/apparmor.d/
-sudo /etc/init.d/apparmor reload
-sudo start mysql
-
-# Set root password
-sudo mysqladmin -u root password secret
-# Insert data
-mysql -uroot -psecret < create.sql
-rm create.sql
-
-##############################
-# Postgres
-# Version: 9.*
-##############################
-
-echo "Setting up Postgres database"
-
-# This will support all 9.* versions depending on the machine
-PG_VERSION=`pg_config --version | grep -oP '\d\.\d'`
-
-sudo service postgresql stop
-
-# Sometimes this doesn't work with postgresql
-sudo killall -s 9 -u postgres
-sudo mv postgresql.conf /etc/postgresql/${PG_VERSION}/main/postgresql.conf
-sudo mv pg_hba.conf /etc/postgresql/${PG_VERSION}/main/pg_hba.conf
-
-# Make sure all the configuration files in main belong to postgres
-sudo chown -Rf postgres:postgres /etc/postgresql/${PG_VERSION}/main
-
-sudo rm -rf /ssd/postgresql
-sudo cp -R -p /var/lib/postgresql/${PG_VERSION}/main /ssd/postgresql
-sudo mv 60-postgresql-shm.conf /etc/sysctl.d/60-postgresql-shm.conf
-
-sudo chown postgres:postgres /etc/sysctl.d/60-postgresql-shm.conf
-sudo chown postgres:postgres create-postgres*
-
-sudo service postgresql start
-
-sudo -u postgres psql template1 < create-postgres-database.sql
-sudo -u postgres psql hello_world < create-postgres.sql
-sudo rm create-postgres-database.sql create-postgres.sql
-
-##############################
-# MongoDB
-#
-# Note for 12.04: Using mongodb.org ensures 2.6 is installed
-##############################
-echo "Setting up MongoDB database"
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
-echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
-sudo apt-get -y update
-sudo apt-get -y remove mongodb-clients
-sudo apt-get -y install mongodb-org
-
-sudo service mongod stop
-sudo mv /etc/mongodb.conf /etc/mongodb.conf.orig
-sudo cp mongodb.conf /etc/mongodb.conf
-sudo mv mongodb.conf /etc/mongod.conf
-sudo rm -rf /ssd/mongodb
-sudo rm -rf /ssd/log/mongodb
-sudo cp -R -p /var/lib/mongodb /ssd/
-sudo cp -R -p /var/log/mongodb /ssd/log/
-sudo service mongod start
-
-for i in {1..15}; do
-  nc -z localhost 27017 && break || sleep 1;
-  echo "Waiting for MongoDB ($i/15}"
-done
-nc -z localhost 27017
-if [ $? -eq 0 ]; then
-  mongo < create.js
-  rm create.js
-  mongod --version
-else
-  >&2 echo "MongoDB did not start, skipping"
-fi
-
-##############################
-# Apache Cassandra
-##############################
-echo "Setting up Apache Cassandra database"
-sudo apt-get install -qqy openjdk-7-jdk
-
-sudo addgroup --system cassandra
-sudo adduser --system --home /ssd/cassandra --no-create-home --ingroup cassandra cassandra
-
-export CASS_V=2.0.12
-#wget -nv http://archive.apache.org/dist/cassandra/$CASS_V/apache-cassandra-$CASS_V-bin.tar.gz
-curl -Os http://archive.apache.org/dist/cassandra/$CASS_V/apache-cassandra-$CASS_V-bin.tar.gz
-sudo tar xzf apache-cassandra-$CASS_V-bin.tar.gz -C /opt
-sudo ln -s /opt/apache-cassandra-$CASS_V /opt/cassandra
-
-rm -rf /ssd/cassandra /ssd/log/cassandra
-mkdir -p /ssd/cassandra /ssd/log/cassandra
-sudo chown -R cassandra:cassandra /ssd/cassandra /ssd/log/cassandra
-
-cp cassandra/cassandra.yaml cassandra/cassandra.yaml.mod
-cat <<EOF > cassandra/cass_conf_replace.sed
-s/- seeds: "\([^"]*\)"/- seeds: "$TFB_DBHOST"/
-s/listen_address: \(.*\)/listen_address: $TFB_DBHOST/
-s/rpc_address: \(.*\)/rpc_address: $TFB_DBHOST/
-EOF
-sed -i -f cassandra/cass_conf_replace.sed cassandra/cassandra.yaml.mod
-
-sudo cp -f cassandra/cassandra.init /etc/init.d/cassandra
-sudo cp -f cassandra/cassandra.init.env /etc/default/cassandra
-sudo cp -f cassandra/cassandra.yaml.mod /opt/apache-cassandra-$CASS_V/conf/cassandra.yaml
-sudo cp -f cassandra/log4j-server.properties /opt/apache-cassandra-$CASS_V/conf
-
-sudo update-rc.d cassandra defaults
-sudo service cassandra restart
-
-for i in {1..15}; do
-  nc -z $TFB_DBHOST 9160 && break || sleep 1;
-  echo "Waiting for Cassandra ($i/15}"
-done
-nc -z $TFB_DBHOST 9160
-if [ $? -eq 0 ]; then
-  cat cassandra/cleanup-keyspace.cql | /opt/apache-cassandra-$CASS_V/bin/cqlsh $TFB_DBHOST
-  python cassandra/db-data-gen.py > cassandra/tfb-data.cql
-  /opt/apache-cassandra-$CASS_V/bin/cqlsh -f cassandra/create-keyspace.cql $TFB_DBHOST
-  /opt/apache-cassandra-$CASS_V/bin/cqlsh -f cassandra/tfb-data.cql $TFB_DBHOST
-else
-  >&2 echo "Cassandra did not start, skipping"
-fi
-
-##############################
-# Elasticsearch
-##############################
-echo "Setting up Elasticsearch"
-
-export ES_V=1.5.0
-#wget -nv https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-$ES_V.tar.gz
-curl -Os https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-$ES_V.tar.gz
-sudo tar zxf elasticsearch-$ES_V.tar.gz -C /opt
-sudo ln -s /opt/elasticsearch-$ES_V /opt/elasticsearch
-
-rm -rf /ssd/elasticsearch /ssd/log/elasticsearch
-mkdir -p /ssd/elasticsearch /ssd/log/elasticsearch
-
-sudo cp elasticsearch/elasticsearch.yml /opt/elasticsearch/config
-sudo cp elasticsearch/elasticsearch /opt/elasticsearch
-
-/opt/elasticsearch/elasticsearch restart
-
-for i in {1..15}; do
-  nc -z $TFB_DBHOST 9200 && break || sleep 1;
-  echo "Waiting for Elasticsearch ($i/15}"
-done
-nc -z $TFB_DBHOST 9200
-if [ $? -eq 0 ]; then
-  sh elasticsearch/es-create-index.sh
-  python elasticsearch/es-db-data-gen.py > elasticsearch/tfb-data.json
-  curl -sS -D - -o /dev/null -XPOST localhost:9200/tfb/world/_bulk --data-binary @elasticsearch/tfb-data.json
-  echo "Elasticsearch DB populated"
-else
-  >&2 echo "Elasticsearch did not start, skipping"
-fi
-
-##############################
-# Redis
-##############################
-echo "Setting up Redis database"
-if [ "$TFB_DISTRIB_CODENAME" == "precise" ]; then
-  echo "WARNING: Downgrading Redis configuration for Ubuntu 12.04"
-
-  # On 12.04, Redis 2.4 is installed. It doesn't support
-  # some of the 2.6 options, so we have to remove or comment
-  # those
-  sed -i 's/tcp-keepalive/# tcp-keepalive/' redis.conf
-  sed -i 's/stop-writes-on-bgsave-error/# stop-writes-on-bgsave-error/' redis.conf
-  sed -i 's/rdbchecksum/# rdbchecksum/' redis.conf
-  sed -i 's/slave-read-only/# slave-read-only/' redis.conf
-  sed -i 's/repl-disable-tcp-nodelay/# repl-disable-tcp-nodelay/' redis.conf
-  sed -i 's/slave-priority/# slave-priority/' redis.conf
-  sed -i 's/auto-aof-rewrite-percentage/# auto-aof-rewrite-percentage/' redis.conf
-  sed -i 's/auto-aof-rewrite-min-size/# auto-aof-rewrite-min-size/' redis.conf
-
-  sed -i 's/lua-time-limit/# lua-time-limit/' redis.conf
-  sed -i 's/notify-keyspace-events/# notify-keyspace-events/' redis.conf
-  sed -i 's/hash-max-ziplist-entries/# hash-max-ziplist-entries/' redis.conf
-  sed -i 's/hash-max-ziplist-value/# hash-max-ziplist-value/' redis.conf
-  sed -i 's/zset-max-ziplist-entries/# zset-max-ziplist-entries/' redis.conf
-  sed -i 's/zset-max-ziplist-value/# zset-max-ziplist-value/' redis.conf
-  sed -i 's/client-output-buffer-limit/# client-output-buffer-limit/' redis.conf
-
-  sed -i 's/hz 10/# hz 10/' redis.conf
-  sed -i 's/aof-rewrite-incremental-fsync/# aof-rewrite-incremental-fsync/' redis.conf
-fi
-
-sudo service redis-server stop
-sudo mv redis.conf /etc/redis/redis.conf
-sudo service redis-server start
-bash create-redis.sh
-rm create-redis.sh

+ 21 - 0
toolset/setup/linux/databases/databases.sh

@@ -0,0 +1,21 @@
+#!/bin/bash
+
+RETCODE=$(fw_exists ${IROOT}/databases.installed)
+[ ! "$RETCODE" == 0 ] || { \
+  source $IROOT/databases.installed
+  return 0; }
+
+# Create a user-owned directory for our databases
+ssh $DBHOST 'bash' <<EOF
+sudo apt-get -y update
+sudo apt-get -y install -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' build-essential libev-dev libpq-dev libreadline6-dev lsb-core
+sudo mkdir -p /ssd
+sudo mkdir -p /ssd/log
+sudo chown -R $(whoami):$(whoami) /ssd
+
+id -u benchmarkdbuser &> /dev/null || sudo useradd benchmarkdbuser -p benchmarkdbpass
+EOF
+
+echo -e "" > $IROOT/databases.installed
+
+source $IROOT/databases.installed
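Each of the new database installer scripts follows this same marker-file pattern: return early when $IROOT/<name>.installed exists, otherwise run the remote setup over ssh and record the marker. A hedged Python rendering of the control flow (names are illustrative):

import os

def install_once(iroot, name, do_install):
    marker = os.path.join(iroot, name + '.installed')
    if os.path.exists(marker):
        return  # a previous run already installed this component
    do_install()  # e.g. the ssh $DBHOST 'bash' <<EOF ... EOF block above
    open(marker, 'w').close()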

+ 37 - 0
toolset/setup/linux/databases/mongodb.sh

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+fw_depends databases
+
+RETCODE=$(fw_exists ${IROOT}/mongodb.installed)
+[ ! "$RETCODE" == 0 ] || { \
+  source $IROOT/mongodb.installed
+  return 0; }
+
+# send over the required files
+scp $FWROOT/config/mongodb.conf $DBHOST:~/
+scp $FWROOT/config/create.js $DBHOST:~/
+
+# install mongodb on the database machine
+ssh $DBHOST 'bash' <<EOF
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
+sudo apt-get -y update
+sudo apt-get -y remove mongodb-clients
+sudo apt-get -y install -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' mongodb-org
+
+nc -zvv $DBHOST 27017 &> /dev/null && sudo service mongod stop
+sudo mv /etc/mongodb.conf /etc/mongodb.conf.orig
+sudo cp mongodb.conf /etc/mongodb.conf
+sudo mv mongodb.conf /etc/mongod.conf
+sudo rm -rf /ssd/mongodb
+sudo rm -rf /ssd/log/mongodb
+sudo cp -R -p /var/lib/mongodb /ssd/
+sudo cp -R -p /var/log/mongodb /ssd/log/
+nc -zvv $DBHOST 27017 &> /dev/null || sudo service mongod start
+EOF
+
+echo -e "ssh \$DBHOST 'bash' <<EOF" > $IROOT/mongodb.installed
+echo -e "mongo < create.js" >> $IROOT/mongodb.installed
+echo -e "EOF" >> $IROOT/mongodb.installed
+
+source $IROOT/mongodb.installed

+ 48 - 0
toolset/setup/linux/databases/mysql.sh

@@ -0,0 +1,48 @@
+#!/bin/bash
+
+fw_depends databases
+
+RETCODE=$(fw_exists ${IROOT}/mysql.installed)
+[ ! "$RETCODE" == 0 ] || { \
+  source $IROOT/mysql.installed
+  return 0; }
+
+# send over the required files
+scp $FWROOT/config/create.sql $DBHOST:~/
+scp $FWROOT/config/mysql $DBHOST:~/
+scp $FWROOT/config/mysql.conf $DBHOST:~/
+scp $FWROOT/config/my.cnf $DBHOST:~/
+scp $FWROOT/config/usr.sbin.mysqld $DBHOST:~/
+
+# install mysql on database machine
+ssh $DBHOST 'bash' <<EOF
+sudo DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-server
+
+sudo stop mysql
+
+sudo mv mysql /etc/init.d/mysql
+sudo chmod +x /etc/init.d/mysql
+sudo mv mysql.conf /etc/init/mysql.conf
+
+sudo mv /etc/mysql/my.cnf /etc/mysql/my.cnf.orig
+sudo mv my.cnf /etc/mysql/my.cnf
+
+sudo rm -rf /ssd/mysql
+sudo rm -rf /ssd/log/mysql
+sudo cp -R -p /var/lib/mysql /ssd/
+sudo cp -R -p /var/log/mysql /ssd/log
+sudo cp usr.sbin.mysqld /etc/apparmor.d/
+sudo /etc/init.d/apparmor reload
+sudo start mysql
+
+mysql -uroot -psecret -e'quit' &> /dev/null || sudo mysqladmin -u root password secret
+EOF
+
+# Install the mysql client
+sudo apt-get install -y mysql-client
+
+echo -e "ssh \$DBHOST 'bash' <<EOF" > $IROOT/mysql.installed
+echo -e "mysql -uroot -psecret < create.sql" >> $IROOT/mysql.installed
+echo -e "EOF" >> $IROOT/mysql.installed
+
+source $IROOT/mysql.installed

+ 56 - 0
toolset/setup/linux/databases/postgresql.sh

@@ -0,0 +1,56 @@
+#!/bin/bash
+
+fw_depends databases
+
+RETCODE=$(fw_exists ${IROOT}/postgresql.installed)
+[ ! "$RETCODE" == 0 ] || { \
+  source $IROOT/postgresql.installed
+  return 0; }
+
+# delete any old required files that do not belong to us as
+# scp will fail otherwise
+ssh $DBHOST 'bash' <<EOF
+  sudo rm -rf create-postgres-database.sql
+  sudo rm -rf create-postgres.sql
+EOF
+
+# send over the required files
+scp $FWROOT/config/postgresql.conf $DBHOST:~/
+scp $FWROOT/config/pg_hba.conf $DBHOST:~/
+scp $FWROOT/config/60-postgresql-shm.conf $DBHOST:~/
+scp $FWROOT/config/create-postgres-database.sql $DBHOST:~/
+scp $FWROOT/config/create-postgres.sql $DBHOST:~/
+
+ssh $DBHOST 'bash' <<EOF
+# install postgresql on database machine
+sudo apt-get -y update
+sudo apt-get -y install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" postgresql
+
+# This will support all 9.* versions depending on the machine
+service postgresql status &> /dev/null && sudo service postgresql stop
+# Because postgresql...
+sudo killall -9 -u postgres
+
+# Make sure all the configuration files in main belong to postgres
+PG_VERSION=`pg_config --version | grep -oP '\d\.\d'`
+sudo mv postgresql.conf /etc/postgresql/\${PG_VERSION}/main/postgresql.conf
+sudo mv pg_hba.conf /etc/postgresql/\${PG_VERSION}/main/pg_hba.conf
+
+sudo chown -Rf postgres:postgres /etc/postgresql/\${PG_VERSION}/main
+
+sudo rm -rf /ssd/postgresql
+sudo cp -R -p /var/lib/postgresql/\${PG_VERSION}/main /ssd/postgresql
+sudo mv 60-postgresql-shm.conf /etc/sysctl.d/60-postgresql-shm.conf
+
+sudo chown postgres:postgres /etc/sysctl.d/60-postgresql-shm.conf
+sudo chown postgres:postgres create-postgres*
+
+service postgresql status &> /dev/null || sudo service postgresql start
+EOF
+
+echo -e "ssh \$DBHOST <<EOF" > $IROOT/postgresql.installed
+echo "sudo -u postgres psql -q template1 < create-postgres-database.sql" >> $IROOT/postgresql.installed
+echo "sudo -u postgres psql -q hello_world < create-postgres.sql" >> $IROOT/postgresql.installed
+echo "EOF" >> $IROOT/postgresql.installed
+
+source $IROOT/postgresql.installed

+ 1 - 1
toolset/setup/linux/frameworks/ffead-cpp-apache.sh

@@ -9,7 +9,7 @@ cd unixODBC-2.3.4
 ./configure --enable-stats=no --enable-gui=no --enable-drivers=no --enable-iconv --with-iconv-char-enc=UTF8 --with-iconv-ucode-enc=UTF16LE --libdir=/usr/lib/x86_64-linux-gnu --prefix=/usr --sysconfdir=/etc
 sudo make install

-sudo apt-get install build-essential
+sudo apt-get install -y build-essential
 sudo apt-get install -y uuid-dev libmyodbc odbc-postgresql

 fw_get -o ffead-cpp-2.0.tar.gz https://github.com/sumeetchhetri/ffead-cpp/releases/download/2.0/ffead-cpp-2.0-te-bin.tar.gz

+ 1 - 1
toolset/setup/linux/frameworks/ffead-cpp-nginx.sh

@@ -9,7 +9,7 @@ cd unixODBC-2.3.4
 ./configure --enable-stats=no --enable-gui=no --enable-drivers=no --enable-iconv --with-iconv-char-enc=UTF8 --with-iconv-ucode-enc=UTF16LE --libdir=/usr/lib/x86_64-linux-gnu --prefix=/usr --sysconfdir=/etc
 sudo make install

-sudo apt-get install build-essential
+sudo apt-get install -y build-essential
 sudo apt-get install -y uuid-dev libmyodbc odbc-postgresql

 fw_get -o ffead-cpp-2.0.tar.gz https://github.com/sumeetchhetri/ffead-cpp/releases/download/2.0/ffead-cpp-2.0-te-bin.tar.gz

+ 2 - 5
toolset/setup/linux/frameworks/ffead-cpp.sh

@@ -3,7 +3,7 @@
 RETCODE=$(fw_exists ${IROOT}/ffead-cpp.installed)
 [ ! "$RETCODE" == 0 ] || { return 0; }

-sudo apt-get remove libodbc1 unixodbc unixodbc-dev
+sudo apt-get remove -y libodbc1 unixodbc unixodbc-dev

 fw_get -o unixODBC-2.3.4.tar.gz ftp://ftp.unixodbc.org/pub/unixODBC/unixODBC-2.3.4.tar.gz
 fw_untar unixODBC-2.3.4.tar.gz
@@ -11,7 +11,7 @@ cd unixODBC-2.3.4
 ./configure --enable-stats=no --enable-gui=no --enable-drivers=no --enable-iconv --with-iconv-char-enc=UTF8 --with-iconv-ucode-enc=UTF16LE --libdir=/usr/lib/x86_64-linux-gnu --prefix=/usr --sysconfdir=/etc
 sudo make install

-sudo apt-get install build-essential
+sudo apt-get install -y build-essential
 sudo apt-get install -y uuid-dev libmyodbc odbc-postgresql

 fw_get -o ffead-cpp-2.0.tar.gz https://github.com/sumeetchhetri/ffead-cpp/releases/download/2.0/ffead-cpp-2.0-te-bin.tar.gz
@@ -22,9 +22,6 @@ cp -R ffead-cpp-2.0-bin/ ${TROOT}
 mv ${TROOT}/ffead-cpp-2.0-bin ${TROOT}/ffead-cpp-2.0
 rm -rf ffead-cpp-2.0/

-sudo chown -R testrunner:testrunner ${TROOT}/ffead-cpp-2.0
-sudo chmod -R g+rw ${TROOT}/ffead-cpp-2.0
-
 sudo sed -i 's|localhost|'${DBHOST}'|g' ${TROOT}/ffead-cpp-2.0/web/te-benchmark/config/sdorm*

 sudo rm -f /etc/odbcinst.ini

+ 1 - 1
toolset/setup/linux/frameworks/jester.sh

@@ -14,7 +14,7 @@ cd jester
 # 2015-06-25
 git checkout 71b8cc069a0d271d619c2dc41bc6479047885587
 nimble update
-# If /home/testrunner/.nimble/pkgs/jester exists, write over it.
+# If ~/.nimble/pkgs/jester exists, write over it.
 echo 'y' | nimble install

 echo "export JESTER_HOME=${JESTER}" > $IROOT/jester.installed

+ 2 - 0
toolset/setup/linux/frameworks/lwan.sh

@@ -1,5 +1,7 @@
 #!/bin/bash

+fw_depends libjemalloc-dev libluajit-5.1-dev
+
 RETCODE=$(fw_exists ${IROOT}/lwan.installed)
 [ ! "$RETCODE" == 0 ] || { \
   source $IROOT/lwan.installed

+ 7 - 12
toolset/setup/linux/frameworks/phalcon.sh

@@ -1,6 +1,6 @@
 #!/bin/bash

-fw_depends php5
+fw_depends php7

 RETCODE=$(fw_exists ${IROOT}/phalcon.installed)
 [ ! "$RETCODE" == 0 ] || { \
@@ -10,17 +10,12 @@ RETCODE=$(fw_exists ${IROOT}/phalcon.installed)
 # Enable the PHP phalcon extension
 sed -i 's|;extension=phalcon.so|extension=phalcon.so|g' $PHP_HOME/lib/php.ini

-fw_get -O https://github.com/phalcon/cphalcon/archive/phalcon-v2.0.13.tar.gz
-fw_untar phalcon-v2.0.13.tar.gz
-cd cphalcon-phalcon-v2.0.13/build/64bits 
-$PHP_HOME/bin/phpize
-# For some reason we have to point to php-config 
-# explicitly, it's not found by the prefix settings
-./configure --prefix=$PHP_HOME --exec-prefix=$PHP_HOME \
-  --with-php-config=$PHP_HOME/bin/php-config \
-  --enable-phalcon --quiet
-make --quiet
-make install
+VERSION="3.0.2"
+
+fw_get -O https://github.com/phalcon/cphalcon/archive/v${VERSION}.tar.gz
+fw_untar v${VERSION}.tar.gz
+cd cphalcon-${VERSION}/build
+./install
 
 
 echo "" > $IROOT/phalcon.installed
 echo "" > $IROOT/phalcon.installed
 
 

+ 1 - 1
toolset/setup/linux/frameworks/silicon.sh

@@ -1,6 +1,6 @@
 #!/bin/bash

-fw_depends clang-3.8 gcc-4.9
+fw_depends libboost-dev clang-3.8 gcc-4.9

 RETCODE=$(fw_exists ${IROOT}/silicon.installed)
 [ ! "$RETCODE" == 0 ] || { \

+ 0 - 113
toolset/setup/linux/installer.py

@@ -1,113 +0,0 @@
-import subprocess
-import os
-import os.path
-import time
-import traceback
-import sys
-import glob
-import logging
-import setup_util
-
-from benchmark.utils import gather_tests
-
-class Installer:
-
-  ############################################################
-  # install_software
-  ############################################################
-  def install_software(self):
-    linux_install_root = self.fwroot + "/toolset/setup/linux"
-    imode = self.benchmarker.install
-
-    script_vars = {
-      'TFB_DBHOST': self.benchmarker.database_host
-    }
-    l=[]
-    for k,v in script_vars.iteritems():
-      l.append("export %s=%s" % (k,v))
-    script_vars_str = "\n".join(l) + "\n\n"
-    
-    if imode == 'all' or imode == 'database':
-      print("\nINSTALL: Installing database software\n")   
-      self.__run_command("cd .. && " + self.benchmarker.database_sftp_string(batch_file="../config/database_sftp_batch"), True)
-      with open (linux_install_root + "/database.sh", "r") as myfile:
-        print("\nINSTALL: %s" % self.benchmarker.database_ssh_string)
-        p = subprocess.Popen(self.benchmarker.database_ssh_string.split(" ") +
-                             ["bash"], stdin=subprocess.PIPE)
-        remote_script = myfile.read()
-        p.communicate(script_vars_str + remote_script)
-        returncode = p.returncode
-        if returncode != 0:
-          self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.database_ssh_string))
-      print("\nINSTALL: Finished installing database software\n")
-
-    if imode == 'all' or imode == 'client':
-      print("\nINSTALL: Installing client software\n")    
-      with open (linux_install_root + "/client.sh", "r") as myfile:
-        remote_script=myfile.read()
-        print("\nINSTALL: %s" % self.benchmarker.client_ssh_string)
-        p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" ") + ["bash"], stdin=subprocess.PIPE)
-        p.communicate(remote_script)
-        returncode = p.returncode
-        if returncode != 0:
-          self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.client_ssh_string))
-      print("\nINSTALL: Finished installing client software\n")
-  ############################################################
-  # End install_software
-  ############################################################
-
-  ############################################################
-  # __install_error
-  ############################################################
-  def __install_error(self, message):
-    print("\nINSTALL ERROR: %s\n" % message)
-    if self.benchmarker.install_error_action == 'abort':
-      sys.exit("Installation aborted.")
-  ############################################################
-  # End __install_error
-  ############################################################
-
-  ############################################################
-  # __run_command
-  ############################################################
-  def __run_command(self, command, send_yes=False, cwd=None):
-    if cwd is None: 
-        cwd = self.install_dir
-
-    if send_yes:
-      command = "yes yes | " + command
-        
-    rel_cwd = setup_util.path_relative_to_root(cwd)
-    print("INSTALL: %s (cwd=$FWROOT/%s)" % (command, rel_cwd))
-
-    try:
-      subprocess.check_call(command, shell=True, cwd=cwd, executable='/bin/bash')
-    except:
-      exceptionType, exceptionValue, exceptionTraceBack = sys.exc_info()
-      error_message = "".join(traceback.format_exception_only(exceptionType, exceptionValue))
-      self.__install_error(error_message)
-  ############################################################
-  # End __run_command
-  ############################################################
-
-  ############################################################
-  # __init__(benchmarker)
-  ############################################################
-  def __init__(self, benchmarker, install_strategy):
-    self.benchmarker = benchmarker
-    self.install_dir = "installs"
-    self.fwroot = benchmarker.fwroot
-    self.strategy = install_strategy
-    
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-
-    try:
-      os.mkdir(self.install_dir)
-    except OSError:
-      pass
-  ############################################################
-  # End __init__
-  ############################################################
-
-# vim: sw=2

+ 4 - 2
toolset/setup/linux/languages/dlang.sh

@@ -1,10 +1,12 @@
 #!/bin/bash

+fw_depends xdg-utils
+
 RETCODE=$(fw_exists ${IROOT}/dlang.installed)
 RETCODE=$(fw_exists ${IROOT}/dlang.installed)
 [ ! "$RETCODE" == 0 ] || { \
 [ ! "$RETCODE" == 0 ] || { \
   source $IROOT/dlang.installed
   source $IROOT/dlang.installed
-  return 0; }
-  
+  return 0; }  
+
 DLANG=$IROOT/dlang
 DLANG=$IROOT/dlang
 DMDVER="2.071.1"
 DMDVER="2.071.1"
 LDCVER="1.0.0"
 LDCVER="1.0.0"

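`fw_depends xdg-utils` moves the dlang prerequisite out of the global apt list (see the prerequisites.sh hunk at the end of this diff, which drops `xdg-utils`). A plausible shape for such a dependency script, assuming it follows the same fw_exists marker-file guard the language scripts use; this is a sketch, not the toolset's actual xdg-utils.sh:

    #!/bin/bash
    # Skip the work if a previous run left its marker file behind.
    RETCODE=$(fw_exists ${IROOT}/xdg-utils.installed)
    [ ! "$RETCODE" == 0 ] || { return 0; }

    sudo apt-get install -qqy xdg-utils
    touch ${IROOT}/xdg-utils.installed
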
+ 2 - 2
toolset/setup/linux/languages/php5.sh

@@ -22,8 +22,8 @@ cd php5
 echo "Configuring PHP5 quietly..."
 echo "Configuring PHP5 quietly..."
 ./configure --prefix=$PHP_HOME --with-pdo-mysql \
 ./configure --prefix=$PHP_HOME --with-pdo-mysql \
   --with-mysql --with-mcrypt --enable-intl --enable-mbstring \
   --with-mysql --with-mcrypt --enable-intl --enable-mbstring \
-  --enable-fpm --with-fpm-user=testrunner --with-fpm-group=testrunner \
-  --with-openssl --with-mysqli --with-zlib --enable-opcache --quiet
+  --enable-fpm --with-openssl --with-mysqli --with-zlib \
+  --enable-opcache --quiet
 echo "Making PHP5 quietly..."
 echo "Making PHP5 quietly..."
 make --quiet
 make --quiet
 echo "Installing PHP5 quietly"
 echo "Installing PHP5 quietly"

+ 2 - 2
toolset/setup/linux/languages/php7.sh

@@ -17,8 +17,8 @@ cd php7
 echo "Configuring PHP quietly..."
 echo "Configuring PHP quietly..."
 ./configure --prefix=$PHP_HOME --with-pdo-mysql \
 ./configure --prefix=$PHP_HOME --with-pdo-mysql \
   --with-mcrypt --enable-intl --enable-mbstring \
   --with-mcrypt --enable-intl --enable-mbstring \
-  --enable-fpm --with-fpm-user=testrunner --with-fpm-group=testrunner \
-  --with-openssl --with-mysqli --with-zlib --enable-opcache --quiet
+  --enable-fpm --with-openssl --with-mysqli \
+  --with-zlib --enable-opcache --quiet
 echo "Making PHP quietly..."
 echo "Making PHP quietly..."
 make --quiet
 make --quiet
 echo "Installing PHP quietly"
 echo "Installing PHP quietly"

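Both PHP hunks drop the compile-time --with-fpm-user/--with-fpm-group=testrunner flags. Those flags only set defaults baked into the binary; the pool identity can equally be set at runtime, which is presumably why this commit also touches config/php-fpm.conf. A sketch of the runtime equivalent (the pool section name is an assumption):

    # Append the pool identity to the FPM config instead of fixing it
    # at ./configure time.
    cat >> $PHP_HOME/etc/php-fpm.conf <<'EOF'
    [www]
    user = testrunner
    group = testrunner
    EOF
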
+ 1 - 1
toolset/setup/linux/languages/ruby-2.0.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends rvm
+fw_depends llvm-dev rvm
 
 RETCODE=$(fw_exists ${IROOT}/ruby-2.0.installed)
 [ ! "$RETCODE" == 0 ] || { \

+ 1 - 1
toolset/setup/linux/languages/ruby-2.1.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends rvm
+fw_depends llvm-dev rvm
 
 RETCODE=$(fw_exists ${IROOT}/ruby-2.1.installed)
 [ ! "$RETCODE" == 0 ] || { \

+ 1 - 1
toolset/setup/linux/languages/ruby-2.2.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-fw_depends rvm
+fw_depends llvm-dev rvm
 
 RETCODE=$(fw_exists ${IROOT}/ruby-2.2.installed)
 [ ! "$RETCODE" == 0 ] || { \

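All three Ruby scripts now name llvm-dev ahead of rvm, matching the `# Required for correct Ruby installation` note deleted from prerequisites.sh below. The fw_depends helper itself is not shown in this diff; the idiom it supports is roughly the following sketch (an assumption about its shape, not the real implementation):

    # Resolve each named dependency in order, so llvm-dev is installed
    # before rvm starts compiling a Ruby.
    fw_depends() {
      local dep
      for dep in "$@"; do
        source $FWROOT/toolset/setup/linux/*/${dep}.sh
      done
    }
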
+ 1 - 1
toolset/setup/linux/languages/rust.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-RUST_VERSION="1.9.0"
+RUST_VERSION="1.13.0"
 
 RETCODE=$(fw_exists $IROOT/rust.installed)
 [ ! "$RETCODE" == 0 ] || { \

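The Rust toolchain is pinned by version string, so the bump from 1.9.0 to 1.13.0 is a one-line change. The rest of rust.sh is not shown here; a sketch of how such a pinned install typically proceeds, using the standalone-installer URL pattern from static.rust-lang.org (the actual steps in rust.sh are an assumption):

    RUST_VERSION="1.13.0"
    # Fetch and unpack the standalone installer for the pinned version,
    # then install into the toolset's isolated prefix.
    curl -sSO https://static.rust-lang.org/dist/rust-${RUST_VERSION}-x86_64-unknown-linux-gnu.tar.gz
    tar xzf rust-${RUST_VERSION}-x86_64-unknown-linux-gnu.tar.gz
    ./rust-${RUST_VERSION}-x86_64-unknown-linux-gnu/install.sh --prefix=$IROOT/rust
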
+ 1 - 9
toolset/setup/linux/prerequisites.sh

@@ -9,8 +9,7 @@ RETCODE=$(fw_exists fwbm_prereqs_installed)
 [ ! "$RETCODE" == 0 ] || { \
 [ ! "$RETCODE" == 0 ] || { \
   echo "Prerequisites installed!"; 
   echo "Prerequisites installed!"; 
   return 0; }
   return 0; }
-
-
+  
 # Use a more recent version of Mongo shell
 # Use a more recent version of Mongo shell
 sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
 sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
 echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
 echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
@@ -47,14 +46,7 @@ sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options:
   liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev \
   liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev \
   libjson0-dev libmcrypt-dev libicu-dev gettext \
   libjson0-dev libmcrypt-dev libicu-dev gettext \
   libpq-dev mlton \
   libpq-dev mlton \
-  libjemalloc-dev libluajit-5.1-dev `# Needed by lwan at least` \
-  libhiredis-dev                    `# Redis client - Needed by ngx_mruby at least` \
   cloc dstat                        `# Collect resource usage statistics` \
   cloc dstat                        `# Collect resource usage statistics` \
-  libsasl2-dev                      `# Needed by mgo for go-mongodb test` \
-  llvm-dev                          `# Required for correct Ruby installation` \
-  libboost-dev                      `# Silicon relies on boost::lexical_cast.` \
-  postgresql-server-dev-9.3         `# Needed by cpoll.` \
-  xdg-utils                         `# Needed by dlang.` \
   python-pip
   python-pip
 
 
 sudo pip install colorama==0.3.1
 sudo pip install colorama==0.3.1
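
The deleted entries show the pattern this hunk relies on: per-framework packages leave the global apt list and become fw_depends scripts, while the backquoted annotations document whatever remains. Those annotations work because a backquoted `# comment` is a command substitution containing only a comment, which expands to the empty string; a minimal demonstration:

    # Each continued line can carry an inline note without breaking the
    # single apt-get invocation, since `# ...` substitutes to nothing.
    sudo apt-get -qqy install \
      cloc dstat   `# Collect resource usage statistics` \
      python-pip   `# Used below to install colorama`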