implement test types 1, 2, 3, 5 and 6 with Scala, Spray + Elasticsearch
upgrade sbt

marko asplund committed 10 years ago
commit 5af5f372fd

+ 1 - 0
.travis.yml

@@ -140,6 +140,7 @@ env:
     - "TESTDIR=Scala/scalatra"
     - "TESTDIR=Scala/scalatra"
     - "TESTDIR=Scala/scruffy"
     - "TESTDIR=Scala/scruffy"
     - "TESTDIR=Scala/spray"
     - "TESTDIR=Scala/spray"
+    - "TESTDIR=Scala/spray-es"
     - "TESTDIR=Scala/unfiltered"
     - "TESTDIR=Scala/unfiltered"
     - "TESTDIR=Ur/urweb"
     - "TESTDIR=Ur/urweb"
 
 

+ 2 - 0
config/database_sftp_batch

@@ -13,5 +13,7 @@ put config/mysql
 put config/mysql.conf
 -mkdir cassandra
 put -r config/cassandra
+-mkdir elasticsearch
+put -r config/elasticsearch
 put config/redis.conf
 put config/create-redis.sh

+ 24 - 0
config/elasticsearch/elasticsearch

@@ -0,0 +1,24 @@
+#!/bin/bash
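+#
+# Minimal start/stop/restart wrapper for the Elasticsearch daemon; the pid
+# file written via -p lets stop() signal the right process.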
+
+start() {
+  /opt/elasticsearch/bin/elasticsearch -d -p /ssd/elasticsearch/es.pid
+}
+
+stop() {
+  kill -HUP `cat /ssd/elasticsearch/es.pid`
+}
+
+case "$1" in
+  start)
+    start
+    ;;
+  stop)
+    stop
+    ;;
+  restart)
+    stop
+    sleep 10
+    start
+    ;;
+esac
+

+ 389 - 0
config/elasticsearch/elasticsearch.yml

@@ -0,0 +1,389 @@
+##################### Elasticsearch Configuration Example #####################
+
+# This file contains an overview of various configuration settings,
+# targeted at operations staff. Application developers should
+# consult the guide at <http://elasticsearch.org/guide>.
+#
+# The installation procedure is covered at
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
+#
+# Elasticsearch comes with reasonable defaults for most settings,
+# so you can try it out without bothering with configuration.
+#
+# Most of the time, these defaults are just fine for running a production
+# cluster. If you're fine-tuning your cluster, or wondering about the
+# effect of a certain configuration option, please _do ask_ on the
+# mailing list or IRC channel [http://elasticsearch.org/community].
+
+# Any element in the configuration can be replaced with environment variables
+# by placing them in ${...} notation. For example:
+#
+#node.rack: ${RACK_ENV_VAR}
+
+# For information on supported formats and syntax for the config file, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
+
+
+################################### Cluster ###################################
+
+# Cluster name identifies your cluster for auto-discovery. If you're running
+# multiple clusters on the same network, make sure you're using unique names.
+#
+#cluster.name: elasticsearch
+
+
+#################################### Node #####################################
+
+# Node names are generated dynamically on startup, so you're relieved of
+# configuring them manually. You can tie this node to a specific name:
+#
+#node.name: "Franz Kafka"
+
+# Every node can be configured to allow or deny being eligible as the master,
+# and to allow or deny storing data.
+#
+# Allow this node to be eligible as a master node (enabled by default):
+#
+#node.master: true
+#
+# Allow this node to store data (enabled by default):
+#
+#node.data: true
+
+# You can exploit these settings to design advanced cluster topologies.
+#
+# 1. You want this node to never become a master node, only to hold data.
+#    This will be the "workhorse" of your cluster.
+#
+#node.master: false
+#node.data: true
+#
+# 2. You want this node to only serve as a master: to not store any data and
+#    to have free resources. This will be the "coordinator" of your cluster.
+#
+#node.master: true
+#node.data: false
+#
+# 3. You want this node to be neither master nor data node, but
+#    to act as a "search load balancer" (fetching data from nodes,
+#    aggregating results, etc.)
+#
+#node.master: false
+#node.data: false
+
+# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
+# Node Info API [http://localhost:9200/_nodes] or GUI tools
+# such as <http://www.elasticsearch.org/overview/marvel/>,
+# <http://github.com/karmi/elasticsearch-paramedic>,
+# <http://github.com/lukas-vlcek/bigdesk> and
+# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
+
+# A node can have generic attributes associated with it, which can later be used
+# for customized shard allocation filtering, or allocation awareness. An attribute
+# is a simple key-value pair, similar to node.key: value; here is an example:
+#
+#node.rack: rack314
+
+# By default, multiple nodes are allowed to start from the same installation
+# location; to disable this, set the following:
+#node.max_local_storage_nodes: 1
+
+
+#################################### Index ####################################
+
+# You can set a number of options (such as shard/replica options, mapping
+# or analyzer definitions, translog settings, ...) for indices globally,
+# in this file.
+#
+# Note that it makes more sense to configure index settings specifically for
+# a certain index, either when creating it or by using the index templates API.
+#
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
+# for more information.
+
+# Set the number of shards (splits) of an index (5 by default):
+#
+#index.number_of_shards: 5
+
+# Set the number of replicas (additional copies) of an index (1 by default):
+#
+#index.number_of_replicas: 1
+
+# Note that for development on a local machine, with small indices, it usually
+# makes sense to "disable" the distributed features:
+#
+#index.number_of_shards: 1
+#index.number_of_replicas: 0
+
+# These settings directly affect the performance of index and search operations
+# in your cluster. Assuming you have enough machines to hold shards and
+# replicas, the rule of thumb is:
+#
+# 1. Having more *shards* enhances the _indexing_ performance and allows you to
+#    _distribute_ a big index across machines.
+# 2. Having more *replicas* enhances the _search_ performance and improves the
+#    cluster _availability_.
+#
+# The "number_of_shards" is a one-time setting for an index.
+#
+# The "number_of_replicas" can be increased or decreased anytime,
+# by using the Index Update Settings API.
+#
+# Elasticsearch takes care of load balancing, relocating, gathering the
+# results from nodes, etc. Experiment with different settings to fine-tune
+# your setup.
+
+# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
+# the index status.
+
+
+#################################### Paths ####################################
+
+# Path to directory containing configuration (this file and logging.yml):
+#
+#path.conf: /path/to/conf
+
+# Path to directory where to store index data allocated for this node.
+#
+#path.data: /path/to/data
+path.data: /ssd/elasticsearch/data
+
+#
+# Can optionally include more than one location, causing data to be striped across
+# the locations (a la RAID 0) on a file level, favouring locations with the
+# most free space on creation. For example:
+#
+#path.data: /path/to/data1,/path/to/data2
+
+# Path to temporary files:
+#
+#path.work: /path/to/work
+path.work: /ssd/elasticsearch/work
+
+# Path to log files:
+#
+#path.logs: /path/to/logs
+path.logs: /ssd/log/elasticsearch
+
+# Path to where plugins are installed:
+#
+#path.plugins: /path/to/plugins
+
+
+#################################### Plugin ###################################
+
+# If a plugin listed here is not installed for the current node, the node will not start.
+#
+#plugin.mandatory: mapper-attachments,lang-groovy
+
+
+################################### Memory ####################################
+
+# Elasticsearch performs poorly when the JVM starts swapping: you should ensure that
+# it _never_ swaps.
+#
+# Set this property to true to lock the memory:
+#
+#bootstrap.mlockall: true
+
+# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
+# to the same value, and that the machine has enough memory to allocate
+# for Elasticsearch, leaving enough memory for the operating system itself.
+#
+# You should also make sure that the Elasticsearch process is allowed to lock
+# the memory, e.g. by using `ulimit -l unlimited`.
+
+
+############################## Network And HTTP ###############################
+
+# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
+# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
+# communication. (The range means that if the port is busy, it will automatically
+# try the next port).
+
+# Set the bind address specifically (IPv4 or IPv6):
+#
+#network.bind_host: 192.168.0.1
+
+# Set the address other nodes will use to communicate with this node. If not
+# set, it is automatically derived. It must point to an actual IP address.
+#
+#network.publish_host: 192.168.0.1
+
+# Set both 'bind_host' and 'publish_host':
+#
+#network.host: 192.168.0.1
+
+# Set a custom port for the node to node communication (9300 by default):
+#
+#transport.tcp.port: 9300
+
+# Enable compression for all communication between nodes (disabled by default):
+#
+#transport.tcp.compress: true
+
+# Set a custom port to listen for HTTP traffic:
+#
+#http.port: 9200
+
+# Set a custom allowed content length:
+#
+#http.max_content_length: 100mb
+
+# Disable HTTP completely:
+#
+#http.enabled: false
+
+
+################################### Gateway ###################################
+
+# The gateway allows for persisting the cluster state between full cluster
+# restarts. Every change to the state (such as adding an index) will be stored
+# in the gateway, and when the cluster starts up for the first time,
+# it will read its state from the gateway.
+
+# There are several types of gateway implementations. For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
+
+# The default gateway type is the "local" gateway (recommended):
+#
+#gateway.type: local
+
+# Settings below control how and when to start the initial recovery process on
+# a full cluster restart (to reuse as much local data as possible when using shared
+# gateway).
+
+# Allow recovery process after N nodes in a cluster are up:
+#
+#gateway.recover_after_nodes: 1
+
+# Set the timeout to initiate the recovery process, once the N nodes
+# from previous setting are up (accepts time value):
+#
+#gateway.recover_after_time: 5m
+
+# Set how many nodes are expected in this cluster. Once these N nodes
+# are up (and recover_after_nodes is met), begin recovery process immediately
+# (without waiting for recover_after_time to expire):
+#
+#gateway.expected_nodes: 2
+
+
+############################# Recovery Throttling #############################
+
+# These settings allow you to control the process of shard allocation between
+# nodes during initial recovery, replica allocation, rebalancing,
+# or when adding and removing nodes.
+
+# Set the number of concurrent recoveries happening on a node:
+#
+# 1. During the initial recovery
+#
+#cluster.routing.allocation.node_initial_primaries_recoveries: 4
+#
+# 2. During adding/removing nodes, rebalancing, etc
+#
+#cluster.routing.allocation.node_concurrent_recoveries: 2
+
+# Set to throttle throughput when recovering (e.g. 100mb, by default 20mb):
+#
+#indices.recovery.max_bytes_per_sec: 20mb
+
+# Set to limit the number of open concurrent streams when
+# recovering a shard from a peer:
+#
+#indices.recovery.concurrent_streams: 5
+
+
+################################## Discovery ##################################
+
+# The discovery infrastructure ensures nodes can be found within a cluster
+# and a master node is elected. Multicast discovery is the default.
+
+# Set to ensure a node sees N other master-eligible nodes to be considered
+# operational within the cluster. This should be set to a quorum/majority of 
+# the master-eligible nodes in the cluster.
+#
+#discovery.zen.minimum_master_nodes: 1
+
+# Set the time to wait for ping responses from other nodes when discovering.
+# Set this option to a higher value on a slow or congested network
+# to minimize discovery failures:
+#
+#discovery.zen.ping.timeout: 3s
+
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
+
+# Unicast discovery allows you to explicitly control which nodes will be used
+# to discover the cluster. It can be used when multicast is not present,
+# or to restrict the cluster communication-wise.
+#
+# 1. Disable multicast discovery (enabled by default):
+#
+#discovery.zen.ping.multicast.enabled: false
+#
+# 2. Configure an initial list of master nodes in the cluster
+#    to perform discovery when new nodes (master or data) are started:
+#
+#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
+
+# EC2 discovery allows using the AWS EC2 API in order to perform discovery.
+#
+# You have to install the cloud-aws plugin to enable EC2 discovery.
+#
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
+#
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
+# for a step-by-step tutorial.
+
+# GCE discovery allows using the Google Compute Engine API in order to perform discovery.
+#
+# You have to install the cloud-gce plugin to enable GCE discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
+
+# Azure discovery allows using the Azure API in order to perform discovery.
+#
+# You have to install the cloud-azure plugin to enable Azure discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
+
+################################## Slow Log ##################################
+
+# Shard level query and fetch threshold logging.
+
+#index.search.slowlog.threshold.query.warn: 10s
+#index.search.slowlog.threshold.query.info: 5s
+#index.search.slowlog.threshold.query.debug: 2s
+#index.search.slowlog.threshold.query.trace: 500ms
+
+#index.search.slowlog.threshold.fetch.warn: 1s
+#index.search.slowlog.threshold.fetch.info: 800ms
+#index.search.slowlog.threshold.fetch.debug: 500ms
+#index.search.slowlog.threshold.fetch.trace: 200ms
+
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+
+################################## GC Logging ################################
+
+#monitor.jvm.gc.young.warn: 1000ms
+#monitor.jvm.gc.young.info: 700ms
+#monitor.jvm.gc.young.debug: 400ms
+
+#monitor.jvm.gc.old.warn: 10s
+#monitor.jvm.gc.old.info: 5s
+#monitor.jvm.gc.old.debug: 2s
+
+################################## Security ################################
+
+# Uncomment if you want to enable JSONP as a valid return transport on the
+# http server. Enabling it may pose a security risk, so it is recommended to
+# leave it disabled unless you need it (it is disabled by default).
+#
+#http.jsonp.enable: true

+ 21 - 0
config/elasticsearch/es-create-index.sh

@@ -0,0 +1,21 @@
+#!/bin/bash
+
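+# Drop any existing "tfb" index, then recreate it with a single shard and an
+# explicit mapping for the "world" type used by the benchmark.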
+curl -XDELETE http://localhost:9200/tfb
+
+curl -XPUT 'http://localhost:9200/tfb' -d '
+{
+  "settings": {
+    "index": {
+      "number_of_shards": 1,
+      "number_of_replicas": 1
+    }
+  },
+  "mappings": {
+    "world": {
+      "properties": {
+        "randomNumber": { "type" : "integer", "index" : "not_analyzed" }
+      }
+    }
+  }
+}
+'
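
To sanity-check the result, the mapping can be read back over HTTP. A minimal
spray-client sketch (not part of the commit; MappingCheck is a made-up name,
and an Elasticsearch node is assumed on localhost:9200):

    import akka.actor.ActorSystem
    import scala.concurrent.Await
    import scala.concurrent.duration._
    import spray.client.pipelining._

    object MappingCheck extends App {
      implicit val system = ActorSystem("mapping-check")
      import system.dispatcher
      val pipeline = sendReceive
      // A 200 response carrying the mapping JSON confirms that the tfb index
      // and the world type were created.
      val res = Await.result(
        pipeline(Get("http://localhost:9200/tfb/world/_mapping")), 10.seconds)
      println(res.entity.asString)
      system.shutdown()
    }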

+ 5 - 0
config/elasticsearch/es-db-data-gen.py

@@ -0,0 +1,5 @@
+from random import randint
+
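+# Emit Elasticsearch bulk-API action/source line pairs for ids 1..10000,
+# each document carrying a random "randomNumber" in [1, 10000].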
+for i in range(1, 10001):
+  print """{ "index" : { "_id" : "%s" } }
+{ "randomNumber" : %s }""" % (i, randint(1, 10000))

+ 11 - 0
frameworks/Scala/spray-es/README.md

@@ -0,0 +1,11 @@
+
+# Spray benchmarking test
+
+Framework permutation based on the following technology stack:
+
+* Scala
+* spray
+* Elasticsearch
+* json4s-jackson for JSON processing
+
+Currently implements test types 1, 2, 3, 5 and 6.

+ 44 - 0
frameworks/Scala/spray-es/benchmark_config

@@ -0,0 +1,44 @@
+{
+  "framework": "spray-es",
+  "tests": [{
+    "default": {
+      "setup_file": "setup",
+      "json_url": "/json",
+      "plaintext_url": "/plaintext",
+      "port": 8080,
+      "approach": "Realistic",
+      "classification": "Platform",
+      "database": "None",
+      "framework": "spray-es",
+      "language": "Scala",
+      "orm": "Raw",
+      "platform": "Spray",
+      "webserver": "Spray",
+      "os": "Linux",
+      "database_os": "Linux",
+      "display_name": "spray-es",
+      "notes": "",
+      "versus": ""
+    },
+    "raw": {
+      "setup_file": "setup",
+      "db_url": "/db",
+      "query_url": "/queries?queries=",
+      "update_url": "/updates?queries=",
+      "port": 8080,
+      "approach": "Realistic",
+      "classification": "Platform",
+      "database": "Elasticsearch",
+      "framework": "spray-es",
+      "language": "Scala",
+      "orm": "Raw",
+      "platform": "Spray",
+      "webserver": "Spray",
+      "os": "Linux",
+      "database_os": "Linux",
+      "display_name": "spray-es",
+      "notes": "",
+      "versus": ""
+    }
+  }]
+}

+ 29 - 0
frameworks/Scala/spray-es/build.sbt

@@ -0,0 +1,29 @@
+organization  := "fi.markoa"
+
+version       := "0.1"
+
+scalaVersion  := "2.11.6"
+
+scalacOptions := Seq("-encoding", "utf8")
+
+libraryDependencies ++= {
+  val akkaV = "2.3.9"
+  val sprayV = "1.3.3"
+  val json4sV = "3.2.11"
+  Seq(
+    "io.spray" %% "spray-can" % sprayV,
+    "io.spray" %% "spray-routing" % sprayV,
+    "io.spray" %% "spray-client" % sprayV,
+    "org.json4s" %% "json4s-jackson" % json4sV,
+    "org.json4s" %% "json4s-ext" % json4sV,
+    "com.typesafe.akka" %%  "akka-actor" % akkaV,
+    "com.typesafe" % "config" % "1.2.1",
+    "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0",
+    "ch.qos.logback" % "logback-classic" % "1.1.3",
+    "io.spray" %% "spray-testkit" % sprayV % "test",
+    "com.typesafe.akka" %%  "akka-testkit" % akkaV % "test",
+    "org.specs2" %% "specs2-core" % "2.3.11" % "test"
+  )
+}
+
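+// sbt-revolver supplies the re-start / re-stop tasks used during development.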
+Revolver.settings

+ 12 - 0
frameworks/Scala/spray-es/install.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+
+fw_depends java8 sbt
+
+export JAVA_HOME=/opt/java8
+export PATH=$JAVA_HOME/bin:$PATH:$IROOT/sbt/bin
+
+sbt_opts='-J-XX:+UseG1GC -J-Xmx2G -J-XX:MaxMetaspaceSize=512m'
+
+cd $TROOT
+sbt $sbt_opts package
+sbt $sbt_opts assembly

+ 1 - 0
frameworks/Scala/spray-es/project/build.properties

@@ -0,0 +1 @@
+sbt.version=0.13.8

+ 3 - 0
frameworks/Scala/spray-es/project/plugins.sbt

@@ -0,0 +1,3 @@
+addSbtPlugin("io.spray" % "sbt-revolver" % "0.7.2")
+
+addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.13.0")

+ 3 - 0
frameworks/Scala/spray-es/setup.sh

@@ -0,0 +1,3 @@
+#!/bin/bash
+
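+# Run the fat jar produced by "sbt assembly" in install.sh; -Dtfb.db_host
+# overrides the default database host from application.conf.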
+/opt/java8/bin/java -Dtfb.db_host=$DBHOST -jar target/scala-2.11/spray-es-assembly-0.1.jar &

+ 18 - 0
frameworks/Scala/spray-es/src/main/resources/application.conf

@@ -0,0 +1,18 @@
+akka {
+  loglevel = info
+}
+
+tfb {
+  db_host = 127.0.0.1
+}
+
+spray.can {
+  server {
+    request-timeout = 2s
+  }
+  host-connector {
+    max-connections = 10
+    max-retries = 1
+    pipelining = off
+  }
+}
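
The tfb.db_host value above is only a fallback: ConfigFactory.load() resolves
JVM system properties ahead of application.conf, which is how the
-Dtfb.db_host=$DBHOST flag passed in setup.sh points the service at the
database host. A minimal sketch of that lookup (not part of the commit; the
object name ConfigCheck is made up):

    import com.typesafe.config.ConfigFactory

    object ConfigCheck extends App {
      // System properties take precedence over application.conf, so running
      // with -Dtfb.db_host=10.0.0.5 prints that address instead of 127.0.0.1.
      val config = ConfigFactory.load()
      println(config.getString("tfb.db_host"))
    }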

+ 133 - 0
frameworks/Scala/spray-es/src/main/scala/fi/markoa/tfb/spray_es/BenchmarkService.scala

@@ -0,0 +1,133 @@
+package fi.markoa.tfb.spray_es
+
+import java.util.concurrent.ThreadLocalRandom
+
+import com.typesafe.config.ConfigFactory
+import com.typesafe.scalalogging.Logger
+import org.json4s.Extraction
+import org.json4s.JsonAST.JInt
+import org.slf4j.LoggerFactory
+import spray.http.HttpHeaders.{`Content-Type`, `Content-Length`}
+import spray.routing.HttpService
+
+import akka.actor.Actor
+import spray.http._
+import MediaTypes._
+
+import scala.util.{Try, Failure, Success}
+
+import scala.concurrent.Future
+
+
+class BenchmarkServiceActor extends Actor with BenchmarkService {
+  def actorRefFactory = context
+  def receive = runRoute(myRoute)
+}
+
+case class HelloMessage(message: String)
+case class World(id: Int, randomNumber: Int)
+
+trait BenchmarkService extends HttpService {
+  import scala.concurrent._
+  import org.json4s.jackson.JsonMethods._
+  import spray.http._
+  import spray.client.pipelining._
+  import Boot.system.dispatcher
+  implicit val formats = org.json4s.DefaultFormats
+  val logger = Logger(LoggerFactory.getLogger(classOf[BenchmarkService]))
+  val pipeline: HttpRequest => Future[HttpResponse] = sendReceive
+  val config = ConfigFactory.load()
+  val tfbDbHost = config.getString("tfb.db_host")
+  val baseUrl = s"http://$tfbDbHost:9200/tfb/world"
+  val MinWorldId = 1
+  val MaxWorldId = 10000
+  val MinQueries = 1
+  val MaxQueries = 500
+
+  val helloPlain = "Hello, World!"
+
+  val myRoute =
+    get {
+      path("plaintext") { // Test type 6: Plaintext
+        respondWithMediaType(`text/plain`) {
+          complete(helloPlain)
+        }
+      } ~
+      path("json") { // Test type 1: JSON serialization
+        respondWithMediaType(`application/json`) {
+          complete(compact(Extraction.decompose(HelloMessage(helloPlain))))
+        }
+      } ~
+      path("db") { // Test type 2: Single database query
+        respondWithMediaType(`application/json`) {
+          onComplete(getRandomWorldFuture) {
+            case Success(Right(r)) => complete(compact(Extraction.decompose(r)))
+            case Success(Left(msg)) => complete(StatusCodes.InternalServerError, msg)
+            case Failure(ex) => failWith(ex)
+          }
+        }
+      } ~
+      path("queries") { // Test type 3: Multiple database queries
+        parameter("queries".?) { queries =>
+          respondWithMediaType(`application/json`) {
+            onComplete(getWorldQueries(queries)) {
+              case Success(s) =>
+                // Log failed lookups separately; collecting logger.error's Unit
+                // result would render as "()" and corrupt the JSON array.
+                s foreach {
+                  case Left(msg) => logger.error(msg)
+                  case _ =>
+                }
+                complete((s collect {
+                  case Right(w) => compact(Extraction.decompose(w))
+                }) mkString("[", ",", "]"))
+              case Failure(ex) => failWith(ex)
+            }
+          }
+        }
+      } ~
+      path("updates") { // Test type 5: Database updates
+        parameter("queries".?) { queries =>
+          respondWithMediaType(`application/json`) {
+            onComplete(getRandomInts(getQueriesParameter(queries), MinWorldId, MaxWorldId) zip getWorldQueries(queries)) {
+              case Success((randoms, queryResults)) =>
+                val results = (queryResults zip randoms).foldLeft((List[String](), List[String]())) { (acc, i) => i match {
+                  case (Right(world), rand) =>
+                    (compact(Extraction.decompose(world.copy(randomNumber = rand))) :: acc._1,
+                      raw"""{ "index" : { "_id" : "${world.id}" } }
+{ "randomNumber" : $rand }""" :: acc._2)
+                  case _ => ("" :: acc._1, "" :: acc._2)
+                  }
+                }
+                // The _bulk body must end with a trailing newline per the bulk
+                // API contract; the update itself is fire-and-forget (the
+                // response future is not awaited before completing).
+                pipeline(Post(s"$baseUrl/_bulk", results._2.mkString("", "\n", "\n")))
+                complete(results._1.mkString("[", ",", "]"))
+              case Failure(ex) => failWith(ex)
+            }
+          }
+        }
+      }
+    }
+
+  def getRandomInts(count: Int, min: Int, max: Int) =
+    Future { 1 to count map { i => ThreadLocalRandom.current.nextInt(min, max+1) } }
+
+  def getWorldQueries(queries: Option[String]) =
+    Future.sequence(1 to getQueriesParameter(queries) map (i => getRandomWorldFuture))
+
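+  // Clamp the "queries" request parameter to [MinQueries, MaxQueries] as the
+  // benchmark rules require; missing or non-numeric values fall back to MinQueries.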
+  def getQueriesParameter(queries: Option[String]) =
+    (queries map (m => Try(m.toInt).map { i =>
+      if (i < MinQueries) MinQueries
+      else if (i > MaxQueries) MaxQueries
+      else i
+    }.getOrElse(MinQueries))).getOrElse(MinQueries)
+
+  def getRandomWorldFuture = {
+    (for {
+      id <- Future { ThreadLocalRandom.current.nextInt(1, MaxWorldId+1) }
+      res <- pipeline(Get(s"$baseUrl/$id"))
+    } yield (id, res)) map { r =>
+      if(r._2.status.intValue == 200) {
+        parseOpt(r._2.entity.asString) map (c => c \ "_source" \ "randomNumber") match {
+          case Some(JInt(rand)) => Right(World(r._1, rand.toInt))
+          case _ => Left(s"parse error ${r._1}")
+        }
+      } else Left(s"error: id: ${r._1}, ${r._2.status}")
+    }
+  }
+
+}
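
For reference, getRandomWorldFuture above reads only _source.randomNumber out
of the Elasticsearch GET response. A standalone sketch of that json4s
extraction (not part of the commit; ParseCheck is a made-up name and the
response body is an abridged example of the ES 1.x document-GET format):

    import org.json4s.JsonAST.JInt
    import org.json4s.jackson.JsonMethods._

    object ParseCheck extends App {
      // Abridged GET response for /tfb/world/42.
      val body = """{"found":true,"_id":"42","_source":{"randomNumber":1234}}"""
      // The same json4s path the service traverses: _source \ randomNumber.
      parseOpt(body) map (_ \ "_source" \ "randomNumber") match {
        case Some(JInt(rand)) => println(s"randomNumber = $rand") // 1234
        case _ => println("parse error")
      }
    }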

+ 21 - 0
frameworks/Scala/spray-es/src/main/scala/fi/markoa/tfb/spray_es/Boot.scala

@@ -0,0 +1,21 @@
+package fi.markoa.tfb.spray_es
+
+import akka.actor.{Props, ActorSystem}
+import akka.io.IO
+import akka.util.Timeout
+import akka.pattern.ask
+import com.typesafe.scalalogging.Logger
+import org.slf4j.LoggerFactory
+import spray.can.Http
+import scala.concurrent.duration._
+
+object Boot extends App {
+  val logger = Logger(LoggerFactory.getLogger(Boot.getClass))
+
+  implicit val system = ActorSystem("tfb-service")
+
+  val service = system.actorOf(Props[BenchmarkServiceActor], "tfb-service")
+
+  implicit val timeout = Timeout(5.seconds)
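+  // Ask the spray-can HTTP extension to bind the service actor on all interfaces.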
+  IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = 8080)
+}

+ 3 - 1
toolset/benchmark/benchmarker.py

@@ -546,6 +546,7 @@ class Benchmarker:
             sudo service redis-server restart
             sudo service postgresql restart
             sudo service cassandra restart
+            /opt/elasticsearch/elasticsearch restart
           """)
           time.sleep(10)
 
@@ -554,7 +555,8 @@ class Benchmarker:
             ("mongodb", self.database_host, 27017),
             ("mongodb", self.database_host, 27017),
             ("redis", self.database_host, 6379),
             ("redis", self.database_host, 6379),
             ("postgresql", self.database_host, 5432),
             ("postgresql", self.database_host, 5432),
-            ("cassandra", self.database_host, 9160)
+            ("cassandra", self.database_host, 9160),
+            ("elasticsearch", self.database_host, 9200)
           ])
           ])
           print "database connection test results:\n" + "\n".join(st[1])
           print "database connection test results:\n" + "\n".join(st[1])
 
 

+ 23 - 1
toolset/run-ci.py

@@ -34,7 +34,7 @@ class CIRunnner:
   Only verifies the first test in each directory
   '''
 
-  SUPPORTED_DATABASES = "mysql postgres mongodb cassandra sqlite none".split()
+  SUPPORTED_DATABASES = "mysql postgres mongodb cassandra elasticsearch sqlite none".split()
   
   def __init__(self, mode, testdir=None):
     '''
@@ -429,6 +429,28 @@ class CIRunnner:
       >&2 echo "Cassandra did not start, skipping"
       >&2 echo "Cassandra did not start, skipping"
     fi
     fi
 
 
+    # Setup Elasticsearch
+    curl -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.5.0.deb
+    sudo dpkg -i --force-confnew elasticsearch-1.5.0.deb
+    sudo update-rc.d elasticsearch defaults 95 10
+    sudo service elasticsearch restart
+
+    echo "Populating Elasticsearch database"
+    for i in {1..45}; do
+      nc -z localhost 9200 && break || sleep 1;
+      echo "Waiting for Elasticsearch ($i/45}"
+    done
+    nc -z localhost 9200
+    if [ $? -eq 0 ]; then
+      curl localhost:9200
+      sh config/elasticsearch/es-create-index.sh
+      python config/elasticsearch/es-db-data-gen.py > config/elasticsearch/tfb-data.json
+      curl -sS -D - -o /dev/null -XPOST localhost:9200/tfb/world/_bulk --data-binary @config/elasticsearch/tfb-data.json
+      echo "Elasticsearch DB populated"
+    else
+      >&2 echo "Elasticsearch did not start, skipping"
+    fi
+
     # Setup MongoDB
     echo "Populating MongoDB database"
     for i in {1..45}; do

+ 32 - 0
toolset/setup/linux/database.sh

@@ -210,6 +210,38 @@ else
   >&2 echo "Cassandra did not start, skipping"
   >&2 echo "Cassandra did not start, skipping"
 fi
 fi
 
 
+##############################
+# Elasticsearch
+##############################
+echo "Setting up Elasticsearch"
+
+export ES_V=1.5.0
+wget -nv https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-$ES_V.tar.gz
+sudo tar zxf elasticsearch-$ES_V.tar.gz -C /opt
+sudo ln -s /opt/elasticsearch-$ES_V /opt/elasticsearch
+
+rm -rf /ssd/elasticsearch /ssd/log/elasticsearch
+mkdir -p /ssd/elasticsearch /ssd/log/elasticsearch
+
+sudo cp elasticsearch/elasticsearch.yml /opt/elasticsearch/config
+sudo cp elasticsearch/elasticsearch /opt/elasticsearch
+
+/opt/elasticsearch/elasticsearch restart
+
+for i in {1..45}; do
+  nc -z $TFB_DBHOST 9200 && break || sleep 1;
+  echo "Waiting for Elasticsearch ($i/45}"
+done
+nc -z $TFB_DBHOST 9200
+if [ $? -eq 0 ]; then
+  sh elasticsearch/es-create-index.sh
+  python elasticsearch/es-db-data-gen.py > elasticsearch/tfb-data.json
+  curl -sS -D - -o /dev/null -XPOST localhost:9200/tfb/world/_bulk --data-binary @elasticsearch/tfb-data.json
+  echo "Elasticsearch DB populated"
+else
+  >&2 echo "Elasticsearch did not start, skipping"
+fi
+
 ##############################
 # Redis
 ##############################

+ 3 - 2
toolset/setup/linux/systools/sbt.sh

@@ -3,7 +3,8 @@
 RETCODE=$(fw_exists ${IROOT}/sbt.installed)
 [ ! "$RETCODE" == 0 ] || { return 0; }
 
-fw_get http://dl.bintray.com/sbt/native-packages/sbt/0.13.5/sbt-0.13.5.zip -O sbt-0.13.5.zip
-fw_unzip sbt-0.13.5.zip
+sbt_ver=0.13.8
+fw_get http://dl.bintray.com/sbt/native-packages/sbt/$sbt_ver/sbt-$sbt_ver.zip -O sbt-$sbt_ver.zip
+fw_unzip sbt-$sbt_ver.zip
 
 touch ${IROOT}/sbt.installed