Add CPU_COUNT env variable / get MAX_THREADS for load_gen (#2586)

* get threads programmatically

* update

* add cpu_count

* some fixes

* cast cpu_count to string

* add concurrency and query levels back

* missed a few
Nate committed 8 years ago (commit c621b4196c)
47 changed files with 85 additions and 97 deletions
  1. benchmark.cfg.example (+0 -1)
  2. frameworks/C++/cutelyst/config.sh (+3 -4)
  3. frameworks/C++/poco/setup.sh (+1 -1)
  4. frameworks/C++/silicon/setup_mhd_epoll_mysql.sh (+1 -1)
  5. frameworks/C++/silicon/setup_mhd_tpc_mysql.sh (+1 -1)
  6. frameworks/C++/ulib/setup_elasticsearch.sh (+1 -1)
  7. frameworks/C++/ulib/setup_json.sh (+2 -2)
  8. frameworks/C++/ulib/setup_json_extra.sh (+2 -2)
  9. frameworks/C++/ulib/setup_json_large.sh (+1 -1)
  10. frameworks/C++/ulib/setup_json_medium.sh (+2 -2)
  11. frameworks/C++/ulib/setup_mongodb.sh (+2 -2)
  12. frameworks/C++/ulib/setup_mysql.sh (+2 -2)
  13. frameworks/C++/ulib/setup_plaintext.sh (+2 -2)
  14. frameworks/C++/ulib/setup_postgres.sh (+1 -1)
  15. frameworks/C++/ulib/setup_redis.sh (+2 -2)
  16. frameworks/C++/ulib/setup_sqlite.sh (+2 -2)
  17. frameworks/C++/wt/setup.sh (+1 -1)
  18. frameworks/C++/wt/setup_postgres.sh (+1 -1)
  19. frameworks/CSharp/aspnet/setup_nginx.sh (+2 -2)
  20. frameworks/CSharp/evhttp-sharp/setup.sh (+1 -1)
  21. frameworks/CSharp/nancy/setup_libevent.sh (+2 -2)
  22. frameworks/CSharp/nancy/setup_nginx.sh (+2 -2)
  23. frameworks/CSharp/servicestack/setup_nginx.sh (+2 -2)
  24. frameworks/Dart/dart-raw/setup.sh (+1 -1)
  25. frameworks/Dart/redstone/setup.sh (+1 -1)
  26. frameworks/Dart/start/setup.sh (+5 -5)
  27. frameworks/Dart/stream/setup.sh (+5 -5)
  28. frameworks/Haskell/servant/setup.sh (+1 -1)
  29. frameworks/Haskell/spock/setup.sh (+1 -1)
  30. frameworks/Haskell/wai/setup.sh (+1 -1)
  31. frameworks/Haskell/yesod/run_yesod_mysql_mongo.sh (+1 -1)
  32. frameworks/Haskell/yesod/run_yesod_postgres.sh (+1 -1)
  33. frameworks/Lua/openresty/setup.sh (+1 -1)
  34. frameworks/Perl/dancer/setup.sh (+1 -1)
  35. frameworks/Perl/kelp/setup.sh (+1 -1)
  36. frameworks/Perl/plack/setup.sh (+1 -1)
  37. frameworks/Perl/web-simple/setup.sh (+1 -1)
  38. frameworks/Python/bottle/setup_nginxuwsgi.sh (+1 -1)
  39. frameworks/Python/flask/setup_nginxuwsgi.sh (+1 -1)
  40. frameworks/Python/uwsgi/setup.sh (+1 -1)
  41. frameworks/Python/uwsgi/setup_nginx.sh (+1 -1)
  42. frameworks/Python/weppy/setup_nginxuwsgi.sh (+1 -1)
  43. frameworks/Ur/urweb/setup.sh (+1 -1)
  44. frameworks/Ur/urweb/setup_mysql.sh (+1 -1)
  45. toolset/benchmark/benchmarker.py (+0 -2)
  46. toolset/benchmark/framework_test.py (+18 -19)
  47. toolset/run-tests.py (+2 -9)
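
In short: the toolset now exports CPU_COUNT for the framework setup scripts, and the wrk load-generation templates compute their own thread count on the client instead of reading a configured MAX_THREADS. A minimal, simplified sketch of that flow (illustrative only; the authoritative code is in the toolset/run-tests.py and toolset/benchmark/framework_test.py diffs below):

    # Sketch only: variable names follow this commit; the surrounding
    # toolset code is simplified for illustration.
    import multiprocessing
    import os

    # toolset/run-tests.py: expose the app server's core count to every
    # framework setup script; os.environ values must be strings, hence the
    # "cast cpu_count to string" step in the commit message.
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    # toolset/benchmark/framework_test.py no longer exports MAX_THREADS.
    # Each wrk template now derives its own thread count on the load
    # generator at run time:
    #   let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)*4

The framework setup scripts keep their local scaling on top of the exported value, e.g. MAX_THREADS=$(( 2 * $CPU_COUNT )) in the ULib and urweb scripts.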

+ 0 - 1
benchmark.cfg.example

@@ -18,7 +18,6 @@ install_only=False
 list_tests=False
 concurrency_levels=[8, 16, 32, 64, 128, 256]
 query_levels=[1, 5,10,15,20]
-threads=8
 mode=benchmark
 sleep=60
 test=None

+ 3 - 4
frameworks/C++/cutelyst/config.sh

@@ -30,7 +30,7 @@ cd ${CROOT}/benchmarks
 # build
 export CMAKE_PREFIX_PATH=/opt/qt${QT_VERSION_MM}:${CROOT}
 cmake $TROOT -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$CROOT
-make -j $MAX_THREADS
+make -j $CPU_COUNT
 
 if [ -n "${UWSGI}" ]; then
   cp -v ${TROOT}/config/config_socket.ini ${CROOT}/config.ini
@@ -47,9 +47,9 @@ sed -i "s|SendDate=.*|SendDate=${SEND_DATE}|g" ${CROOT}/config.ini
 export LD_LIBRARY_PATH=/opt/qt${QT_VERSION_MM}/lib:${CROOT}/lib/x86_64-linux-gnu/
 
 if [ -n "${UWSGI}" ]; then
-  uwsgi --ini ${CROOT}/config.ini --plugin ${CROOT}/lib/uwsgi/plugins/cutelyst_plugin.so --cutelyst-app ${CROOT}/benchmarks/src/libcutelyst_benchmarks.so ${PROCESS_OR_THREAD} $MAX_THREADS &
+  uwsgi --ini ${CROOT}/config.ini --plugin ${CROOT}/lib/uwsgi/plugins/cutelyst_plugin.so --cutelyst-app ${CROOT}/benchmarks/src/libcutelyst_benchmarks.so ${PROCESS_OR_THREAD} $CPU_COUNT &
 else
-  ${CROOT}/bin/cutelyst-wsgi --ini ${CROOT}/config.ini -a ${CROOT}/benchmarks/src/libcutelyst_benchmarks.so ${PROCESS_OR_THREAD} $MAX_THREADS --socket-timeout 0 ${BALANCER} &
+  ${CROOT}/bin/cutelyst-wsgi --ini ${CROOT}/config.ini -a ${CROOT}/benchmarks/src/libcutelyst_benchmarks.so ${PROCESS_OR_THREAD} $CPU_COUNT --socket-timeout 0 ${BALANCER} &
 fi
 
 # configure Nginx
@@ -59,4 +59,3 @@ if [ -n "${NGINX}" ]; then
   sed -i "s|include .*/conf/uwsgi_params;|include ${NGINX_HOME}/conf/uwsgi_params;|g" ${CROOT}/nginx.conf
   nginx -c ${CROOT}/nginx.conf
 fi
-

+ 1 - 1
frameworks/C++/poco/setup.sh

@@ -3,5 +3,5 @@
 fw_depends poco
 
 g++-4.8 -O3 -DNDEBUG -std=c++0x -o poco benchmark.cpp -I$POCO_HOME/Foundation/include -I$POCO_HOME/Util/include -I$POCO_HOME/Net/include -L$POCO_HOME/lib/Linux/x86_64 -lPocoNet -lPocoUtil -lPocoFoundation -lPocoXML -lPocoJSON
-./poco 8080 $MAX_THREADS
+./poco 8080 $CPU_COUNT
 

+ 1 - 1
frameworks/C++/silicon/setup_mhd_epoll_mysql.sh

@@ -8,4 +8,4 @@ cd build
 cmake .. -DCMAKE_CXX_COMPILER=clang++-3.9
 make silicon_epoll_mysql
 
-$TROOT/build/silicon_epoll_mysql ${DBHOST} 8080 ${MAX_THREADS} &
+$TROOT/build/silicon_epoll_mysql ${DBHOST} 8080 ${CPU_COUNT} &

+ 1 - 1
frameworks/C++/silicon/setup_mhd_tpc_mysql.sh

@@ -8,4 +8,4 @@ cd build
 cmake .. -DCMAKE_CXX_COMPILER=clang++-3.9
 make silicon_tpc_mysql
 
-$TROOT/build/silicon_tpc_mysql ${DBHOST} 8080 ${MAX_THREADS} &
+$TROOT/build/silicon_tpc_mysql ${DBHOST} 8080 ${CPU_COUNT} &

+ 1 - 1
frameworks/C++/ulib/setup_elasticsearch.sh

@@ -2,7 +2,7 @@
 
 fw_depends ulib
 
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 
 # 1. Change ULib Server (userver_tcp) configuration
 sed -i "s|TCP_LINGER_SET .*|TCP_LINGER_SET 0|g"									  $IROOT/ULib/benchmark.cfg

+ 2 - 2
frameworks/C++/ulib/setup_json.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 2 - 2
frameworks/C++/ulib/setup_json_extra.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS ))
+MAX_THREADS=$(( 3 * $CPU_COUNT ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 1 - 1
frameworks/C++/ulib/setup_json_large.sh

@@ -2,7 +2,7 @@
 
 fw_depends ulib
 
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 
 # 1. Change ULib Server (userver_tcp) configuration
 sed -i "s|TCP_LINGER_SET .*|TCP_LINGER_SET 0|g"									  $IROOT/ULib/benchmark.cfg

+ 2 - 2
frameworks/C++/ulib/setup_json_medium.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 2 - 2
frameworks/C++/ulib/setup_mongodb.sh

@@ -4,9 +4,9 @@ fw_depends mongodb ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 2 - 2
frameworks/C++/ulib/setup_mysql.sh

@@ -4,9 +4,9 @@ fw_depends mysql ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 2 - 2
frameworks/C++/ulib/setup_plaintext.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 1 - 1
frameworks/C++/ulib/setup_postgres.sh

@@ -2,7 +2,7 @@
 
 fw_depends postgresql ulib
 
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 
 # 1. Change ULib Server (userver_tcp) configuration
 sed -i "s|TCP_LINGER_SET .*|TCP_LINGER_SET -2|g"								  $IROOT/ULib/benchmark.cfg

+ 2 - 2
frameworks/C++/ulib/setup_redis.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 2 - 2
frameworks/C++/ulib/setup_sqlite.sh

@@ -4,9 +4,9 @@ fw_depends ulib
 
 # Travis is broken
 if [ "$TRAVIS" != "true" ]; then
-MAX_THREADS=$(( 3 * $MAX_THREADS / 2 ))
+MAX_THREADS=$(( 3 * $CPU_COUNT / 2 ))
 else
-MAX_THREADS=$(( 2 * $MAX_THREADS ))
+MAX_THREADS=$(( 2 * $CPU_COUNT ))
 fi
 
 # 1. Change ULib Server (userver_tcp) configuration

+ 1 - 1
frameworks/C++/wt/setup.sh

@@ -6,4 +6,4 @@ sed -i 's|INSERT_DB_HOST_HERE|'"${DBHOST}"'|g' benchmark.cpp
 
 g++-4.8 -O3 -DNDEBUG -std=c++0x -L${BOOST_LIB} -I${BOOST_INC} -L${WT_LIB} -I${WT_INC} -o benchmark.wt benchmark.cpp -lwt -lwthttp -lwtdbo -lwtdbomysql -lboost_thread -lboost_system
 
-./benchmark.wt -c wt_config.xml -t ${MAX_THREADS} --docroot . --http-address 0.0.0.0 --http-port 8080 --accesslog=- --no-compression
+./benchmark.wt -c wt_config.xml -t ${CPU_COUNT} --docroot . --http-address 0.0.0.0 --http-port 8080 --accesslog=- --no-compression

+ 1 - 1
frameworks/C++/wt/setup_postgres.sh

@@ -6,4 +6,4 @@ sed -i 's|INSERT_DB_HOST_HERE|'"${DBHOST}"'|g' benchmark.cpp
 
 g++-4.8 -O3 -DNDEBUG -DBENCHMARK_USE_POSTGRES -std=c++0x -L${BOOST_LIB} -I${BOOST_INC} -L${WT_LIB} -I${WT_INC} -o benchmark_postgres.wt benchmark.cpp -lwt -lwthttp -lwtdbo -lwtdbopostgres -lboost_thread -lboost_system
 
-./benchmark_postgres.wt -c wt_config.xml -t ${MAX_THREADS} --docroot . --http-address 0.0.0.0 --http-port 8080 --accesslog=- --no-compression
+./benchmark_postgres.wt -c wt_config.xml -t ${CPU_COUNT} --docroot . --http-address 0.0.0.0 --http-port 8080 --accesslog=- --no-compression

+ 2 - 2
frameworks/CSharp/aspnet/setup_nginx.sh

@@ -14,7 +14,7 @@ xbuild src/Benchmarks.build.proj /t:Build
 # one fastcgi instance for each thread
 # load balanced by nginx
 port_start=9001
-port_end=$(($port_start+$MAX_THREADS))
+port_end=$(($port_start+$CPU_COUNT))
 
 # nginx
 conf="upstream mono {\n"
@@ -24,7 +24,7 @@ done
 conf+="}"
 echo -e $conf > $TROOT/nginx.upstream.conf
 
-nginx -c $TROOT/nginx.conf -g "worker_processes ${MAX_THREADS};"
+nginx -c $TROOT/nginx.conf -g "worker_processes ${CPU_COUNT};"
 
 # To debug, use --printlog --verbose --loglevels=All
 for port in $(seq $port_start $port_end); do

+ 1 - 1
frameworks/CSharp/evhttp-sharp/setup.sh

@@ -10,4 +10,4 @@ xbuild src/EvHttpSharpBenchmark.csproj /p:Configuration=Release
 
 export MONO_GC_PARAMS=nursery-size=64m
 
-mono -O=all $TROOT/src/bin/Release/EvHttpSharpBenchmark.exe 127.0.0.1 8085 $MAX_THREADS &
+mono -O=all $TROOT/src/bin/Release/EvHttpSharpBenchmark.exe 127.0.0.1 8085 $CPU_COUNT &

+ 2 - 2
frameworks/CSharp/nancy/setup_libevent.sh

@@ -17,7 +17,7 @@ xbuild ${LIBEVENTHOST_HOME}/LibeventHost.csproj /p:Configuration=Release
 
 # nginx
 port_start=9001
-port_end=$((${port_start}+${MAX_THREADS}))
+port_end=$((${port_start}+${CPU_COUNT}))
 conf="upstream mono {\n"
 for port in $(seq ${port_start} ${port_end} ); do
   conf+="\tserver 127.0.0.1:${port};\n"
@@ -25,7 +25,7 @@ done
 conf+="}"
 
 echo -e $conf > ${TROOT}/nginx.upstream.conf
-${NGINX_HOME}/sbin/nginx -c ${TROOT}/nginx.conf.libevent -g "worker_processes '"${MAX_THREADS}"';"
+${NGINX_HOME}/sbin/nginx -c ${TROOT}/nginx.conf.libevent -g "worker_processes '"${CPU_COUNT}"';"
 
 # Start fastcgi for each thread
 # To debug, use --printlog --verbose --loglevels=All

+ 2 - 2
frameworks/CSharp/nancy/setup_nginx.sh

@@ -12,7 +12,7 @@ xbuild src/NancyBenchmark.csproj /p:Configuration=Release
 
 # nginx
 port_start=9001
-port_end=$((${port_start}+${MAX_THREADS}))
+port_end=$((${port_start}+${CPU_COUNT}))
 conf="upstream mono {\n"
 for port in $(seq ${port_start} $port_end); do
   conf+="\tserver 127.0.0.1:${port};\n"
@@ -27,7 +27,7 @@ echo "include $IROOT/nginx/conf/fastcgi_params;" > $TROOT/nginx.osenv.conf
 
 
 echo -e $conf > $TROOT/nginx.upstream.conf
-${NGINX_HOME}/sbin/nginx -c $TROOT/nginx.conf -g "worker_processes '"${MAX_THREADS}"';"
+${NGINX_HOME}/sbin/nginx -c $TROOT/nginx.conf -g "worker_processes '"${CPU_COUNT}"';"
 
 # Start fastcgi for each thread
 # To debug, use --printlog --verbose --loglevels=All

+ 2 - 2
frameworks/CSharp/servicestack/setup_nginx.sh

@@ -12,7 +12,7 @@ xbuild src/ServiceStackBenchmark.csproj /t:Build
 # one fastcgi instance for each thread
 # load balanced by nginx
 port_start=9001
-port_end=$(($port_start+$MAX_THREADS))
+port_end=$(($port_start+$CPU_COUNT))
 # nginx
 conf="upstream mono {\n"
 for port in $(seq $port_start $port_end); do
@@ -20,7 +20,7 @@ conf+="\tserver 127.0.0.1:${port};\n"
 done
 conf+="}"
 echo -e $conf > $TROOT/nginx.upstream.conf
-nginx -c $TROOT/nginx.conf -g "worker_processes ${MAX_THREADS};"
+nginx -c $TROOT/nginx.conf -g "worker_processes ${CPU_COUNT};"
 # To debug, use --printlog --verbose --loglevels=All
 for port in $(seq $port_start $port_end); do
 	MONO_OPTIONS=--gc=sgen fastcgi-mono-server4 --applications=/:$TROOT/src --socket=tcp:127.0.0.1:$port &

+ 1 - 1
frameworks/Dart/dart-raw/setup.sh

@@ -6,4 +6,4 @@ fw_depends postgresql dart
 
 pub upgrade
 
-dart server.dart -a 0.0.0.0 -p 8080 -d ${MAX_CONCURRENCY} -i ${MAX_THREADS} &
+dart server.dart -a 0.0.0.0 -p 8080 -d ${MAX_CONCURRENCY} -i ${CPU_COUNT} &

+ 1 - 1
frameworks/Dart/redstone/setup.sh

@@ -7,4 +7,4 @@ fw_depends dart
 
 pub upgrade
 
-dart server.dart -a 0.0.0.0 -p 8080 -d ${MAX_THREADS} -i ${MAX_THREADS} &
+dart server.dart -a 0.0.0.0 -p 8080 -d ${CPU_COUNT} -i ${CPU_COUNT} &

+ 5 - 5
frameworks/Dart/start/setup.sh

@@ -11,9 +11,9 @@ pub upgrade
 # start dart servers
 #
 current=9001
-end=$(($current+$MAX_THREADS))
+end=$(($current+$CPU_COUNT))
 while [ $current -lt $end ]; do
-  dart server.dart -a 127.0.0.1 -p $current -d ${MAX_THREADS} &
+  dart server.dart -a 127.0.0.1 -p $current -d ${CPU_COUNT} &
   let current=current+1
 done
 
@@ -21,7 +21,7 @@ done
 #
 # create nginx configuration
 #
-conf+="worker_processes ${MAX_THREADS};\n"
+conf+="worker_processes ${CPU_COUNT};\n"
 conf+="error_log /dev/null error;\n"
 conf+="events {\n"
 conf+="\tworker_connections 1024;\n"
@@ -33,12 +33,12 @@ conf+="\tdefault_type application/octet-stream;\n"
 conf+="\tsendfile on;\n"
 conf+="\tupstream dart_cluster {\n"
 current=9001
-end=$(($current+$MAX_THREADS))
+end=$(($current+$CPU_COUNT))
 while [ $current -lt $end ]; do
   conf+="\t\tserver 127.0.0.1:${current};\n"
   let current=current+1
 done
-conf+="\t\tkeepalive ${MAX_THREADS};\n"
+conf+="\t\tkeepalive ${CPU_COUNT};\n"
 conf+="\t}\n"
 conf+="\tserver {\n"
 conf+="\t\tlisten 8080;\n"

+ 5 - 5
frameworks/Dart/stream/setup.sh

@@ -11,9 +11,9 @@ pub upgrade
 # start dart servers
 #
 current=9001
-end=$(($current+$MAX_THREADS))
+end=$(($current+$CPU_COUNT))
 while [ $current -lt $end ]; do
-  dart server.dart -a 127.0.0.1 -p $current -d ${MAX_THREADS} &
+  dart server.dart -a 127.0.0.1 -p $current -d ${CPU_COUNT} &
   let current=current+1
 done
 
@@ -21,7 +21,7 @@ done
 #
 # create nginx configuration
 #
-conf+="worker_processes ${MAX_THREADS};\n"
+conf+="worker_processes ${CPU_COUNT};\n"
 conf+="error_log /dev/null error;\n"
 conf+="events {\n"
 conf+="\tworker_connections 1024;\n"
@@ -33,12 +33,12 @@ conf+="\tdefault_type application/octet-stream;\n"
 conf+="\tsendfile on;\n"
 conf+="\tupstream dart_cluster {\n"
 current=9001
-end=$(($current+$MAX_THREADS))
+end=$(($current+$CPU_COUNT))
 while [ $current -lt $end ]; do
   conf+="\t\tserver 127.0.0.1:${current};\n"
   let current=current+1
 done
-conf+="\t\tkeepalive ${MAX_THREADS};\n"
+conf+="\t\tkeepalive ${CPU_COUNT};\n"
 conf+="\t}\n"
 conf+="\tserver {\n"
 conf+="\t\tlisten 8080;\n"

+ 1 - 1
frameworks/Haskell/servant/setup.sh

@@ -5,4 +5,4 @@ fw_depends postgresql stack
 ${IROOT}/stack --allow-different-user setup
 ${IROOT}/stack --allow-different-user build
 
-${IROOT}/stack --allow-different-user exec servant-exe -- ${DBHOST} +RTS -A32m -N${MAX_THREADS} &
+${IROOT}/stack --allow-different-user exec servant-exe -- ${DBHOST} +RTS -A32m -N${CPU_COUNT} &

+ 1 - 1
frameworks/Haskell/spock/setup.sh

@@ -10,4 +10,4 @@ fi
 
 ${IROOT}/stack --allow-different-user build --install-ghc
 
-${IROOT}/stack --allow-different-user exec spock-exe -- +RTS -A32m -N${MAX_THREADS} &
+${IROOT}/stack --allow-different-user exec spock-exe -- +RTS -A32m -N${CPU_COUNT} &

+ 1 - 1
frameworks/Haskell/wai/setup.sh

@@ -6,4 +6,4 @@ cd bench
 
 ${IROOT}/stack --allow-different-user build --install-ghc
 
-${IROOT}/stack --allow-different-user exec bench -- ${MAX_THREADS} ${DBHOST} +RTS -A32m -N${MAX_THREADS} &
+${IROOT}/stack --allow-different-user exec bench -- ${CPU_COUNT} ${DBHOST} +RTS -A32m -N${CPU_COUNT} &

+ 1 - 1
frameworks/Haskell/yesod/run_yesod_mysql_mongo.sh

@@ -6,4 +6,4 @@ cd yesod-mysql-mongo
 
 ${IROOT}/stack --allow-different-user build --install-ghc
 
-${IROOT}/stack --allow-different-user exec yesod-mysql-mongo -- ${MAX_THREADS} ${DBHOST} +RTS -A32m -N${MAX_THREADS} &
+${IROOT}/stack --allow-different-user exec yesod-mysql-mongo -- ${CPU_COUNT} ${DBHOST} +RTS -A32m -N${CPU_COUNT} &

+ 1 - 1
frameworks/Haskell/yesod/run_yesod_postgres.sh

@@ -6,4 +6,4 @@ cd yesod-postgres
 
 ${IROOT}/stack --allow-different-user build --install-ghc
 
-${IROOT}/stack --allow-different-user exec yesod-postgres -- ${MAX_THREADS} ${DBHOST} +RTS -A32m -N${MAX_THREADS} &
+${IROOT}/stack --allow-different-user exec yesod-postgres -- ${CPU_COUNT} ${DBHOST} +RTS -A32m -N${CPU_COUNT} &

+ 1 - 1
frameworks/Lua/openresty/setup.sh

@@ -7,4 +7,4 @@ fw_depends mysql lua luarocks openresty
 
 luarocks install lua-resty-template
 
-nginx -c $TROOT/nginx.conf -g "worker_processes '"${MAX_THREADS}"';" &
+nginx -c $TROOT/nginx.conf -g "worker_processes '"${CPU_COUNT}"';" &

+ 1 - 1
frameworks/Perl/dancer/setup.sh

@@ -17,4 +17,4 @@ cpanm --notest --no-man-page \
     
 nginx -c ${TROOT}/nginx.conf
 
-plackup -E production -s Starman --workers=${MAX_THREADS} -l ${TROOT}/frameworks-benchmark.sock -a ./app.pl &
+plackup -E production -s Starman --workers=${CPU_COUNT} -l ${TROOT}/frameworks-benchmark.sock -a ./app.pl &

+ 1 - 1
frameworks/Perl/kelp/setup.sh

@@ -18,4 +18,4 @@ cpanm --notest --no-man-page \
 
 nginx -c ${TROOT}/nginx.conf
 
-plackup -E production -s Starman --workers=${MAX_THREADS} -l ${TROOT}/frameworks-benchmark.sock -a ./app.pl &
+plackup -E production -s Starman --workers=${CPU_COUNT} -l ${TROOT}/frameworks-benchmark.sock -a ./app.pl &

+ 1 - 1
frameworks/Perl/plack/setup.sh

@@ -14,4 +14,4 @@ cpanm --notest --no-man-page \
     [email protected]
     
 nginx -c $TROOT/nginx.conf
-start_server --backlog=16384 --pid-file=$TROOT/app.pid --path=$TROOT/app.sock -- plackup -E production -s Starlet --max-keepalive-reqs 1000 --max-reqs-per-child 50000 --min-reqs-per-child 40000 --max-workers=${MAX_THREADS} -a $TROOT/app.psgi &
+start_server --backlog=16384 --pid-file=$TROOT/app.pid --path=$TROOT/app.sock -- plackup -E production -s Starlet --max-keepalive-reqs 1000 --max-reqs-per-child 50000 --min-reqs-per-child 40000 --max-workers=${CPU_COUNT} -a $TROOT/app.psgi &

+ 1 - 1
frameworks/Perl/web-simple/setup.sh

@@ -15,4 +15,4 @@ cpanm --notest --no-man-page  \
     
 nginx -c $TROOT/nginx.conf
 
-plackup -E production -s Starman --workers=${MAX_THREADS} -l $TROOT/frameworks-benchmark.sock -a $TROOT/app.pl &
+plackup -E production -s Starman --workers=${CPU_COUNT} -l $TROOT/frameworks-benchmark.sock -a $TROOT/app.pl &

+ 1 - 1
frameworks/Python/bottle/setup_nginxuwsgi.sh

@@ -7,4 +7,4 @@ sed -i 's|include .*/conf/uwsgi_params;|include '"${NGINX_HOME}"'/conf/uwsgi_par
 pip install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
 
 nginx -c $TROOT/nginx.conf
-uwsgi --ini $TROOT/uwsgi.ini --processes $MAX_THREADS --wsgi app:app &
+uwsgi --ini $TROOT/uwsgi.ini --processes $CPU_COUNT --wsgi app:app &

+ 1 - 1
frameworks/Python/flask/setup_nginxuwsgi.sh

@@ -8,4 +8,4 @@ pip install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
 
 nginx -c $TROOT/nginx.conf
 
-uwsgi --ini $TROOT/uwsgi.ini --processes $MAX_THREADS --wsgi app:app &
+uwsgi --ini $TROOT/uwsgi.ini --processes $CPU_COUNT --wsgi app:app &

+ 1 - 1
frameworks/Python/uwsgi/setup.sh

@@ -4,4 +4,4 @@ fw_depends python2
 
 pip install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
 
-uwsgi --master -L -l 5000 --gevent 1000 --http :8080 --http-keepalive --http-processes $MAX_THREADS -p $MAX_THREADS -w hello --add-header "Connection: keep-alive" --pidfile /tmp/uwsgi.pid &
+uwsgi --master -L -l 5000 --gevent 1000 --http :8080 --http-keepalive --http-processes $CPU_COUNT -p $CPU_COUNT -w hello --add-header "Connection: keep-alive" --pidfile /tmp/uwsgi.pid &

+ 1 - 1
frameworks/Python/uwsgi/setup_nginx.sh

@@ -8,4 +8,4 @@ pip install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
 
 nginx -c $TROOT/nginx.conf
 
-uwsgi --ini uwsgi.ini --processes $MAX_THREADS --gevent 1000 --wsgi hello &
+uwsgi --ini uwsgi.ini --processes $CPU_COUNT --gevent 1000 --wsgi hello &

+ 1 - 1
frameworks/Python/weppy/setup_nginxuwsgi.sh

@@ -8,4 +8,4 @@ pip install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
 
 nginx -c $TROOT/nginx.conf
 
-uwsgi --ini $TROOT/uwsgi.ini --processes $MAX_THREADS --wsgi app:app &
+uwsgi --ini $TROOT/uwsgi.ini --processes $CPU_COUNT --wsgi app:app &

+ 1 - 1
frameworks/Ur/urweb/setup.sh

@@ -4,5 +4,5 @@ fw_depends urweb
 
 urweb -db "dbname=hello_world user=benchmarkdbuser password=benchmarkdbpass host=${DBHOST}" bench
 
-MAX_THREADS=$((2 * $MAX_THREADS))
+MAX_THREADS=$((2 * $CPU_COUNT))
 ./bench.exe -q -k -t ${MAX_THREADS} &

+ 1 - 1
frameworks/Ur/urweb/setup_mysql.sh

@@ -7,5 +7,5 @@ export LD_LIBRARY_PATH=${URWEB_HOME}/lib
 
 ${URWEB_HOME}/bin/urweb -dbms mysql -db "dbname=hello_world user=benchmarkdbuser password=benchmarkdbpass host=${DBHOST}" bench
 
-MAX_THREADS=$((2 * $MAX_THREADS))
+MAX_THREADS=$((2 * $CPU_COUNT))
 ./bench.exe -q -k -t ${MAX_THREADS} &

+ 0 - 2
toolset/benchmark/benchmarker.py

@@ -987,8 +987,6 @@ class Benchmarker:
             args['types'] = { args['type'] : types[args['type']] }
         del args['type']
 
-
-        args['max_threads'] = args['threads']
         args['max_concurrency'] = max(args['concurrency_levels'])
 
         self.__dict__.update(args)

+ 18 - 19
toolset/benchmark/framework_test.py

@@ -34,6 +34,7 @@ class FrameworkTest:
   # Used for test types that require no pipelining or query string params.
   concurrency_template = """
 
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)*4
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Primer {name}"
@@ -46,10 +47,10 @@ class FrameworkTest:
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Warmup {name}"
-    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} \"http://{server_host}:{port}{url}\""
+    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
     echo "---------------------------------------------------------"
     echo ""
-    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} "http://{server_host}:{port}{url}"
+    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
     sleep 5
 
     echo ""
@@ -64,11 +65,11 @@ class FrameworkTest:
       echo ""
       echo "---------------------------------------------------------"
       echo " Concurrency: $c for {name}"
-      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
+      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\""
       echo "---------------------------------------------------------"
       echo ""
       STARTTIME=$(date +"%s")
-      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
+      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url}
       echo "STARTTIME $STARTTIME"
       echo "ENDTIME $(date +"%s")"
       sleep 2
@@ -77,6 +78,7 @@ class FrameworkTest:
   # Used for test types that require pipelining.
   pipeline_template = """
 
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)*4
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Primer {name}"
@@ -89,10 +91,10 @@ class FrameworkTest:
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Warmup {name}"
-    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} \"http://{server_host}:{port}{url}\""
+    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
     echo "---------------------------------------------------------"
     echo ""
-    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} "http://{server_host}:{port}{url}"
+    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
     sleep 5
 
     echo ""
@@ -107,11 +109,11 @@ class FrameworkTest:
       echo ""
       echo "---------------------------------------------------------"
       echo " Concurrency: $c for {name}"
-      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
+      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
       echo "---------------------------------------------------------"
       echo ""
       STARTTIME=$(date +"%s")
-      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
+      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
       echo "STARTTIME $STARTTIME"
       echo "ENDTIME $(date +"%s")"
       sleep 2
@@ -121,7 +123,7 @@ class FrameworkTest:
   # These tests run at a static concurrency level and vary the size of
   # the query sent with each request
   query_template = """
-
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)*4
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Primer {name}"
@@ -134,10 +136,10 @@ class FrameworkTest:
     echo ""
     echo "---------------------------------------------------------"
     echo " Running Warmup {name}"
-    echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} \"http://{server_host}:{port}{url}2\""
+    echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}2\""
     echo "---------------------------------------------------------"
     echo ""
-    wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} "http://{server_host}:{port}{url}2"
+    wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}2"
     sleep 5
 
     echo ""
@@ -152,11 +154,11 @@ class FrameworkTest:
       echo ""
       echo "---------------------------------------------------------"
       echo " Queries: $c for {name}"
-      echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} \"http://{server_host}:{port}{url}$c\""
+      echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}$c\""
       echo "---------------------------------------------------------"
       echo ""
       STARTTIME=$(date +"%s")
-      wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t {max_threads} "http://{server_host}:{port}{url}$c"
+      wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}$c"
       echo "STARTTIME $STARTTIME"
       echo "ENDTIME $(date +"%s")"
       sleep 2
@@ -177,7 +179,6 @@ class FrameworkTest:
     os.environ['IROOT'] = self.install_root
     os.environ['DBHOST'] = socket.gethostbyname(self.database_host)
     os.environ['LOGDIR'] = logDir
-    os.environ['MAX_THREADS'] = str(self.benchmarker.threads)
     os.environ['MAX_CONCURRENCY'] = str(max(self.benchmarker.concurrency_levels))
 
     # Always ensure that IROOT exists
@@ -223,7 +224,6 @@ class FrameworkTest:
       export IROOT=%s           &&  \\
       export DBHOST=%s          &&  \\
       export LOGDIR=%s          &&  \\
-      export MAX_THREADS=%s     &&  \\
       export MAX_CONCURRENCY=%s && \\
       cd %s && \\
       %s/TFBReaper "bash -exc \\\"source %s && source %s.sh\\\"''' % (self.fwroot,
@@ -231,7 +231,6 @@ class FrameworkTest:
         self.install_root,
         socket.gethostbyname(self.database_host),
         logDir,
-        self.benchmarker.threads,
         max(self.benchmarker.concurrency_levels),
         self.directory,
         self.install_root,
@@ -632,7 +631,7 @@ class FrameworkTest:
   def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
     headers = self.headers_template.format(accept=accept_header)
     return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      name=self.name, duration=self.benchmarker.duration,
       levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
       server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)
 
@@ -644,7 +643,7 @@ class FrameworkTest:
   def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
     headers = self.headers_template.format(accept=accept_header)
     return self.pipeline_template.format(max_concurrency=16384,
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      name=self.name, duration=self.benchmarker.duration,
       levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]),
       server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
       pipeline=16)
@@ -658,7 +657,7 @@ class FrameworkTest:
   def __generate_query_script(self, url, port, accept_header):
     headers = self.headers_template.format(accept=accept_header)
     return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
-      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
+      name=self.name, duration=self.benchmarker.duration,
       levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
       server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
 

+ 2 - 9
toolset/run-tests.py

@@ -68,6 +68,8 @@ def main(argv=None):
     os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
     # 'Ubuntu', '14.04', 'trusty' respectively
     os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
+    # App server cpu count
+    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())
 
     print("FWROOT is {!s}.".format(os.environ['FWROOT']))
 
@@ -117,12 +119,6 @@ def main(argv=None):
     if defaults['server_host'] is None:
         defaults['server_host'] = defaults['client_host']
 
-    maxThreads = 8
-    try:
-        maxThreads = multiprocessing.cpu_count()
-    except Exception:
-        pass
-
     ##########################################################
     # Set up argument parser
     ##########################################################
@@ -149,9 +145,6 @@ def main(argv=None):
     parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
 
     # Benchmark options
-    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
-    parser.add_argument('--query-levels', default=[1, 5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
-    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
     parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
     parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')