@@ -7,7 +7,7 @@
 # The name of the cluster. This is mainly used to prevent machines in
 # one logical cluster from joining another.
-cluster_name: 'Benchmark Cluster'
+cluster_name: 'TFB Cluster'
 
 # This defines the number of tokens randomly assigned to this node on the ring
 # The more tokens, relative to other nodes, the larger the proportion of data
@@ -17,7 +17,8 @@ cluster_name: 'Benchmark Cluster'
 # If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
 # and will use the initial_token as described below.
 #
-# Specifying initial_token will override this setting.
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial_token is set.
 #
 # If you already have a cluster with 1 token per node, and wish to migrate to
 # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
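As a hedged illustration of the choice described above (the value is an assumption for the example, not part of this diff), a node using vnodes would uncomment and set:

    num_tokens: 256

whereas a legacy single-token node leaves num_tokens unset and specifies initial_token directly.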
@@ -78,6 +79,14 @@ authorizer: AllowAllAuthorizer
 # Will be disabled automatically for AllowAllAuthorizer.
 permissions_validity_in_ms: 2000
 
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, this must be
+# non-zero as well.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 1000
+
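A minimal sketch of how the two cache settings interact, with illustrative values that are not part of this diff:

    permissions_validity_in_ms: 10000
    permissions_update_interval_in_ms: 2000

Here an entry older than 2 seconds is refreshed asynchronously on the next access, with the old value served until the reload completes, and no entry is served past the 10-second validity limit.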
 # The partitioner is responsible for distributing groups of rows (by
 # partition key) across nodes in the cluster. You should leave this
 # alone for new clusters. The partitioner can NOT be changed without
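For reference, the setting this comment governs looks like the following; the class shown is the default shipped with this generation of cassandra.yaml, assumed here rather than taken from the diff:

    partitioner: org.apache.cassandra.dht.Murmur3Partitioner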
@@ -332,11 +341,11 @@ start_rpc: true
 # Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
 # here if you want to listen on all interfaces, but that will break clients
 # that rely on node auto-discovery.
-rpc_address: 0.0.0.0
+rpc_address: 127.0.0.1
 # port for Thrift to listen for clients on
 rpc_port: 9160
 
-# enable or disable keepalive on rpc connections
+# enable or disable keepalive on rpc/native connections
 rpc_keepalive: true
 
 # Cassandra provides two out-of-the-box options for the RPC Server:
@@ -349,7 +358,8 @@ rpc_keepalive: true
 # hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
 # asynchronously using a small number of threads that does not vary with the amount
 # of thrift clients (and thus scales well to many clients). The rpc requests are still
-# synchronous (one thread per active request).
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
 #
 # The default is sync because on Windows hsha is about 30% slower. On Linux,
 # sync/hsha performance is about the same, with hsha of course using less memory.
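A minimal sketch of the hsha setup the added warning is about, assuming an illustrative thread cap (128 is not a recommendation from this diff):

    rpc_server_type: hsha
    rpc_max_threads: 128

With rpc_max_threads left at its unlimited default, each active request can pin a thread, which is why the new comment calls capping it essential.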
@@ -420,15 +430,22 @@ auto_snapshot: true
 tombstone_warn_threshold: 1000
 tombstone_failure_threshold: 100000
 
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns. The competing causes are, Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#   1) a smaller granularity means more index entries are generated
+#      and looking up rows within the partition by collation column
+#      is faster
+#   2) but, Cassandra will keep the collation index in memory for hot
+#      rows (as part of the key cache), so a larger granularity means
+#      you can cache more hot rows
 column_index_size_in_kb: 64
 
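As a worked example of the granularity trade-off above (sizes are illustrative): at the default 64kb, a partition holding roughly 1mb of row data produces about 16 index entries; halving the setting to 32kb doubles the entry count, speeding lookups within the partition but also doubling the index's footprint in the key cache.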
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken when increasing this threshold, as large batches can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
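For a sense of scale (numbers are illustrative assumptions): a logged batch of 100 inserts carrying about 100 bytes of mutation data each already totals roughly 10kb, twice this default threshold, and would trigger the warning described above.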
# Size limit for rows being compacted in memory. Larger rows will spill
|
|
# Size limit for rows being compacted in memory. Larger rows will spill
|
|
# over to disk and use a slower two-pass compaction process. A message
|
|
# over to disk and use a slower two-pass compaction process. A message
|
|
# will be logged specifying the row key.
|
|
# will be logged specifying the row key.
|
|
@@ -474,6 +491,12 @@ compaction_preheat_key_cache: true
 # When unset, the default is 200 Mbps or 25 MB/s.
 # stream_throughput_outbound_megabits_per_sec: 200
 
+# Throttles all streaming file transfer between the datacenters;
+# this setting allows users to throttle inter-dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# inter_dc_stream_throughput_outbound_megabits_per_sec:
+
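A hedged sketch of the two throttles working together, with invented values:

    stream_throughput_outbound_megabits_per_sec: 200
    inter_dc_stream_throughput_outbound_megabits_per_sec: 100

This caps any outbound stream at 200 Mbps while additionally holding streams that cross datacenter boundaries to 100 Mbps.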
 # How long the coordinator should wait for read operations to complete
 read_request_timeout_in_ms: 5000
 # How long the coordinator should wait for seq or index scans to complete