# Cassandra storage config YAML

# NOTE:
# See http://wiki.apache.org/cassandra/StorageConfiguration for
# full explanations of configuration directives
# /NOTE

# The name of the cluster. This is mainly used to prevent machines in
# one logical cluster from joining another.
cluster_name: 'Test Cluster'

# This defines the number of tokens randomly assigned to this node on the ring
# The more tokens, relative to other nodes, the larger the proportion of data
# that this node will store. You probably want all nodes to have the same number
# of tokens assuming they have equal hardware capability.
#
# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
# and will use the initial_token as described below.
#
# Specifying initial_token will override this setting on the node's initial start;
# on subsequent starts, this setting will apply even if initial_token is set.
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 256

# initial_token allows you to specify tokens manually. While you can use it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
# comma-separated list -- it's primarily used when adding nodes to legacy clusters
# that do not have vnodes enabled.
# initial_token:

# See http://wiki.apache.org/cassandra/HintedHandoff
# May either be "true" or "false" to enable globally, or contain a list
# of data centers to enable per-datacenter.
# hinted_handoff_enabled: DC1,DC2
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours
# Maximum throttle in KB per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024
# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2

# Maximum throttle in KB per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024

# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
# - AllowAllAuthenticator performs no checks - set it to disable authentication.
# - PasswordAuthenticator relies on username/password pairs to authenticate
#   users. It keeps usernames and hashed passwords in the system_auth.credentials table.
#   Please increase the system_auth keyspace replication factor if you use this authenticator.
authenticator: AllowAllAuthenticator

# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
# CassandraAuthorizer}.
#
# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
# - CassandraAuthorizer stores permissions in the system_auth.permissions table. Please
#   increase the system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer
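
# A hedged example of switching to password-based auth; the values below are
# illustrative, not defaults. If you enable these, remember to increase the
# replication factor of the system_auth keyspace first, as noted above.
# authenticator: PasswordAuthenticator
# authorizer: CassandraAuthorizer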

# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000

# Refresh interval for permissions cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If permissions_validity_in_ms is non-zero, then this must be
# non-zero as well.
# Defaults to the same value as permissions_validity_in_ms.
# permissions_update_interval_in_ms: 1000

# The partitioner is responsible for distributing groups of rows (by
# partition key) across nodes in the cluster. You should leave this
# alone for new clusters. The partitioner can NOT be changed without
# reloading all data, so when upgrading you should set this to the
# same partitioner you were already using.
#
# Besides Murmur3Partitioner, partitioners included for backwards
# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
# OrderPreservingPartitioner.
#
partitioner: org.apache.cassandra.dht.Murmur3Partitioner

# Directories where Cassandra should store data on disk. Cassandra
# will spread data evenly across them, subject to the granularity of
# the configured compaction strategy.
# If not set, the default directory is $CASSANDRA_HOME/data/data.
# data_file_directories:
#     - /var/lib/cassandra/data

# Commit log. When running on magnetic HDD, this should be a
# separate spindle from the data directories.
# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
# commitlog_directory: /var/lib/cassandra/commitlog

# policy for data disk failures:
# die: shut down gossip and Thrift and kill the JVM for any fs errors or
#      single-sstable errors, so the node can be replaced.
# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
#       can still be inspected via JMX.
# best_effort: stop using the failed disk and respond to requests based on
#              remaining available sstables. This means you WILL see obsolete
#              data at CL.ONE!
# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop

# policy for commit disk failures:
# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
#       can still be inspected via JMX.
# stop_commit: shut down the commit log, letting writes collect but
#              continuing to service reads, as in pre-2.0.5 Cassandra
# ignore: ignore fatal errors and let the batches fail
commit_failure_policy: stop

# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
key_cache_size_in_mb:

# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and are relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400

# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100

# Maximum size of the row cache in memory.
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0

# Duration in seconds after which Cassandra should
# save the row cache. Caches are saved to saved_caches_directory as specified
# in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and are relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0

# Number of keys from the row cache to save
# Disabled by default, meaning all keys are going to be saved
# row_cache_keys_to_save: 100

# Maximum size of the counter cache in memory.
#
# Counter cache helps to reduce counter locks' contention for hot counter cells.
# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
# of the lock hold, helping with hot counter cell updates, but will not allow skipping
# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
# in memory, not the whole counter, so it's relatively cheap.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
counter_cache_size_in_mb:

# Duration in seconds after which Cassandra should
# save the counter cache (keys only). Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Default is 7200 or 2 hours.
counter_cache_save_period: 7200

# Number of keys from the counter cache to save
# Disabled by default, meaning all keys are going to be saved
# counter_cache_keys_to_save: 100
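
# An illustrative sketch of sizing the caches explicitly instead of relying
# on the "auto" defaults above; the numbers are assumptions for a node with
# a large heap and a hot-row workload, not recommendations.
# key_cache_size_in_mb: 100
# row_cache_size_in_mb: 200
# counter_cache_size_in_mb: 50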

# The off-heap memory allocator. Affects storage engine metadata as
# well as caches. Experiments show that JEMalloc saves some memory
# compared to the native GCC allocator (i.e., JEMalloc is more
# fragmentation-resistant).
#
# Supported values are: NativeAllocator, JEMallocAllocator
#
# If you intend to use JEMallocAllocator you have to install JEMalloc as a
# library and modify cassandra-env.sh as directed in the file.
#
# Defaults to NativeAllocator
# memory_allocator: NativeAllocator

# saved caches
# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
# saved_caches_directory: /var/lib/cassandra/saved_caches

# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
# This window should be kept short because the writer threads will
# be unable to do extra work while waiting. (You may need to increase
# concurrent_writes for the same reason.)
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000

# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32 MB, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
commitlog_segment_size_in_mb: 32

# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
    # Addresses of hosts that are deemed contact points.
    # Cassandra nodes use this list of hosts to find each other and learn
    # the topology of the ring. You must change this if you are running
    # multiple nodes!
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          # seeds is actually a comma-delimited list of addresses.
          # Ex: "<ip1>,<ip2>,<ip3>"
          - seeds: "127.0.0.1"
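
# A hedged multi-node sketch: the addresses below are placeholders. Every
# node in the cluster should list the same small set of seeds (two or three
# per datacenter is typical), and the seed nodes must list themselves too.
#          - seeds: "10.0.0.1,10.0.0.2,10.1.0.1"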

# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them. Same applies to
# "concurrent_counter_writes", since counter writes read the current
# values before incrementing and writing them back.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32
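
# Worked example of the rules of thumb above, under assumed hardware of
# 2 data drives and 8 cores; purely illustrative:
# concurrent_reads: 32            # 16 * 2 drives
# concurrent_writes: 64           # 8 * 8 cores
# concurrent_counter_writes: 32   # same rule as concurrent_reads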

# Total memory to use for sstable-reading buffers. Defaults to
# the smaller of 1/4 of heap or 512MB.
# file_cache_size_in_mb: 512

# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold.
# If omitted, Cassandra will set both to 1/4 the size of the heap.
# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048

# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. A larger
# memtable_cleanup_threshold means larger flushes and hence less compaction,
# but also less concurrent flush activity, which can make it difficult to
# keep your disks fed under heavy write load.
#
# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
# memtable_cleanup_threshold: 0.11

# Specify the way Cassandra allocates and manages memtable memory.
# Options are:
#   heap_buffers:    on heap nio buffers
#   offheap_buffers: off heap (direct) nio buffers
#   offheap_objects: native memory, eliminating nio buffer heap overhead
memtable_allocation_type: heap_buffers
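
# A minimal sketch of moving memtables off heap to reduce heap pressure;
# assumes the machine has memory headroom beyond the JVM heap, and the
# size below is illustrative only:
# memtable_allocation_type: offheap_objects
# memtable_offheap_space_in_mb: 2048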

# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 8192 on 64-bit JVMs.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 8192

# This sets the number of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked.
#
# memtable_flush_writers defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
# memtable_flush_writers: 8

# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
# is a best-effort process. In extreme conditions Cassandra may need to use
# more than this amount of memory.
index_summary_capacity_in_mb:

# How frequently index summaries should be resampled. This is done
# periodically to redistribute memory from the fixed-size pool to sstables
# proportional to their recent read rates. Setting to -1 will disable this
# process, leaving existing index summaries at their current sampling level.
index_summary_resize_interval_in_minutes: 60

# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: false
trickle_fsync_interval_in_kb: 10240

# TCP port, for commands and data
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
storage_port: 7000

# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
ssl_storage_port: 7001

# Address or interface to bind to and tell other Cassandra nodes to connect to.
# You _must_ change this if you want multiple nodes to be able to communicate!
#
# Set listen_address OR listen_interface, not both. Interfaces must correspond
# to a single address; IP aliasing is not supported.
#
# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
# will always do the Right Thing _if_ the node is properly configured
# (hostname, name resolution, etc), and the Right Thing is to use the
# address associated with the hostname (it might not be).
#
# Setting listen_address to 0.0.0.0 is always wrong.
#
# If you choose to specify the interface by name and the interface has an IPv4
# and an IPv6 address, you can specify which should be chosen using
# listen_interface_prefer_ipv6. If false, the first IPv4 address will be used.
# If true, the first IPv6 address will be used. Defaults to false, preferring
# IPv4. If there is only one address it will be selected regardless of IPv4/IPv6.
listen_address: localhost
# listen_interface: eth0
# listen_interface_prefer_ipv6: false
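
# A hedged multi-node sketch: each node would advertise its own routable
# address (the value below is a placeholder), or bind by interface instead;
# never set listen_address to 0.0.0.0:
# listen_address: 10.0.0.5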

# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4

# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
native_transport_port: 9042
# The maximum number of threads for handling requests when the native
# transport is used. This is similar to rpc_max_threads though the default
# differs slightly (and there is no native_transport_min_threads; idle threads
# will always be stopped after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum allowed frame size. Frames (requests) larger than this will
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256

# The maximum number of concurrent client connections.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections: -1

# The maximum number of concurrent client connections per source IP.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections_per_ip: -1

# Whether to start the Thrift RPC server.
start_rpc: true

# The address or interface to bind the Thrift RPC service and native transport
# server to.
#
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
# to a single address; IP aliasing is not supported.
#
# Leaving rpc_address blank has the same effect as on listen_address
# (i.e. it will be based on the configured hostname of the node).
#
# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
# set broadcast_rpc_address to a value other than 0.0.0.0.
#
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
#
# If you choose to specify the interface by name and the interface has an IPv4
# and an IPv6 address, you can specify which should be chosen using
# rpc_interface_prefer_ipv6. If false, the first IPv4 address will be used.
# If true, the first IPv6 address will be used. Defaults to false, preferring
# IPv4. If there is only one address it will be selected regardless of IPv4/IPv6.
rpc_address: 0.0.0.0
# rpc_interface: eth1
# rpc_interface_prefer_ipv6: false

# port for Thrift to listen for clients on
rpc_port: 9160

# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
# be set to 0.0.0.0. If left blank, this will be set to the value of
# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
# be set.
broadcast_rpc_address: 127.0.0.1
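
# Illustrative sketch for the common bind-everywhere setup: keep rpc_address
# at 0.0.0.0 and have each node broadcast its own routable address (the
# value below is a placeholder):
# broadcast_rpc_address: 10.0.0.5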
# enable or disable keepalive on rpc/native connections
rpc_keepalive: true

# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync  -> One thread per thrift connection. For a very large number of clients, memory
#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
#          per thread, and that will correspond to your use of virtual memory (but physical memory
#          may be limited depending on use of stack space).
#
# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
#          asynchronously using a small number of threads that does not vary with the number
#          of thrift clients (and thus scales well to many clients). The rpc requests are still
#          synchronous (one thread per active request). If hsha is selected then it is essential
#          that rpc_max_threads is changed from the default value of unlimited.
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, you can provide your own RPC server by providing the fully-qualified
# class name of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync

# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the maximum number of requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048

# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:

# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when left unset it is defined by net.ipv4.tcp_wmem
# See:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_rmem
# and: man tcp
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:

# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15

# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false

# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
snapshot_before_compaction: false

# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
auto_snapshot: true

# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
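
# Illustrative only: raised limits for a workload that knowingly scans
# many tombstones; understand the heap-exhaustion risk described above first.
# tombstone_warn_threshold: 5000
# tombstone_failure_threshold: 500000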

# Granularity of the collation index of rows within a partition.
# Increase if your rows are large, or if you have a very large
# number of rows per partition. The competing goals are these:
#   1) a smaller granularity means more index entries are generated
#      and looking up rows within the partition by collation column
#      is faster
#   2) but, Cassandra will keep the collation index in memory for hot
#      rows (as part of the key cache), so a larger granularity means
#      you can cache more hot rows
column_index_size_in_kb: 64

# Log WARN on any batch size exceeding this value. 5 KB per batch by default.
# Take caution when increasing this threshold, as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5

# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long-running compaction. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
# concurrent_compactors: 1

# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16

# When compacting, the replacement sstable(s) can be opened before they
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
# between the sstables, reducing page cache churn and keeping hot rows hot.
sstable_preemptive_open_interval_in_mb: 50

# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200

# Throttles all streaming file transfer between the datacenters;
# this setting allows users to throttle inter-dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# inter_dc_stream_throughput_outbound_megabits_per_sec:

# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long the coordinator should wait for counter writes to complete
counter_write_request_timeout_in_ms: 5000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000

# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure NTP is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false

# Enable socket timeout for streaming operations.
# When a timeout occurs during streaming, streaming is retried from the start
# of the current file. This _can_ involve re-streaming a significant amount of
# data, so you should avoid setting the value too low.
# Default value is 0, which means streams never time out.
# streaming_socket_timeout_in_ms: 0

# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8

# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
#   requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
#   correlated failures. It does this by grouping machines into
#   "datacenters" and "racks." Cassandra will do its best not to have
#   more than one replica on the same "rack" (which may not actually
#   be a physical location)
#
# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# Out of the box, Cassandra provides
#  - SimpleSnitch:
#    Treats Strategy order as proximity. This can improve cache
#    locality when disabling read repair. Only appropriate for
#    single-datacenter deployments.
#  - GossipingPropertyFileSnitch
#    This should be your go-to snitch for production use. The rack
#    and datacenter for the local node are defined in
#    cassandra-rackdc.properties and propagated to other nodes via
#    gossip. If cassandra-topology.properties exists, it is used as a
#    fallback, allowing migration from the PropertyFileSnitch.
#  - PropertyFileSnitch:
#    Proximity is determined by rack and data center, which are
#    explicitly configured in cassandra-topology.properties.
#  - Ec2Snitch:
#    Appropriate for EC2 deployments in a single Region. Loads Region
#    and Availability Zone information from the EC2 API. The Region is
#    treated as the datacenter, and the Availability Zone as the rack.
#    Only private IPs are used, so this will not work across multiple
#    Regions.
#  - Ec2MultiRegionSnitch:
#    Uses public IPs as broadcast_address to allow cross-region
#    connectivity. (Thus, you should set seed addresses to the public
#    IP as well.) You will need to open the storage_port or
#    ssl_storage_port on the public IP firewall. (For intra-Region
#    traffic, Cassandra will switch to the private IP after
#    establishing a connection.)
#  - RackInferringSnitch:
#    Proximity is determined by rack and data center, which are
#    assumed to correspond to the 3rd and 2nd octet of each node's IP
#    address, respectively. Unless this happens to match your
#    deployment conventions, this is best used as an example of
#    writing a custom Snitch class and is provided in that spirit.
#
# You can use a custom Snitch by setting this to the full class name
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch
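
# A production-leaning sketch per the guidance above: switch to
# GossipingPropertyFileSnitch and describe this node in
# cassandra-rackdc.properties (the dc/rack names below are placeholders):
# endpoint_snitch: GossipingPropertyFileSnitch
#
# In cassandra-rackdc.properties:
#   dc=DC1
#   rack=RAC1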

# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
# controls how often to reset all host scores, allowing a bad host to
# possibly recover
dynamic_snitch_reset_interval_in_ms: 600000
# if set greater than zero and read_repair_chance is < 1.0, this will allow
# 'pinning' of replicas to hosts in order to increase cache capacity.
# The badness threshold will control how much worse the pinned host has to be
# before the dynamic snitch will prefer other replicas over it. This is
# expressed as a double which represents a percentage. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
dynamic_snitch_badness_threshold: 0.1

# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
# according to the specific policy. This is useful for multi-tenancy
# with a single Cassandra cluster.
# NOTE: This is specifically for requests from the client and does
# not affect internode communication.
# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler

# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
# RoundRobin
#  - throttle_limit -- The throttle_limit is the number of in-flight
#                      requests per client. Requests beyond
#                      that limit are queued up until
#                      running requests can complete.
#                      The value of 80 here is twice the number of
#                      concurrent_reads + concurrent_writes.
#  - default_weight -- default_weight is optional and allows for
#                      overriding the default which is 1.
#  - weights -- Weights are optional and will default to 1 or the
#               overridden default_weight. The weight translates into how
#               many requests are handled during each turn of the
#               RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
#    throttle_limit: 80
#    default_weight: 5
#    weights:
#      Keyspace1: 1
#      Keyspace2: 5

# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace

# Enable or disable inter-node encryption.
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys) and TLS_RSA_WITH_AES_128_CBC_SHA as the
# cipher suite for authentication, key exchange and encryption of the actual
# data transfers. Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are: all, none, dc, rack
#
# If set to dc, Cassandra will encrypt the traffic between the DCs
# If set to rack, Cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
server_encryption_options:
    internode_encryption: none
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
    # require_client_auth: false
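
# A hedged sketch of datacenter-level internode encryption; the paths and
# passwords are placeholders and should point at keystores you generated
# yourself, as urged above:
# server_encryption_options:
#     internode_encryption: dc
#     keystore: /etc/cassandra/certs/node.keystore
#     keystore_password: <keystore password>
#     truststore: /etc/cassandra/certs/node.truststore
#     truststore_password: <truststore password>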

# enable or disable client/server encryption.
client_encryption_options:
    enabled: false
    keystore: conf/.keystore
    keystore_password: cassandra
    # require_client_auth: false
    # Set truststore and truststore_password if require_client_auth is true
    # truststore: conf/.truststore
    # truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
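
# An illustrative sketch of enabling client encryption with two-way TLS;
# paths and passwords are placeholders:
# client_encryption_options:
#     enabled: true
#     keystore: /etc/cassandra/certs/client.keystore
#     keystore_password: <keystore password>
#     require_client_auth: true
#     truststore: /etc/cassandra/certs/client.truststore
#     truststore_password: <truststore password>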

# internode_compression controls whether traffic between nodes is
# compressed.
# can be:  all  - all traffic is compressed
#          dc   - traffic between different datacenters is compressed
#          none - nothing is compressed.
internode_compression: all

# Enable or disable tcp_nodelay for inter-dc communication.
# Disabling it will result in larger (but fewer) network packets being sent,
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false