Update rook-ceph kubernetes deployment for k8s 1.15
[infra/stack/kubernetes.git] / apps / ceph / kubespray / playbooks / roles / common / vars / main.yml
index bc0decb67bb2c05d93279a65f9e83ac8cdc13cbe..5a35dd8148ca55b562c2e0e80b7d5c646c075719 100644 (file)
@@ -22,11 +22,32 @@ rook_storage_dir_path: "/rook/storage-dir"
 
 rook_namespace: "rook-ceph"
 
-rook_use_host_network: "false"
+# Per SuSE best practices for Rook deployments, separating nodes providing storage
+# from nodes running workloads consuming said storage
+# https://documentation.suse.com/sbp/all/html/SBP-rook-ceph-kubernetes/index.html
+
+# Label to be used to separate ceph workloads from other workloads
+rook_storage_label: "storage-node"
+# These nodes will have the label {{rook_storage_label}}=true applied to them
+rook_storage_nodes: "{{ groups['storage'] }}"
+# These nodes will have the label {{rook_storage_label}}=false applied to them
+# as well as a taint {{rook_storage_label}}=false:NoSchedule
+rook_nostorage_nodes: "{{ groups['k8s-cluster'] | difference(rook_storage_nodes) }}"
+
+# Disabled for small test environment
+rook_ceph_crashcollector_disable: true
+rook_use_host_network: false
 rook_node_device_filter: "vdb"
 
 rook_block_pool_name: "block-pool"
 rook_block_pool_replicas: 1
+# The below values should be customized according to deployment requirements
+# See: https://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/
+rook_ceph_osd_pool_default_size: 1
+rook_ceph_osd_pool_default_min_size: 1
+rook_ceph_mon_warn_on_no_pool_redundancy: false
+# Increase this as required. Must be an odd number. 3 is the recommended value.
+rook_ceph_mon_count: 1
 
 rook_block_storage_name: "block-storage"
 rook_block_storage_fs: "xfs"
@@ -34,3 +55,5 @@ rook_block_storage_fs: "xfs"
 rook_filesystem: "{{ lookup('env', 'ROOK_FS') | default('false', true) }}"
 rook_filesystem_name: "rookfs"
 rook_filesystem_storageclass_name: "csi-cephfs"
+
+# vim: set ts=2 sw=2 expandtab: