ceph: configuration fixes 93/8493/3
author Cian Johnston <cian.johnston@est.tech>
Fri, 23 Apr 2021 09:42:06 +0000 (09:42 +0000)
committer Cian Johnston <cian.johnston@est.tech>
Fri, 23 Apr 2021 11:15:07 +0000 (11:15 +0000)
* Wire through deviceFilter properly
* Expose rook configuration knobs via env vars

Change-Id: Idfdb2d229f542ea73835f41b1ea574a3f72b8023
Signed-off-by: Cian Johnston <cian.johnston@est.tech>
apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2

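Each of these knobs now reads its value from an environment variable via Ansible's env lookup and falls back to the previous hard-coded default when the variable is unset or empty; the second argument to Jinja2's default filter (true) makes the fallback apply to empty strings as well. A minimal sketch of the pattern, using a hypothetical variable name:

    example_knob: "{{ lookup('env', 'EXAMPLE_KNOB') | default('fallback-value', true) }}"
    # EXAMPLE_KNOB unset or empty -> 'fallback-value'
    # EXAMPLE_KNOB=custom-value   -> 'custom-value'
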
index 5a35dd8148ca55b562c2e0e80b7d5c646c075719..81ae14ad41f25a521233d0394a4f9d00fc2b2cb3 100644 (file)
@@ -20,7 +20,7 @@
 rook_data_dir_path: "/var/lib/rook"
 rook_storage_dir_path: "/rook/storage-dir"
 
-rook_namespace: "rook-ceph"
+rook_namespace: "{{ lookup('env', 'ROOK_NAMESPACE') | default('rook-ceph', true) }}"
 
 # Per SuSE best practices for Rook deployments, separating nodes providing storage
 # from nodes running workloads consuming said storage
@@ -35,25 +35,25 @@ rook_storage_nodes: "{{ groups['storage'] }}"
 rook_nostorage_nodes: "{{ groups['k8s-cluster'] | difference(rook_storage_nodes) }}"
 
 # Disabled for small test environment
-rook_ceph_crashcollector_disable: true
-rook_use_host_network: false
-rook_node_device_filter: "vdb"
+rook_ceph_crashcollector_disable: "{{ lookup('env', 'ROOK_CEPH_CRASHCOLLECTOR_DISABLE') | default(true, true) }}"
+rook_use_host_network: "{{ lookup('env', 'ROOK_USE_HOST_NETWORK') | default(false, true) }}"
+rook_node_device_filter: "{{ lookup('env', 'ROOK_NODE_DEVICE_FILTER') | default('^vdb$', true) }}"
 
-rook_block_pool_name: "block-pool"
-rook_block_pool_replicas: 1
+rook_block_pool_name: "{{ lookup('env', 'ROOK_BLOCK_POOL_NAME') | default('block-pool', true) }}"
+rook_block_pool_replicas: "{{ lookup('env', 'ROOK_BLOCK_POOL_REPLICAS') | default(1, true) | int }}"
 # The below values should be customized according to deployment requirements
 # See: https://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/
-rook_ceph_osd_pool_default_size: 1
-rook_ceph_osd_pool_default_min_size: 1
-rook_ceph_mon_warn_on_no_pool_redundancy: false
+rook_ceph_osd_pool_default_size: "{{ lookup('env', 'ROOK_CEPH_OSD_POOL_DEFAULT_SIZE') | default(1, true) | int }}"
+rook_ceph_osd_pool_default_min_size: "{{ lookup('env', 'ROOK_CEPH_OSD_POOL_DEFAULT_MIN_SIZE') | default(1, true) | int }}"
+rook_ceph_mon_warn_on_no_pool_redundancy: "{{ lookup('env', 'ROOK_CEPH_MON_WARN_ON_NO_POOL_REDUNDANCY') | default(false, true) }}"
 # Increase this as required. Must be an odd number. 3 is the recommended value.
-rook_ceph_mon_count: 1
+rook_ceph_mon_count: "{{ lookup('env', 'ROOK_CEPH_MON_COUNT') | default(1, true) | int }}"
 
-rook_block_storage_name: "block-storage"
-rook_block_storage_fs: "xfs"
+rook_block_storage_name: "{{ lookup('env', 'ROOK_BLOCK_STORAGE_NAME') | default('block-storage', true) }}"
+rook_block_storage_fs: "{{ lookup('env', 'ROOK_BLOCK_STORAGE_FS') | default('xfs', true) }}"
 
 rook_filesystem: "{{ lookup('env', 'ROOK_FS') | default('false', true) }}"
-rook_filesystem_name: "rookfs"
-rook_filesystem_storageclass_name: "csi-cephfs"
+rook_filesystem_name: "{{ lookup('env', 'ROOK_FILESYSTEM_NAME') | default('rookfs', true) }}"
+rook_filesystem_storageclass_name: "{{ lookup('env', 'ROOK_FILESYSTEM_STORAGECLASS_NAME') | default('csi-cephfs', true) }}"
 
 # vim: set ts=2 sw=2 expandtab:
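
The integer-valued knobs above are piped through | int, so string values taken from the environment are cast before being rendered into the Rook manifests. For anything beyond the small test environment these defaults target, the same variables can be overridden at deploy time; one possible set of values, assuming a production-like cluster and following the Ceph pool/mon guidance linked above:

    ROOK_CEPH_MON_COUNT=3
    ROOK_CEPH_OSD_POOL_DEFAULT_SIZE=3
    ROOK_CEPH_OSD_POOL_DEFAULT_MIN_SIZE=2
    ROOK_CEPH_MON_WARN_ON_NO_POOL_REDUNDANCY=true
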
index 36d29ba00d43cce6424d81a44cee727899d1a14c..c49d1c3c0588c921eba32e7ceaac02c67cf7d3d6 100644 (file)
@@ -185,7 +185,7 @@ spec:
   storage: # cluster level storage configuration and selection
     useAllNodes: true
     useAllDevices: false
-    deviceFilter: "^vdb$"
+    deviceFilter: "{{ rook_node_device_filter }}"
     location:
     config:
       # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
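
With deviceFilter now taken from rook_node_device_filter rather than hard-coded, the anchored default ('^vdb$') matches only the device named vdb instead of any device name containing "vdb". A sketch of the rendered storage section of the CephCluster manifest, assuming no ROOK_NODE_DEVICE_FILTER override:

    storage:
      useAllNodes: true
      useAllDevices: false
      deviceFilter: "^vdb$"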