+###############################################################################################################
+
+# Rook Ceph Operator Config ConfigMap
+# Use this ConfigMap to override Rook-Ceph Operator configurations.
+# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
+# Operator Deployment.
+# To move a configuration from the Operator Deployment to this ConfigMap, add the config
+# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: rook-ceph-operator-config
+ # should be in the namespace of the operator
+ namespace: "{{ rook_namespace }}" # namespace:operator
+data:
+ # Enable the CSI driver.
+ # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
+ ROOK_CSI_ENABLE_CEPHFS: "true"
+ # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+ ROOK_CSI_ENABLE_RBD: "true"
+ ROOK_CSI_ENABLE_GRPC_METRICS: "false"
+
+ # Set logging level for csi containers.
+ # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+ # CSI_LOG_LEVEL: "0"
+
+ # OMAP generator will generate the omap mapping between the PV name and the RBD image.
+ # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
+ # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable
+ # it set it to false.
+ # CSI_ENABLE_OMAP_GENERATOR: "false"
+
+ # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
+ CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
+
+ # set to false to disable deployment of snapshotter container in RBD provisioner pod.
+ CSI_ENABLE_RBD_SNAPSHOTTER: "true"
+
+ # Enable cephfs kernel driver instead of ceph-fuse.
+ # If you disable the kernel client, your application may be disrupted during upgrade.
+ # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html
+ # NOTE! cephfs quota is not supported in kernel version < 4.17
+ CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
+
+ # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+ # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+ # (Optional) Allow starting unsupported ceph-csi image
+ ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
+ # The default version of CSI supported by Rook will be started. To change the version
+ # of the CSI driver to something other than what is officially supported, change
+ # these images to the desired release of the CSI driver.
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.2.0"
+ ROOK_CSI_CEPH_IMAGE: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
+ # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1"
+ ROOK_CSI_REGISTRAR_IMAGE: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
+ # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.0.0"
+ ROOK_CSI_RESIZER_IMAGE: "{{ csi_resizer_repository }}:{{ csi_resizer_version }}"
+ # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.0.0"
+ ROOK_CSI_PROVISIONER_IMAGE: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
+ # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.0"
+ ROOK_CSI_SNAPSHOTTER_IMAGE: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
+ # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.0.0"
+ ROOK_CSI_ATTACHER_IMAGE: "{{ csi_attacher_repository }}:{{ csi_attacher_version }}"
+
+ # (Optional) set user created priorityclassName for csi plugin pods.
+ # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
+
+ # (Optional) set user created priorityclassName for csi provisioner pods.
+ # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
+
+ # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ # Default value is RollingUpdate.
+ # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+ # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ # Default value is RollingUpdate.
+ # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+ # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
+ # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
+
+ # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
+ # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
+ # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
+ # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
+
+ # (Optional) Ceph Provisioner NodeAffinity.
+ CSI_PROVISIONER_NODE_AFFINITY: "{{ rook_storage_label }}=true"
+ # (Optional) Ceph CSI provisioner tolerations list. Put here the list of taints you want to tolerate in YAML format.
+ # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+ CSI_PROVISIONER_TOLERATIONS: |
+ - key: "{{ rook_storage_label }}"
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/controlplane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+ # (Optional) Ceph CSI plugin NodeAffinity.
+ # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+ # NOTE(review): the value below schedules the CSI plugin DaemonSets only on nodes
+ # labeled rook_storage_label=false, i.e. NOT on the storage nodes themselves. Confirm
+ # that no workloads mounting Ceph volumes are ever scheduled on the storage nodes.
+ CSI_PLUGIN_NODE_AFFINITY: "{{ rook_storage_label }}=false"
+ # (Optional) Ceph CSI plugin tolerations list. Put here the list of taints you want to tolerate in YAML format.
+ # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+ # CSI_PLUGIN_TOLERATIONS: |
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/controlplane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+
+ # (Optional) Ceph CSI RBD provisioner resource requirements list. Put here the list of
+ # resource requests and limits you want to apply to the provisioner pod.
+ # CSI_RBD_PROVISIONER_RESOURCE: |
+ # - name : csi-provisioner
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-resizer
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-attacher
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-snapshotter
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-rbdplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) Ceph CSI RBD plugin resource requirements list. Put here the list of
+ # resource requests and limits you want to apply to the plugin pod.
+ # CSI_RBD_PLUGIN_RESOURCE: |
+ # - name : driver-registrar
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # - name : csi-rbdplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) Ceph CSI CephFS provisioner resource requirements list. Put here the list of
+ # resource requests and limits you want to apply to the provisioner pod.
+ # CSI_CEPHFS_PROVISIONER_RESOURCE: |
+ # - name : csi-provisioner
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-resizer
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-attacher
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-cephfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) Ceph CSI CephFS plugin resource requirements list. Put here the list of
+ # resource requests and limits you want to apply to the plugin pod.
+ # CSI_CEPHFS_PLUGIN_RESOURCE: |
+ # - name : driver-registrar
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # - name : csi-cephfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+
+ # Configure CSI CephFS grpc and liveness metrics port
+ # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
+ # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
+ # Configure CSI RBD grpc and liveness metrics port
+ # CSI_RBD_GRPC_METRICS_PORT: "9090"
+ # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
+
+ # Whether the OBC provisioner should watch on the operator namespace or not; if not, the namespace of the cluster will be used
+ ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
+
+ # (Optional) Admission controller NodeAffinity.
+ # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+ # (Optional) Admission controller tolerations list. Put here the list of taints you want to tolerate in YAML format.
+ # Admission controller would be best to start on the same nodes as other ceph daemons.
+ # ADMISSION_CONTROLLER_TOLERATIONS: |
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/controlplane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+
+# Some other config values need to be set in this ConfigMap
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: rook-config-override
+ namespace: "{{ rook_namespace }}" # namespace:cluster
+data:
+ config: |
+ [global]
+ osd_pool_default_size = {{ rook_ceph_osd_pool_default_size }}
+ osd_pool_default_min_size = {{ rook_ceph_osd_pool_default_min_size }}
+ mon_warn_on_pool_no_redundancy = {{ rook_ceph_mon_warn_on_no_pool_redundancy }}
+---