Update rook-ceph kubernetes deployment
[infra/stack/kubernetes.git] apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2
index 24a5db0d55b9b3b1055776ec4f00c43e856fe6b6..6bee51e8199191c0fa6682d8e48670c676fa25bc 100644
@@ -1,5 +1,5 @@
 # ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+#  Copyright (C) 2021 The Nordix Foundation. All rights reserved.
 # ================================================================================
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # The deployment for the rook operator
 # Contains the common settings for most Kubernetes deployments.
 # For example, to create the rook-ceph cluster:
-#   kubectl create -f common.yaml
-#   kubectl create -f operator.yaml
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
 #   kubectl create -f cluster.yaml
 #
 # Also see other operator sample files for variations of operator.yaml:
 # - operator-openshift.yaml: Common settings for running in OpenShift
-#################################################################################################################
+###############################################################################################################
+
+# Rook Ceph Operator Config ConfigMap
+# Use this ConfigMap to override Rook-Ceph Operator configurations.
+# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
+#       Operator Deployment.
+# To move a configuration from the Operator Deployment to this ConfigMap, add the config
+# here, then remove it from the Deployment to avoid any future confusion.
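+# For example, a setting such as ROOK_LOG_LEVEL, which is defined as an env var in the
+# Operator Deployment further below, could instead be declared in this ConfigMap as:
+#   ROOK_LOG_LEVEL: "INFO"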
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-ceph-operator-config
+  # should be in the namespace of the operator
+  namespace: "{{ rook_namespace }}" # namespace:operator
+data:
+  # Enable the default version of the CSI CephFS driver.
+  # To run a non-default version of the CSI driver, see the image properties below.
+  ROOK_CSI_ENABLE_CEPHFS: "true"
+  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+  ROOK_CSI_ENABLE_RBD: "true"
+  ROOK_CSI_ENABLE_GRPC_METRICS: "false"
+
+  # Set logging level for csi containers.
+  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+  # CSI_LOG_LEVEL: "0"
+
+  # The OMAP generator creates the omap mapping between the PV name and the RBD image.
+  # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when the RBD mirroring feature is used.
+  # By default the OMAP generator sidecar is deployed with the CSI provisioner pod; to disable
+  # it, set it to false.
+  # CSI_ENABLE_OMAP_GENERATOR: "false"
+
+  # Set to false to disable deployment of the snapshotter container in the CephFS provisioner pod.
+  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
+
+  # Set to false to disable deployment of the snapshotter container in the RBD provisioner pod.
+  CSI_ENABLE_RBD_SNAPSHOTTER: "true"
+
+  # Enable cephfs kernel driver instead of ceph-fuse.
+  # If you disable the kernel client, your application may be disrupted during upgrade.
+  # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html
+  # NOTE! cephfs quota is not supported in kernel version < 4.17
+  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
+
+  # (Optional) Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+  # (Optional) Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
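+  # As a rough sketch of where these policies land: the operator applies them to the CSIDriver
+  # objects it manages; the RBD one would look approximately like this (driver name assumes the
+  # operator namespace is "rook-ceph"):
+  #   apiVersion: storage.k8s.io/v1
+  #   kind: CSIDriver
+  #   metadata:
+  #     name: rook-ceph.rbd.csi.ceph.com
+  #   spec:
+  #     fsGroupPolicy: ReadWriteOnceWithFSType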
+
+  # (Optional) Allow starting unsupported ceph-csi image
+  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
+  # The default version of CSI supported by Rook will be started. To change the version
+  # of the CSI driver to something other than what is officially supported, change
+  # these images to the desired release of the CSI driver.
+  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.2.0"
+  ROOK_CSI_CEPH_IMAGE: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
+  # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1"
+  ROOK_CSI_REGISTRAR_IMAGE: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
+  # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.0.0"
+  ROOK_CSI_RESIZER_IMAGE: "{{ csi_resizer_repository }}:{{ csi_resizer_version }}"
+  # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.0.0"
+  ROOK_CSI_PROVISIONER_IMAGE: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
+  # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.0"
+  ROOK_CSI_SNAPSHOTTER_IMAGE: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
+  # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.0.0"
+  ROOK_CSI_ATTACHER_IMAGE: "{{ csi_attacher_repository }}:{{ csi_attacher_version }}"
+
+  # (Optional) Set a user-created priorityClassName for the CSI plugin pods.
+  # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
+
+  # (Optional) Set a user-created priorityClassName for the CSI provisioner pods.
+  # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
+
+  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+  # Kubelet directory path, if the kubelet is configured to use a path other than /var/lib/kubelet.
+  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
+
+  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
+  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
+  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
+  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
+
+  # (Optional) Ceph Provisioner NodeAffinity.
+  CSI_PROVISIONER_NODE_AFFINITY: "{{ rook_storage_label }}=true"
+  # (Optional) CEPH CSI provisioner tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # The CSI provisioner is best started on the same nodes as the other ceph daemons.
+  CSI_PROVISIONER_TOLERATIONS: |
+      - key: "{{ rook_storage_label }}"
+        operator: Exists
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+  # (Optional) Ceph CSI plugin NodeAffinity.
+  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  CSI_PLUGIN_NODE_AFFINITY: "{{ rook_storage_label }}=false"
+  # (Optional) CEPH CSI plugin tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # CSI plugins need to be started on all the nodes where clients need to mount the storage.
+  # CSI_PLUGIN_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
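+  # As a sketch (assuming rook_storage_label renders to e.g. "storage-node"), nodes would be
+  # labelled beforehand so the two affinity strings above can match them; node names here are
+  # purely illustrative:
+  #   kubectl label node storage-node-0 storage-node=true    # schedules the CSI provisioner here
+  #   kubectl label node client-node-0 storage-node=false    # schedules the CSI plugin here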
+
+  # (Optional) CEPH CSI RBD provisioner resource requirement list. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_RBD_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-snapshotter
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI RBD plugin resource requirement list. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_RBD_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI CephFS provisioner resource requirement list. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_CEPHFS_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI CephFS plugin resource requirement list. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_CEPHFS_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+
+  # Configure CSI CephFS grpc and liveness metrics ports
+  # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
+  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
+  # Configure CSI RBD grpc and liveness metrics ports
+  # CSI_RBD_GRPC_METRICS_PORT: "9090"
+  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
+
+  # Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
+  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
+
+  # (Optional) Admission controller NodeAffinity.
+  # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  # (Optional) Admission controller tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # The admission controller is best started on the same nodes as the other ceph daemons.
+  # ADMISSION_CONTROLLER_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+
+# Some other config values need to be set in this ConfigMap
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-config-override
+  namespace: "{{ rook_namespace }}" # namespace:cluster
+data:
+  config: |
+    [global]
+    osd_pool_default_size = {{ rook_ceph_osd_pool_default_size }}
+    osd_pool_default_min_size = {{ rook_ceph_osd_pool_default_min_size }}
+    mon_warn_on_pool_no_redundancy = {{ rook_ceph_mon_warn_on_no_pool_redundancy }}
+---
 # OLM: BEGIN OPERATOR DEPLOYMENT
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: rook-ceph-operator
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
   labels:
     operator: rook
     storage-backend: ceph
@@ -49,7 +352,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
       - name: rook-ceph-operator
-        image: "{{ rook_repository }}:{{ rook_version }}"
+        image: {{ rook_repository }}:{{ rook_version }}
         args: ["ceph", "operator"]
         volumeMounts:
         - mountPath: /var/lib/rook
@@ -72,17 +375,22 @@ spec:
         # - name: AGENT_TOLERATION_KEY
         #   value: "<KeyOfTheTaintToTolerate>"
         # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # - name: AGENT_TOLERATIONS
-        #   value: |
+        - name: AGENT_TOLERATIONS
+          value: |
+              - key: "{{ rook_storage_label }}"
+                operator: Exists
         #     - effect: NoSchedule
         #       key: node-role.kubernetes.io/controlplane
         #       operator: Exists
         #     - effect: NoExecute
         #       key: node-role.kubernetes.io/etcd
         #       operator: Exists
+        # (Optional) Rook Agent priority class name to set on the pod(s)
+        # - name: AGENT_PRIORITY_CLASS_NAME
+        #   value: "<PriorityClassName>"
         # (Optional) Rook Agent NodeAffinity.
-        - name: AGENT_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook,ceph"
+        - name: AGENT_NODE_AFFINITY
+          value: "{{ rook_storage_label }}=true"
         # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`.
         # `Any` uses Ceph admin credentials by default/fallback.
         # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and
@@ -107,17 +415,25 @@ spec:
         # - name: DISCOVER_TOLERATION_KEY
         #   value: "<KeyOfTheTaintToTolerate>"
         # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # - name: DISCOVER_TOLERATIONS
-        #   value: |
+        - name: DISCOVER_TOLERATIONS
+          value: |
+            - key: "{{ rook_storage_label }}"
+              operator: Exists
         #     - effect: NoSchedule
         #       key: node-role.kubernetes.io/controlplane
         #       operator: Exists
         #     - effect: NoExecute
         #       key: node-role.kubernetes.io/etcd
         #       operator: Exists
+        # (Optional) Rook Discover priority class name to set on the pod(s)
+        # - name: DISCOVER_PRIORITY_CLASS_NAME
+        #   value: "<PriorityClassName>"
         # (Optional) Discover Agent NodeAffinity.
-        # - name: DISCOVER_AGENT_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
+        - name: DISCOVER_AGENT_NODE_AFFINITY
+          value: "{{ rook_storage_label }}=true"
+        # (Optional) Discover Agent Pod Labels.
+        # - name: DISCOVER_AGENT_POD_LABELS
+        #   value: "key1=value1,key2=value2"
         # Allow rook to create multiple file systems. Note: This is considered
         # an experimental feature in Ceph as described at
         # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
@@ -129,25 +445,12 @@ spec:
         - name: ROOK_LOG_LEVEL
           value: "INFO"
 
-        # The interval to check the health of the ceph cluster and update the status in the custom resource.
-        - name: ROOK_CEPH_STATUS_CHECK_INTERVAL
-          value: "60s"
-
-        # The interval to check if every mon is in the quorum.
-        - name: ROOK_MON_HEALTHCHECK_INTERVAL
-          value: "45s"
-
-        # The duration to wait before trying to failover or remove/replace the
-        # current mon with a new mon (useful for compensating flapping network).
-        - name: ROOK_MON_OUT_TIMEOUT
-          value: "600s"
-
         # The duration between discovering devices in the rook-discover daemonset.
         - name: ROOK_DISCOVER_DEVICES_INTERVAL
           value: "60m"
 
         # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
-        # This is necessary to workaround the anyuid issues when running on OpenShift.
+        # Set this to true if SELinux is enabled (e.g. on OpenShift) to work around the anyuid issues.
         # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
         - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
           value: "false"
@@ -167,78 +470,39 @@ spec:
         - name: ROOK_DISABLE_DEVICE_HOTPLUG
           value: "false"
 
+        # Provide customised regexes as comma-separated values. For example, the regex for rbd based volumes is "(?i)rbd[0-9]+".
+        # In case of more than one regex, separate them with commas.
+        # The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
+        # To blacklist an additional disk, append its regex after a comma.
+        # If the value is empty, the default regex will be used.
+        - name: DISCOVER_DAEMON_UDEV_BLACKLIST
+          value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
+
         # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
         # in favor of the CSI driver.
         - name: ROOK_ENABLE_FLEX_DRIVER
-          value: "true"
+          value: "false"
 
         # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
         # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
         - name: ROOK_ENABLE_DISCOVERY_DAEMON
           value: "false"
 
-        # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below.
-        - name: ROOK_CSI_ENABLE_CEPHFS
-          value: "true"
+        # Time to wait until the node controller will move Rook pods to other
+        # nodes after detecting an unreachable node.
+        # Pods affected by this setting are:
+        # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
+        # The value used in this variable replaces the default value of 300 secs
+        # added automatically by k8s as Toleration for
+        # <node.kubernetes.io/unreachable>
+        # The total amount of time to reschedule Rook pods in healthy nodes
+        # before detecting a <not ready node> condition will be the sum of:
+        #  --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
+        #  --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
+        - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
+          value: "5"
 
-        # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
-        - name: ROOK_CSI_ENABLE_RBD
-          value: "true"
-        - name: ROOK_CSI_ENABLE_GRPC_METRICS
-          value: "true"
-        # The default version of CSI supported by Rook will be started. To change the version
-        # of the CSI driver to something other than what is officially supported, change
-        # these images to the desired release of the CSI driver.
-        #- name: ROOK_CSI_CEPH_IMAGE
-        #  value: "quay.io/cephcsi/cephcsi:v1.2.1"
-        #- name: ROOK_CSI_REGISTRAR_IMAGE
-        #  value: "quay.io/k8scsi/csi-node-driver-registrar:v1.1.0"
-        #- name: ROOK_CSI_PROVISIONER_IMAGE
-        #  value: "quay.io/k8scsi/csi-provisioner:v1.3.0"
-        #- name: ROOK_CSI_SNAPSHOTTER_IMAGE
-        #  value: "quay.io/k8scsi/csi-snapshotter:v1.2.0"
-        #- name: ROOK_CSI_ATTACHER_IMAGE
-        #  value: "quay.io/k8scsi/csi-attacher:v1.2.0"
-        # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
-        #- name: ROOK_CSI_KUBELET_DIR_PATH
-        #  value: "/var/lib/kubelet"
-        # (Optional) Ceph Provisioner NodeAffinity.
-        # - name: CSI_PROVISIONER_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
-        # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
-        #  CSI provisioner would be best to start on the same nodes as other ceph daemons.
-        # - name: CSI_PROVISIONER_TOLERATIONS
-        #   value: |
-        #     - effect: NoSchedule
-        #       key: node-role.kubernetes.io/controlplane
-        #       operator: Exists
-        #     - effect: NoExecute
-        #       key: node-role.kubernetes.io/etcd
-        #       operator: Exists
-        # (Optional) Ceph CSI plugin NodeAffinity.
-        # - name: CSI_PLUGIN_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
-        # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
-        # - name: CSI_PLUGIN_TOLERATIONS
-        #   value: |
-        #     - effect: NoSchedule
-        #       key: node-role.kubernetes.io/controlplane
-        #       operator: Exists
-        #     - effect: NoExecute
-        #       key: node-role.kubernetes.io/etcd
-        #       operator: Exists
         # The name of the node to pass with the downward API
-        - name: ROOK_CSI_CEPH_IMAGE
-          value: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
-        - name: ROOK_CSI_REGISTRAR_IMAGE
-          value: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
-        - name: ROOK_CSI_PROVISIONER_IMAGE
-          value: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
-        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
-          value: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
-        - name: ROOK_CSI_ATTACHER_IMAGE
-          value: "{{ csi_attacherr_repository }}:{{ csi_attacher_version }}"
         - name: NODE_NAME
           valueFrom:
             fieldRef:
@@ -253,6 +517,13 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+
+        #  Uncomment it to run lib bucket provisioner in multithreaded mode
+        #- name: LIB_BUCKET_PROVISIONER_THREADS
+        #  value: "5"
+
+      # Uncomment it to run rook operator on the host network
+      #hostNetwork: true
       volumes:
       - name: rook-config
         emptyDir: {}