Update rook-ceph kubernetes deployment for k8s 1.15 50/8450/1
author    Cian Johnston <cian.johnston@est.tech>
Tue, 23 Mar 2021 14:38:12 +0000 (14:38 +0000)
committer Cian Johnston <cian.johnston@est.tech>
Tue, 20 Apr 2021 11:02:34 +0000 (11:02 +0000)
* Upgrade rook to v1.5.9 using CRDs specific to Kubernetes <v1.16
* Upgrade Ceph to the latest release
* Add taints and tolerations to separate rook-ceph workloads
  from other workloads, to work around an RBD+XFS hangup bug
  (see the sketch below)
* Use a separate volume for Ceph block storage

Change-Id: I3a634204ab63183adb137d9c07bf026b16ec2fcf
Signed-off-by: Cian Johnston <cian.johnston@est.tech>
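The taint/toleration scheme works as follows: nodes in the 'storage' group are
labelled storage-node=true and tainted storage-node=true:NoSchedule, keeping
ordinary workloads off them, while the Rook/Ceph pods opt back in through a
toleration and are pinned to the labelled nodes via node affinity. A minimal
sketch of the resulting scheduling constraints on a Ceph daemon pod (label key
taken from the rook_storage_label default below; values illustrative):

    spec:
      tolerations:
      # Tolerate the NoSchedule taint carried by the storage nodes
      - key: "storage-node"
        operator: Exists
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              # Only schedule onto nodes labelled storage-node=true
              - key: "storage-node"
                operator: In
                values:
                - "true"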
17 files changed:
apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml
apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/crds.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/ping-tunnel-workaround.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2
apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml
apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml
apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2
playbooks/roles/package/defaults/main.yaml
vars/kubernetes.yaml

apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
index bc0decb..5a35dd8 100644
@@ -22,11 +22,32 @@ rook_storage_dir_path: "/rook/storage-dir"
 
 rook_namespace: "rook-ceph"
 
-rook_use_host_network: "false"
+# Per SUSE best practices for Rook deployments, nodes providing storage are
+# separated from nodes running the workloads that consume that storage:
+# https://documentation.suse.com/sbp/all/html/SBP-rook-ceph-kubernetes/index.html
+
+# Label to be used to separate ceph workloads from other workloads
+rook_storage_label: "storage-node"
+# These nodes will have the label {{ rook_storage_label }}=true applied to them,
+# as well as a taint {{ rook_storage_label }}=true:NoSchedule
+rook_storage_nodes: "{{ groups['storage'] }}"
+# These nodes will have the label {{ rook_storage_label }}=false applied to them
+rook_nostorage_nodes: "{{ groups['k8s-cluster'] | difference(rook_storage_nodes) }}"
+
+# Disabled for small test environment
+rook_ceph_crashcollector_disable: true
+rook_use_host_network: false
 rook_node_device_filter: "vdb"
 
 rook_block_pool_name: "block-pool"
 rook_block_pool_replicas: 1
+# The below values should be customized according to deployment requirements
+# See: https://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/
+rook_ceph_osd_pool_default_size: 1
+rook_ceph_osd_pool_default_min_size: 1
+rook_ceph_mon_warn_on_no_pool_redundancy: false
+# Increase this as required. Must be an odd number. 3 is the recommended value.
+rook_ceph_mon_count: 1
 
 rook_block_storage_name: "block-storage"
 rook_block_storage_fs: "xfs"
@@ -34,3 +55,5 @@ rook_block_storage_fs: "xfs"
 rook_filesystem: "{{ lookup('env', 'ROOK_FS') | default('false', true) }}"
 rook_filesystem_name: "rookfs"
 rook_filesystem_storageclass_name: "csi-cephfs"
+
+# vim: set ts=2 sw=2 expandtab:
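The rook_ceph_osd_pool_default_* and mon settings above presumably feed into
Ceph's runtime configuration; a common Rook mechanism for this is the
rook-config-override ConfigMap. A sketch under that assumption (the template
that actually consumes these variables is not part of this excerpt, and the
exact Ceph option names are assumed):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: rook-config-override
      namespace: rook-ceph
    data:
      config: |
        [global]
        # Mirrors the single-replica defaults above for a small test environment
        osd_pool_default_size = 1
        osd_pool_default_min_size = 1
        # Silences the health warning produced by replica-1 pools
        mon_warn_on_pool_no_redundancy = false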
apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml
index 4c4ebe9..044007c 100644
@@ -37,7 +37,7 @@
 
 - name: Delete existing rook cluster CRD if any
   k8s:
-    api_version: apiextensions.k8s.io/v1beta1
+    api_version: apiextensions.k8s.io/v1
     state: absent
     kind: CustomResourceDefinition
     name: cephclusters.ceph.rook.io
@@ -51,6 +51,7 @@
   with_items:
     - operator.yaml.j2
     - common.yaml.j2
+    - crds.yaml.j2
   loop_control:
     loop_var: config_file
   ignore_errors: true
   delay: 5
   tags: reset
 
+- name: Label storage nodes  # noqa 305
+  shell: "kubectl label node {{ item }} {{ rook_storage_label }}=true --overwrite"
+  changed_when: true
+  with_items: "{{ rook_storage_nodes }}"
+
+- name: Taint storage nodes  # noqa 305
+  shell: "kubectl taint node {{ item }} {{ rook_storage_label }}=true:NoSchedule --overwrite"
+  changed_when: true
+  with_items: "{{ rook_storage_nodes }}"
+
+- name: Label nodes for other workloads  # noqa 305
+  shell: "kubectl label node {{ item }} {{ rook_storage_label }}=false --overwrite"
+  changed_when: true
+  with_items: "{{ rook_nostorage_nodes }}"
+
 - name: Create rook operator
   k8s:
     state: present
     definition: "{{ lookup('template', config_file) }}"
   with_items:
+    - crds.yaml.j2
     - common.yaml.j2
     - operator.yaml.j2
  loop_control:
    loop_var: config_file
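Because the labelling and tainting is done with raw kubectl through the shell
module, a follow-up check makes misapplied labels easy to spot in the play
output. An illustrative verification task, not part of the change (kubectl's
-L flag prints the given label as an extra column):

    - name: Show storage labels on nodes  # noqa 305
      shell: "kubectl get nodes -L {{ rook_storage_label }}"
      register: storage_label_check
      changed_when: false

    - name: Print storage label check
      debug:
        msg: "{{ storage_label_check.stdout_lines }}"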
apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
index 60c6665..36d29ba 100644
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a production cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
+# in this example. See the documentation for more details on storage settings available.
+
+# For example, to create the cluster:
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+#   kubectl create -f cluster.yaml
+#################################################################################################################
+
 apiVersion: ceph.rook.io/v1
 kind: CephCluster
 metadata:
   name: rook-ceph
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 spec:
   cephVersion:
     # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v12 is luminous, v13 is mimic, and v14 is nautilus.
-    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different
+    # v13 is mimic, v14 is nautilus, and v15 is octopus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v15 flag, which pulls the latest release and could result in different
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    # If you want to be more precise, you can always use a timestamp tag such as ceph/ceph:v15.2.9-20201217.
+    # Such a tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities.
     image: "{{ ceph_repository }}:{{ ceph_version }}"
-    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
-    # After nautilus is released, Rook will be updated to support nautilus.
+    # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported.
+    # Future versions such as `pacific` would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
-  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
+  # The path on the host where configuration files will be persisted. Must be specified.
   # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
   # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
   dataDirHostPath: "{{ rook_data_dir_path }}"
@@ -41,14 +53,24 @@ spec:
   # Use at your OWN risk
   # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
   skipUpgradeChecks: false
-  # set the amount of mons to be started
+  # Whether or not to continue if PGs are not clean during an upgrade
+  continueUpgradeAfterChecksEvenIfNotHealthy: false
+  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
+  # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
+  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
+  # continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
+  # The default wait timeout is 10 minutes.
+  waitTimeoutForHealthyOSDInMinutes: 10
   mon:
-    count: 3
+    # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3.
+    count: {{ rook_ceph_mon_count }}
+    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
+    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
     allowMultiplePerNode: true
   mgr:
-    modules:
+    modules: []
     # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
-    # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
+    # are already enabled by other settings in the cluster CR.
     # - name: pg_autoscaler
     #   enabled: true
   # enable the ceph dashboard for viewing cluster status
@@ -60,6 +82,7 @@ spec:
     # port: 8443
     # serve the dashboard using SSL
     ssl: true
+  # enable prometheus alerting for cluster
   monitoring:
     # requires Prometheus to be pre-installed
     enabled: false
@@ -68,32 +91,55 @@ spec:
     # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
     # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
     # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
-    rulesNamespace: {{ rook_namespace }}
+    rulesNamespace: "{{ rook_namespace }}"
   network:
-    # toggle to use hostNetwork
     hostNetwork: {{ rook_use_host_network }}
-  rbdMirroring:
-    # The number of daemons that will perform the rbd mirroring.
-    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
-    workers: 0
+  # enable the crash collector for ceph daemon crash collection
+  crashCollector:
+    disable: {{ rook_ceph_crashcollector_disable }}
+  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+  cleanupPolicy:
+    # Since cluster cleanup is destructive to data, confirmation is required.
+    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
+    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
+    # Rook will immediately stop configuring the cluster and only wait for the delete command.
+    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
+    confirmation: ""
+    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
+    sanitizeDisks:
+      # method indicates if the entire disk should be sanitized or simply ceph's metadata
+      # in both cases, re-installation is possible
+      # possible choices are 'complete' or 'quick' (default)
+      method: quick
+      # dataSource indicates where to get random bytes from to write on the disk
+      # possible choices are 'zero' (default) or 'random'
+      # using random sources will consume entropy from the system and will take much more time than the zero source
+      dataSource: zero
+      # iteration overwrites N times instead of the default (1)
+      # takes an integer value
+      iteration: 1
+    # allowUninstallWithVolumes defines how the uninstall should be performed
+    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
+    allowUninstallWithVolumes: false
   # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
 # The configuration under 'all' schedules all services on kubernetes nodes labeled with '{{ rook_storage_label }}=true' and
 # tolerates taints with a key of '{{ rook_storage_label }}'.
-#  placement:
-#    all:
-#      nodeAffinity:
-#        requiredDuringSchedulingIgnoredDuringExecution:
-#          nodeSelectorTerms:
-#          - matchExpressions:
-#            - key: role
-#              operator: In
-#              values:
-#              - storage-node
+  placement:
+    all:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: "{{ rook_storage_label }}"
+              operator: In
+              values:
+              - "true"
 #      podAffinity:
 #      podAntiAffinity:
-#      tolerations:
-#      - key: storage-node
-#        operator: Exists
+#      topologySpreadConstraints:
+      tolerations:
+      - key: "{{ rook_storage_label }}"
+        operator: Exists
 # The above placement information can also be specified for mon, osd, and mgr components
 #    mon:
 # Monitor deployments may contain an anti-affinity rule for avoiding monitor
@@ -102,12 +148,22 @@ spec:
 # preferred rule with weight: 50.
 #    osd:
 #    mgr:
+#    cleanup:
   annotations:
 #    all:
 #    mon:
 #    osd:
+#    cleanup:
+#    prepareosd:
 # If no mgr annotations are set, prometheus scrape annotations will be set by default.
-#   mgr:
+#    mgr:
+  labels:
+#    all:
+#    mon:
+#    osd:
+#    cleanup:
+#    mgr:
+#    prepareosd:
   resources:
 # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
 #    mgr:
@@ -120,54 +176,77 @@ spec:
 # The above example requests/limits can also be added to the mon and osd components
 #    mon:
 #    osd:
+#    prepareosd:
+#    crashcollector:
+#    logcollector:
+#    cleanup:
+  # The option to automatically remove OSDs that are out and are safe to destroy.
+  removeOSDsIfOutAndSafeToRemove: false
   storage: # cluster level storage configuration and selection
     useAllNodes: true
     useAllDevices: false
+    deviceFilter: "^{{ rook_node_device_filter }}$"
     location:
     config:
-      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
-      # Set the storeType explicitly only if it is required not to use the default.
-      # storeType: bluestore
-      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
-      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
+      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
+      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
+      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
       osdsPerDevice: "1" # this value can be overridden at the node or device level
-# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
-    directories:
-    - path: "{{ rook_storage_dir_path }}"
+      # encryptedDevice: "true" # the default value for this option is "false"
+    #directories:
+    #- path: "{{ rook_storage_dir_path }}"
 # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
 # nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
 #    nodes:
-#    - name: "172.17.4.101"
-#      directories: # specific directories to use for storage can be specified for each node
-#      - path: "/rook/storage-dir"
-#      resources:
-#        limits:
-#          cpu: "500m"
-#          memory: "1024Mi"
-#        requests:
-#          cpu: "500m"
-#          memory: "1024Mi"
 #    - name: "172.17.4.201"
 #      devices: # specific devices to use for storage can be specified for each node
 #      - name: "sdb"
 #      - name: "nvme01" # multiple osds can be created on high performance devices
 #        config:
 #          osdsPerDevice: "5"
+#      - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
 #      config: # configuration can be specified at the node level which overrides the cluster level config
-#        storeType: filestore
+#        storeType: filestore # this option is obsolete and only provided as an example
 #    - name: "172.17.4.301"
-#      deviceFilter: ^vdb
+#      deviceFilter: "^vdb"
   # The section for configuring management of daemon disruptions during upgrade or fencing.
   disruptionManagement:
     # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
-    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph-managed-disruptionbudgets.md). The operator will
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
     # block eviction of OSDs by default and unblock them safely when drains are detected.
     managePodBudgets: false
     # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
     # default DOWN/OUT interval) when it is draining. This is only relevant when  `managePodBudgets` is `true`. The default value is `30` minutes.
     osdMaintenanceTimeout: 30
+    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
+    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
+    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+    pgHealthCheckTimeout: 0
     # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
     # Only available on OpenShift.
     manageMachineDisruptionBudgets: false
     # Namespace in which to watch for the MachineDisruptionBudgets.
     machineDisruptionBudgetNamespace: openshift-machine-api
+
+  # healthChecks
+  # Valid values for daemons are 'mon', 'osd', 'status'
+  healthCheck:
+    daemonHealth:
+      mon:
+        disabled: false
+        interval: 45s
+      osd:
+        disabled: false
+        interval: 60s
+      status:
+        disabled: false
+        interval: 60s
+    # Change pod liveness probe; it works for all mon, mgr, and osd daemons
+    livenessProbe:
+      mon:
+        disabled: false
+      mgr:
+        disabled: false
+      osd:
+        disabled: false
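One caveat with the placement block above: it only governs the Ceph daemon
pods managed by this CephCluster resource. The CSI driver pods are deployed by
the operator and placed separately; Rook v1.5 exposes this through the
CSI_PLUGIN_TOLERATIONS and CSI_PROVISIONER_TOLERATIONS operator settings,
presumably handled in operator.yaml.j2 (not shown in this excerpt). A sketch
under that assumption, as a fragment of the rook-ceph-operator-config
ConfigMap data:

    # Let the CSI pods tolerate the storage-node taint (values illustrative)
    CSI_PLUGIN_TOLERATIONS: |
      - key: "storage-node"
        operator: Exists
    CSI_PROVISIONER_TOLERATIONS: |
      - key: "storage-node"
        operator: Exists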
apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2
index d8b7412..7809e9b 100644
@@ -1,3 +1,21 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2021 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
 ###################################################################################################################
 # Create the common resources that are necessary to start the operator and the ceph cluster.
 # These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{ rook_namespace }}"
-# OLM: BEGIN CEPH CRD
-# The CRD declarations
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephclusters.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephCluster
-    listKind: CephClusterList
-    plural: cephclusters
-    singular: cephcluster
-  scope: Namespaced
-  version: v1
-  validation:
-    openAPIV3Schema:
-      properties:
-        spec:
-          properties:
-            annotations: {}
-            cephVersion:
-              properties:
-                allowUnsupported:
-                  type: boolean
-                image:
-                  type: string
-            dashboard:
-              properties:
-                enabled:
-                  type: boolean
-                urlPrefix:
-                  type: string
-                port:
-                  type: integer
-                  minimum: 0
-                  maximum: 65535
-                ssl:
-                  type: boolean
-            dataDirHostPath:
-              pattern: ^/(\S+)
-              type: string
-            skipUpgradeChecks:
-              type: boolean
-            mon:
-              properties:
-                allowMultiplePerNode:
-                  type: boolean
-                count:
-                  maximum: 9
-                  minimum: 0
-                  type: integer
-            mgr:
-              properties:
-                modules:
-                  items:
-                    properties:
-                      name:
-                        type: string
-                      enabled:
-                        type: boolean
-            network:
-              properties:
-                hostNetwork:
-                  type: boolean
-            storage:
-              properties:
-                disruptionManagement:
-                  properties:
-                    managePodBudgets:
-                      type: boolean
-                    osdMaintenanceTimeout:
-                      type: integer
-                    manageMachineDisruptionBudgets:
-                      type: boolean
-                useAllNodes:
-                  type: boolean
-                nodes:
-                  items:
-                    properties:
-                      name:
-                        type: string
-                      config:
-                        properties:
-                          metadataDevice:
-                            type: string
-                          storeType:
-                            type: string
-                            pattern: ^(filestore|bluestore)$
-                          databaseSizeMB:
-                            type: string
-                          walSizeMB:
-                            type: string
-                          journalSizeMB:
-                            type: string
-                          osdsPerDevice:
-                            type: string
-                          encryptedDevice:
-                            type: string
-                            pattern: ^(true|false)$
-                      useAllDevices:
-                        type: boolean
-                      deviceFilter: {}
-                      directories:
-                        type: array
-                        items:
-                          properties:
-                            path:
-                              type: string
-                      devices:
-                        type: array
-                        items:
-                          properties:
-                            name:
-                              type: string
-                            config: {}
-                      location: {}
-                      resources: {}
-                  type: array
-                useAllDevices:
-                  type: boolean
-                deviceFilter: {}
-                location: {}
-                directories:
-                  type: array
-                  items:
-                    properties:
-                      path:
-                        type: string
-                config: {}
-                topologyAware:
-                  type: boolean
-            monitoring:
-              properties:
-                enabled:
-                  type: boolean
-                rulesNamespace:
-                  type: string
-            rbdMirroring:
-              properties:
-                workers:
-                  type: integer
-            placement: {}
-            resources: {}
-  additionalPrinterColumns:
-    - name: DataDirHostPath
-      type: string
-      description: Directory used on the K8s nodes
-      JSONPath: .spec.dataDirHostPath
-    - name: MonCount
-      type: string
-      description: Number of MONs
-      JSONPath: .spec.mon.count
-    - name: Age
-      type: date
-      JSONPath: .metadata.creationTimestamp
-    - name: State
-      type: string
-      description: Current State
-      JSONPath: .status.state
-    - name: Health
-      type: string
-      description: Ceph Health
-      JSONPath: .status.ceph.health
-# OLM: END CEPH CRD
-# OLM: BEGIN CEPH FS CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephfilesystems.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephFilesystem
-    listKind: CephFilesystemList
-    plural: cephfilesystems
-    singular: cephfilesystem
-  scope: Namespaced
-  version: v1
-  validation:
-    openAPIV3Schema:
-      properties:
-        spec:
-          properties:
-            metadataServer:
-              properties:
-                activeCount:
-                  minimum: 1
-                  maximum: 10
-                  type: integer
-                activeStandby:
-                  type: boolean
-                annotations: {}
-                placement: {}
-                resources: {}
-            metadataPool:
-              properties:
-                failureDomain:
-                  type: string
-                replicated:
-                  properties:
-                    size:
-                      minimum: 1
-                      maximum: 10
-                      type: integer
-                erasureCoded:
-                  properties:
-                    dataChunks:
-                      type: integer
-                    codingChunks:
-                      type: integer
-            dataPools:
-              type: array
-              items:
-                properties:
-                  failureDomain:
-                    type: string
-                  replicated:
-                    properties:
-                      size:
-                        minimum: 1
-                        maximum: 10
-                        type: integer
-                  erasureCoded:
-                    properties:
-                      dataChunks:
-                        type: integer
-                      codingChunks:
-                        type: integer
-  additionalPrinterColumns:
-    - name: ActiveMDS
-      type: string
-      description: Number of desired active MDS daemons
-      JSONPath: .spec.metadataServer.activeCount
-    - name: Age
-      type: date
-      JSONPath: .metadata.creationTimestamp
-# OLM: END CEPH FS CRD
-# OLM: BEGIN CEPH NFS CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephnfses.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephNFS
-    listKind: CephNFSList
-    plural: cephnfses
-    singular: cephnfs
-    shortNames:
-    - nfs
-  scope: Namespaced
-  version: v1
-  validation:
-    openAPIV3Schema:
-      properties:
-        spec:
-          properties:
-            rados:
-              properties:
-                pool:
-                  type: string
-                namespace:
-                  type: string
-            server:
-              properties:
-                active:
-                  type: integer
-                annotations: {}
-                placement: {}
-                resources: {}
-
-# OLM: END CEPH NFS CRD
-# OLM: BEGIN CEPH OBJECT STORE CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephobjectstores.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephObjectStore
-    listKind: CephObjectStoreList
-    plural: cephobjectstores
-    singular: cephobjectstore
-  scope: Namespaced
-  version: v1
-  validation:
-    openAPIV3Schema:
-      properties:
-        spec:
-          properties:
-            gateway:
-              properties:
-                type:
-                  type: string
-                sslCertificateRef: {}
-                port:
-                  type: integer
-                securePort: {}
-                instances:
-                  type: integer
-                annotations: {}
-                placement: {}
-                resources: {}
-            metadataPool:
-              properties:
-                failureDomain:
-                  type: string
-                replicated:
-                  properties:
-                    size:
-                      type: integer
-                erasureCoded:
-                  properties:
-                    dataChunks:
-                      type: integer
-                    codingChunks:
-                      type: integer
-            dataPool:
-              properties:
-                failureDomain:
-                  type: string
-                replicated:
-                  properties:
-                    size:
-                      type: integer
-                erasureCoded:
-                  properties:
-                    dataChunks:
-                      type: integer
-                    codingChunks:
-                      type: integer
-# OLM: END CEPH OBJECT STORE CRD
-# OLM: BEGIN CEPH OBJECT STORE USERS CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephobjectstoreusers.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephObjectStoreUser
-    listKind: CephObjectStoreUserList
-    plural: cephobjectstoreusers
-    singular: cephobjectstoreuser
-  scope: Namespaced
-  version: v1
-# OLM: END CEPH OBJECT STORE USERS CRD
-# OLM: BEGIN CEPH BLOCK POOL CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: cephblockpools.ceph.rook.io
-spec:
-  group: ceph.rook.io
-  names:
-    kind: CephBlockPool
-    listKind: CephBlockPoolList
-    plural: cephblockpools
-    singular: cephblockpool
-  scope: Namespaced
-  version: v1
-# OLM: END CEPH BLOCK POOL CRD
-# OLM: BEGIN CEPH VOLUME POOL CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: volumes.rook.io
-spec:
-  group: rook.io
-  names:
-    kind: Volume
-    listKind: VolumeList
-    plural: volumes
-    singular: volume
-    shortNames:
-    - rv
-  scope: Namespaced
-  version: v1alpha2
-# OLM: END CEPH VOLUME POOL CRD
-# OLM: BEGIN OBJECTBUCKET CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: objectbuckets.objectbucket.io
-spec:
-  group: objectbucket.io
-  versions:
-    - name: v1alpha1
-      served: true
-      storage: true
-  names:
-    kind: ObjectBucket
-    listKind: ObjectBucketList
-    plural: objectbuckets
-    singular: objectbucket
-    shortNames:
-      - ob
-      - obs
-  scope: Cluster
-  subresources:
-    status: {}
-# OLM: END OBJECTBUCKET CRD
-# OLM: BEGIN OBJECTBUCKETCLAIM CRD
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: objectbucketclaims.objectbucket.io
-spec:
-  versions:
-    - name: v1alpha1
-      served: true
-      storage: true
-  group: objectbucket.io
-  names:
-    kind: ObjectBucketClaim
-    listKind: ObjectBucketClaimList
-    plural: objectbucketclaims
-    singular: objectbucketclaim
-    shortNames:
-      - obc
-      - obcs
-  scope: Namespaced
-  subresources:
-    status: {}
-# OLM: END OBJECTBUCKETCLAIM CRD
+  name: "{{ rook_namespace }}" # namespace:cluster
 # OLM: BEGIN OBJECTBUCKET ROLEBINDING
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-object-bucket
 roleRef:
@@ -465,68 +46,75 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-ceph-system
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 # OLM: END OBJECTBUCKET ROLEBINDING
 # OLM: BEGIN OPERATOR ROLE
 ---
-# The cluster role for managing all the cluster-specific resources in a namespace
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-admission-controller
+  namespace: "{{ rook_namespace }}" # namespace:operator
+---
 kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: rook-ceph-cluster-mgmt
-  labels:
-    operator: rook
-    storage-backend: ceph
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
-rules: []
+  name: rook-ceph-admission-controller-role
+rules:
+  - apiGroups: ["ceph.rook.io"]
+    resources: ["*"]
+    verbs: ["get", "watch", "list"]
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rook-ceph-admission-controller-rolebinding
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-admission-controller
+    apiGroup: ""
+    namespace: "{{ rook_namespace }}" # namespace:operator
+roleRef:
+  kind: ClusterRole
+  name: rook-ceph-admission-controller-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: rook-ceph-cluster-mgmt-rules
+  name: rook-ceph-cluster-mgmt
   labels:
     operator: rook
     storage-backend: ceph
-    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
 rules:
 - apiGroups:
   - ""
+  - apps
+  - extensions
   resources:
   - secrets
   - pods
   - pods/log
   - services
   - configmaps
-  verbs:
-  - get
-  - list
-  - watch
-  - patch
-  - create
-  - update
-  - delete
-- apiGroups:
-  - apps
-  resources:
   - deployments
   - daemonsets
   verbs:
   - get
   - list
   - watch
+  - patch
   - create
   - update
   - delete
 ---
 # The role for the operator to manage resources in its own namespace
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
   labels:
     operator: rook
     storage-backend: ceph
@@ -547,6 +135,7 @@ rules:
   - delete
 - apiGroups:
   - apps
+  - extensions
   resources:
   - daemonsets
   - statefulsets
@@ -560,27 +149,13 @@ rules:
   - delete
 ---
 # The cluster role for managing the Rook CRDs
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: rook-ceph-global
   labels:
     operator: rook
     storage-backend: ceph
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
-rules: []
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
-  name: rook-ceph-global-rules
-  labels:
-    operator: rook
-    storage-backend: ceph
-    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
 rules:
 - apiGroups:
   - ""
@@ -590,6 +165,7 @@ rules:
   # Node access is needed for determining nodes where mons should run
   - nodes
   - nodes/proxy
+  - services
   verbs:
   - get
   - list
@@ -644,11 +220,13 @@ rules:
 - apiGroups:
   - policy
   - apps
+  - extensions
   resources:
-  #this is for the clusterdisruption controller
+  # This is for the clusterdisruption controller
   - poddisruptionbudgets
-  #this is for both clusterdisruption and nodedrain controllers
+  # This is for both clusterdisruption and nodedrain controllers
   - deployments
+  - replicasets
   verbs:
   - "*"
 - apiGroups:
@@ -673,29 +251,30 @@ rules:
   - create
   - update
   - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - csidrivers
+  verbs:
+  - create
+  - delete
+  - get
+  - update
+- apiGroups:
+  - k8s.cni.cncf.io
+  resources:
+  - network-attachment-definitions
+  verbs:
+  - get
 ---
 # Aspects of ceph-mgr that require cluster-wide access
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr-cluster
   labels:
     operator: rook
     storage-backend: ceph
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: rook-ceph-mgr-cluster-rules
-  labels:
-    operator: rook
-    storage-backend: ceph
-    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
 rules:
 - apiGroups:
   - ""
@@ -719,13 +298,12 @@ rules:
   - watch
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-object-bucket
   labels:
     operator: rook
     storage-backend: ceph
-    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
 rules:
 - apiGroups:
   - ""
@@ -756,7 +334,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
   labels:
     operator: rook
     storage-backend: ceph
@@ -768,10 +346,10 @@ metadata:
 ---
 # Grant the operator, agent, and discovery agents access to resources in the namespace
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
   labels:
     operator: rook
     storage-backend: ceph
@@ -782,14 +360,13 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 # Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-global
-  namespace: "{{ rook_namespace }}"
   labels:
     operator: rook
     storage-backend: ceph
@@ -800,7 +377,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 # OLM: END OPERATOR ROLEBINDING
 #################################################################################################################
 # Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
@@ -814,7 +391,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 # imagePullSecrets:
 # - name: my-registry-secret
 
@@ -826,7 +403,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 # imagePullSecrets:
 # - name: my-registry-secret
 
@@ -837,15 +414,15 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-ceph-cmd-reporter
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 # OLM: END CMD REPORTER SERVICE ACCOUNT
 # OLM: BEGIN CLUSTER ROLE
 ---
 kind: Role
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 rules:
 - apiGroups: [""]
   resources: ["configmaps"]
@@ -855,10 +432,9 @@ rules:
   verbs: [ "get", "list", "create", "update", "delete" ]
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
 rules:
 - apiGroups:
   - ""
@@ -870,23 +446,9 @@ rules:
 ---
 # Aspects of ceph-mgr that require access to the system namespace
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr-system
-  namespace: "{{ rook_namespace }}"
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: rook-ceph-mgr-system-rules
-  namespace: "{{ rook_namespace }}"
-  labels:
-      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
 rules:
 - apiGroups:
   - ""
@@ -899,20 +461,22 @@ rules:
 ---
 # Aspects of ceph-mgr that operate within the cluster's namespace
 kind: Role
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 rules:
 - apiGroups:
   - ""
   resources:
   - pods
   - services
+  - pods/log
   verbs:
   - get
   - list
   - watch
+  - delete
 - apiGroups:
   - batch
   resources:
@@ -934,10 +498,10 @@ rules:
 # OLM: BEGIN CMD REPORTER ROLE
 ---
 kind: Role
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-cmd-reporter
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 rules:
 - apiGroups:
   - ""
@@ -956,10 +520,10 @@ rules:
 ---
 # Allow the operator to create resources in this cluster's namespace
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-cluster-mgmt
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -967,14 +531,14 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 # Allow the osd pods in this namespace to work with configmaps
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -982,14 +546,14 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 # Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -997,14 +561,14 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 # Allow the ceph mgr to access the rook system resources necessary for the mgr modules
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr-system
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}"  # namespace:operator
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -1012,11 +576,11 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 # Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-mgr-cluster
 roleRef:
@@ -1026,12 +590,12 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 
 ---
 # Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-osd
 roleRef:
@@ -1041,16 +605,16 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 
 # OLM: END CLUSTER ROLEBINDING
 # OLM: BEGIN CMD REPORTER ROLEBINDING
 ---
 kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rook-ceph-cmd-reporter
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -1058,7 +622,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-cmd-reporter
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 # OLM: END CMD REPORTER ROLEBINDING
 #################################################################################################################
 # Beginning of pod security policy resources. The example will assume the cluster will be created in the
@@ -1070,7 +634,12 @@ subjects:
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
-  name: rook-privileged
+  # Note: Kubernetes matches PSPs to deployments alphabetically. In some environments, this PSP may
+  # need to be renamed with a value that will match before others.
+  name: 00-rook-privileged
+  annotations:
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
+    seccomp.security.alpha.kubernetes.io/defaultProfileName:  'runtime/default'
 spec:
   privileged: true
   allowedCapabilities:
@@ -1100,7 +669,6 @@ spec:
     - hostPath
     - flexVolume
   # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
-  # directory-based OSDs make this hard to nail down
   # allowedHostPaths:
   #   - pathPrefix: "/run/udev"  # for OSD prep
   #     readOnly: false
@@ -1146,7 +714,7 @@ rules:
     resources:
       - podsecuritypolicies
     resourceNames:
-      - rook-privileged
+      - 00-rook-privileged
     verbs:
       - use
 ---
@@ -1161,13 +729,13 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-ceph-system
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: rook-ceph-default-psp
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -1175,13 +743,13 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: default
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: rook-ceph-osd-psp
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -1189,13 +757,13 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-osd
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: rook-ceph-mgr-psp
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -1203,13 +771,13 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-mgr
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: rook-ceph-cmd-reporter-psp
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -1217,7 +785,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: rook-ceph-cmd-reporter
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:cluster
 # OLM: END CLUSTER POD SECURITY POLICY BINDINGS
 # OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT
 ---
@@ -1225,21 +793,21 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-csi-cephfs-plugin-sa
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-csi-cephfs-provisioner-sa
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 # OLM: END CSI CEPHFS SERVICE ACCOUNT
 # OLM: BEGIN CSI CEPHFS ROLE
 ---
 kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  namespace: "{{ rook_namespace }}"
   name: cephfs-external-provisioner-cfg
+  namespace: "{{ rook_namespace }}" # namespace:operator
 rules:
   - apiGroups: [""]
     resources: ["endpoints"]
@@ -1257,11 +825,11 @@ kind: RoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cephfs-csi-provisioner-role-cfg
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 subjects:
   - kind: ServiceAccount
     name: rook-csi-cephfs-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: Role
   name: cephfs-external-provisioner-cfg
@@ -1273,18 +841,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cephfs-csi-nodeplugin
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: cephfs-csi-nodeplugin-rules
-  labels:
-    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
@@ -1306,25 +862,13 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cephfs-external-provisioner-runner
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: cephfs-external-provisioner-runner-rules
-  labels:
-    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
 rules:
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "create", "delete", "update"]
+    verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
@@ -1334,12 +878,36 @@ rules:
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents/status"]
+    verbs: ["update"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create", "list", "watch", "delete", "get", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots/status"]
+    verbs: ["update"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments/status"]
+    verbs: ["patch"]
   - apiGroups: [""]
     resources: ["nodes"]
     verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
 # OLM: END CSI CEPHFS CLUSTER ROLE
 # OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING
 ---
@@ -1354,7 +922,7 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-cephfs-plugin-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -1367,7 +935,7 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-cephfs-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -1376,7 +944,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-cephfs-plugin-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: ClusterRole
   name: cephfs-csi-nodeplugin
@@ -1390,7 +958,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-cephfs-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: ClusterRole
   name: cephfs-external-provisioner-runner
@@ -1402,28 +970,28 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-csi-rbd-plugin-sa
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rook-csi-rbd-provisioner-sa
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 # OLM: END CSI RBD SERVICE ACCOUNT
 # OLM: BEGIN CSI RBD ROLE
 ---
 kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  namespace: "{{ rook_namespace }}"
   name: rbd-external-provisioner-cfg
+  namespace: "{{ rook_namespace }}" # namespace:operator
 rules:
   - apiGroups: [""]
     resources: ["endpoints"]
     verbs: ["get", "watch", "list", "delete", "update", "create"]
   - apiGroups: [""]
     resources: ["configmaps"]
-    verbs: ["get", "list", "watch", "create", "delete"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create"]
@@ -1434,11 +1002,11 @@ kind: RoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rbd-csi-provisioner-role-cfg
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
 subjects:
   - kind: ServiceAccount
     name: rook-csi-rbd-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: Role
   name: rbd-external-provisioner-cfg
@@ -1450,18 +1018,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rbd-csi-nodeplugin
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: rbd-csi-nodeplugin-rules
-  labels:
-    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
 rules:
   - apiGroups: [""]
     resources: ["secrets"]
@@ -1486,31 +1042,22 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: rbd-external-provisioner-runner
-aggregationRule:
-  clusterRoleSelectors:
-  - matchLabels:
-      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
-rules: []
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: rbd-external-provisioner-runner-rules
-  labels:
-    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
 rules:
   - apiGroups: [""]
     resources: ["secrets"]
-    verbs: ["get", "list"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "create", "delete", "update"]
+    verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments/status"]
+    verbs: ["patch"]
   - apiGroups: [""]
     resources: ["nodes"]
     verbs: ["get", "list", "watch"]
@@ -1529,12 +1076,21 @@ rules:
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents/status"]
+    verbs: ["update"]
   - apiGroups: ["apiextensions.k8s.io"]
     resources: ["customresourcedefinitions"]
     verbs: ["create", "list", "watch", "delete", "get", "update"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots/status"]
     verbs: ["update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: [ "get"]
 # OLM: END CSI RBD CLUSTER ROLE
 # OLM: BEGIN CSI RBD CLUSTER ROLEBINDING
 ---
@@ -1549,7 +1105,7 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-rbd-plugin-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -1562,7 +1118,7 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-rbd-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -1571,7 +1127,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-rbd-plugin-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: ClusterRole
   name: rbd-csi-nodeplugin
@@ -1584,7 +1140,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: rook-csi-rbd-provisioner-sa
-    namespace: "{{ rook_namespace }}"
+    namespace: "{{ rook_namespace }}" # namespace:operator
 roleRef:
   kind: ClusterRole
   name: rbd-external-provisioner-runner
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/crds.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/crds.yaml.j2
new file mode 100644 (file)
index 0000000..e9349bf
--- /dev/null
@@ -0,0 +1,792 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2021 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster CRD in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+#
+# Most of the sections are prefixed with an 'OLM' keyword, which is used to build our CSV for an OLM (Operator Lifecycle Manager)
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+# The CRD declarations
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            annotations: {}
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+                  minimum: 0
+                  maximum: 65535
+                ssl:
+                  type: boolean
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            disruptionManagement:
+              properties:
+                machineDisruptionBudgetNamespace:
+                  type: string
+                managePodBudgets:
+                  type: boolean
+                osdMaintenanceTimeout:
+                  type: integer
+                pgHealthCheckTimeout:
+                  type: integer
+                manageMachineDisruptionBudgets:
+                  type: boolean
+            skipUpgradeChecks:
+              type: boolean
+            continueUpgradeAfterChecksEvenIfNotHealthy:
+              type: boolean
+            waitTimeoutForHealthyOSDInMinutes:
+              type: integer
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+                volumeClaimTemplate: {}
+            mgr:
+              properties:
+                modules:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      enabled:
+                        type: boolean
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+                provider:
+                  type: string
+                selectors: {}
+            storage:
+              properties:
+                disruptionManagement:
+                  properties:
+                    machineDisruptionBudgetNamespace:
+                      type: string
+                    managePodBudgets:
+                      type: boolean
+                    osdMaintenanceTimeout:
+                      type: integer
+                    pgHealthCheckTimeout:
+                      type: integer
+                    manageMachineDisruptionBudgets:
+                      type: boolean
+                useAllNodes:
+                  type: boolean
+                nodes:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      config:
+                        properties:
+                          metadataDevice:
+                            type: string
+                          storeType:
+                            type: string
+                            pattern: ^(bluestore)$
+                          databaseSizeMB:
+                            type: string
+                          walSizeMB:
+                            type: string
+                          journalSizeMB:
+                            type: string
+                          osdsPerDevice:
+                            type: string
+                          encryptedDevice:
+                            type: string
+                            pattern: ^(true|false)$
+                      useAllDevices:
+                        type: boolean
+                      deviceFilter:
+                        type: string
+                      devicePathFilter:
+                        type: string
+                      devices:
+                        type: array
+                        items:
+                          properties:
+                            name:
+                              type: string
+                            config: {}
+                      resources: {}
+                useAllDevices:
+                  type: boolean
+                deviceFilter:
+                  type: string
+                devicePathFilter:
+                  type: string
+                config: {}
+                storageClassDeviceSets: {}
+            driveGroups:
+              type: array
+              items:
+                properties:
+                  name:
+                    type: string
+                  spec: {}
+                  placement: {}
+                required:
+                - name
+                - spec
+            monitoring:
+              properties:
+                enabled:
+                  type: boolean
+                rulesNamespace:
+                  type: string
+                externalMgrEndpoints:
+                  type: array
+                  items:
+                    properties:
+                      ip:
+                        type: string
+            removeOSDsIfOutAndSafeToRemove:
+              type: boolean
+            external:
+              properties:
+                enable:
+                  type: boolean
+            cleanupPolicy:
+              properties:
+                confirmation:
+                  type: string
+                  pattern: ^$|^yes-really-destroy-data$
+                sanitizeDisks:
+                  properties:
+                    method:
+                      type: string
+                      pattern: ^(complete|quick)$
+                    dataSource:
+                      type: string
+                      pattern: ^(zero|random)$
+                    iteration:
+                      type: integer
+                      format: int32
+            security: {}
+            logCollector: {}
+            placement: {}
+            resources: {}
+            healthCheck: {}
+  subresources:
+    status: {}
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: Phase
+      type: string
+      description: Phase
+      JSONPath: .status.phase
+    - name: Message
+      type: string
+      description: Message
+      JSONPath: .status.message
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclients.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephClient
+    listKind: CephClientList
+    plural: cephclients
+    singular: cephclient
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            caps:
+              type: object
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephrbdmirrors.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephRBDMirror
+    listKind: CephRBDMirrorList
+    plural: cephrbdmirrors
+    singular: cephrbdmirror
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            count:
+              type: integer
+              minimum: 1
+              maximum: 100
+            peers:
+              properties:
+                secretNames:
+                  type: array
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            metadataServer:
+              properties:
+                activeCount:
+                  minimum: 1
+                  maximum: 10
+                  type: integer
+                activeStandby:
+                  type: boolean
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                crushRoot:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      minimum: 0
+                      maximum: 10
+                      type: integer
+                    requireSafeReplicaSize:
+                      type: boolean
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      minimum: 0
+                      maximum: 10
+                      type: integer
+                    codingChunks:
+                      minimum: 0
+                      maximum: 10
+                      type: integer
+                compressionMode:
+                  type: string
+                  enum:
+                  - ""
+                  - none
+                  - passive
+                  - aggressive
+                  - force
+            dataPools:
+              type: array
+              items:
+                properties:
+                  failureDomain:
+                    type: string
+                  crushRoot:
+                    type: string
+                  replicated:
+                    properties:
+                      size:
+                        minimum: 0
+                        maximum: 10
+                        type: integer
+                      requireSafeReplicaSize:
+                        type: boolean
+                  erasureCoded:
+                    properties:
+                      dataChunks:
+                        minimum: 0
+                        maximum: 10
+                        type: integer
+                      codingChunks:
+                        minimum: 0
+                        maximum: 10
+                        type: integer
+                  compressionMode:
+                    type: string
+                    enum:
+                    - ""
+                    - none
+                    - passive
+                    - aggressive
+                    - force
+                  parameters:
+                    type: object
+            preservePoolsOnDelete:
+              type: boolean
+            preserveFilesystemOnDelete:
+              type: boolean
+  additionalPrinterColumns:
+    - name: ActiveMDS
+      type: string
+      description: Number of desired active MDS daemons
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            rados:
+              properties:
+                pool:
+                  type: string
+                namespace:
+                  type: string
+            server:
+              properties:
+                active:
+                  type: integer
+                annotations: {}
+                placement: {}
+                resources: {}
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            gateway:
+              properties:
+                type:
+                  type: string
+                sslCertificateRef: {}
+                port:
+                  type: integer
+                  minimum: 0
+                  maximum: 65535
+                securePort:
+                  type: integer
+                  minimum: 0
+                  maximum: 65535
+                instances:
+                  type: integer
+                externalRgwEndpoints:
+                  type: array
+                  items:
+                    properties:
+                      ip:
+                        type: string
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                crushRoot:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                    requireSafeReplicaSize:
+                      type: boolean
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+                compressionMode:
+                  type: string
+                  enum:
+                  - ""
+                  - none
+                  - passive
+                  - aggressive
+                  - force
+                parameters:
+                  type: object
+            dataPool:
+              properties:
+                failureDomain:
+                  type: string
+                crushRoot:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                    requireSafeReplicaSize:
+                      type: boolean
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+                compressionMode:
+                  type: string
+                  enum:
+                  - ""
+                  - none
+                  - passive
+                  - aggressive
+                  - force
+                parameters:
+                  type: object
+            preservePoolsOnDelete:
+              type: boolean
+            healthCheck:
+              properties:
+                bucket:
+                  properties:
+                    disabled:
+                      type: boolean
+                    interval:
+                      type: string
+                    timeout:
+                      type: string
+                livenessProbe:
+                  type: object
+                  properties:
+                    disabled:
+                      type: boolean
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+    shortNames:
+    - rcou
+    - objectuser
+  scope: Namespaced
+  version: v1
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectrealms.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectRealm
+    listKind: CephObjectRealmList
+    plural: cephobjectrealms
+    singular: cephobjectrealm
+  scope: Namespaced
+  version: v1
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectzonegroups.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectZoneGroup
+    listKind: CephObjectZoneGroupList
+    plural: cephobjectzonegroups
+    singular: cephobjectzonegroup
+  scope: Namespaced
+  version: v1
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectzones.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectZone
+    listKind: CephObjectZoneList
+    plural: cephobjectzones
+    singular: cephobjectzone
+  scope: Namespaced
+  version: v1
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            failureDomain:
+                type: string
+            crushRoot:
+                type: string
+            replicated:
+              properties:
+                size:
+                  type: integer
+                  minimum: 0
+                  maximum: 9
+                targetSizeRatio:
+                  type: number
+                requireSafeReplicaSize:
+                  type: boolean
+            erasureCoded:
+              properties:
+                dataChunks:
+                  type: integer
+                  minimum: 0
+                  maximum: 9
+                codingChunks:
+                  type: integer
+                  minimum: 0
+                  maximum: 9
+            compressionMode:
+              type: string
+              enum:
+              - ""
+              - none
+              - passive
+              - aggressive
+              - force
+            enableRBDStats:
+              description: EnableRBDStats is used to enable gathering of statistics
+                for all RBD images in the pool
+              type: boolean
+            parameters:
+              type: object
+            mirroring:
+              properties:
+                enabled:
+                  type: boolean
+                mode:
+                  type: string
+                  enum:
+                  - image
+                  - pool
+                snapshotSchedules:
+                  type: object
+                  properties:
+                    interval:
+                      type: string
+                    startTime:
+                      type: string
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbuckets.objectbucket.io
+spec:
+  group: objectbucket.io
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  names:
+    kind: ObjectBucket
+    listKind: ObjectBucketList
+    plural: objectbuckets
+    singular: objectbucket
+    shortNames:
+      - ob
+      - obs
+  scope: Cluster
+  subresources:
+    status: {}
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbucketclaims.objectbucket.io
+spec:
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  group: objectbucket.io
+  names:
+    kind: ObjectBucketClaim
+    listKind: ObjectBucketClaimList
+    plural: objectbucketclaims
+    singular: objectbucketclaim
+    shortNames:
+      - obc
+      - obcs
+  scope: Namespaced
+  subresources:
+    status: {}
index b2575f5..864de01 100644 (file)
@@ -23,9 +23,9 @@ parameters:
 
   # The secrets contain Ceph admin credentials. These are generated automatically by the operator
   # in the same namespace as the cluster.
-  csi.storage.k8s.io/provisioner-secret-name: rook-ceph-csi
-  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_namespace }}
-  csi.storage.k8s.io/node-stage-secret-name: rook-ceph-csi
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
   csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_namespace }}
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_namespace }}
 
 reclaimPolicy: Delete
index 5a4345f..b460750 100644 (file)
@@ -8,10 +8,10 @@ metadata:
 spec:
   metadataPool:
     replicated:
-      size: 3
+      size: 1
   dataPools:
     - replicated:
-        size: 3
+        size: 1
   preservePoolsOnDelete: true
   metadataServer:
     activeCount: 1
index 24a5db0..6bee51e 100644 (file)
@@ -1,5 +1,5 @@
 # ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+#  Copyright (C) 2021 The Nordix Foundation. All rights reserved.
 # ================================================================================
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # The deployment for the rook operator
 # Contains the common settings for most Kubernetes deployments.
 # For example, to create the rook-ceph cluster:
-#   kubectl create -f common.yaml
-#   kubectl create -f operator.yaml
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
 #   kubectl create -f cluster.yaml
 #
 # Also see other operator sample files for variations of operator.yaml:
 # - operator-openshift.yaml: Common settings for running in OpenShift
-#################################################################################################################
+###############################################################################################################
+
+# Rook Ceph Operator Config ConfigMap
+# Use this ConfigMap to override Rook-Ceph Operator configurations.
+# NOTE! This config takes precedence if the same Env Var config also exists in the
+#       Operator Deployment.
+# To move a configuration from the Operator Deployment to this ConfigMap, add the config
+# here, then remove it from the Deployment to eliminate any future confusion.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-ceph-operator-config
+  # should be in the namespace of the operator
+  namespace: "{{ rook_namespace }}" # namespace:operator
+data:
+  # Enable the CSI driver.
+  # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
+  ROOK_CSI_ENABLE_CEPHFS: "true"
+  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+  ROOK_CSI_ENABLE_RBD: "true"
+  ROOK_CSI_ENABLE_GRPC_METRICS: "false"
+
+  # Set logging level for csi containers.
+  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+  # CSI_LOG_LEVEL: "0"
+
+  # The OMAP generator generates the omap mapping between the PV name and the RBD image.
+  # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when using the RBD mirroring feature.
+  # By default the OMAP generator sidecar is deployed with the CSI provisioner pod; to disable
+  # it, set this to false.
+  # CSI_ENABLE_OMAP_GENERATOR: "false"
+
+  # Set to false to disable deployment of the snapshotter container in the CephFS provisioner pod.
+  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
+
+  # Set to false to disable deployment of the snapshotter container in the RBD provisioner pod.
+  CSI_ENABLE_RBD_SNAPSHOTTER: "true"
+
+  # Enable cephfs kernel driver instead of ceph-fuse.
+  # If you disable the kernel client, your application may be disrupted during upgrade.
+  # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html
+  # NOTE! cephfs quota is not supported in kernel version < 4.17
+  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
+
+  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+  # (Optional) Allow starting unsupported ceph-csi image
+  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
+  # The default version of CSI supported by Rook will be started. To change the version
+  # of the CSI driver to something other than what is officially supported, change
+  # these images to the desired release of the CSI driver.
+  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.2.0"
+  ROOK_CSI_CEPH_IMAGE: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
+  # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1"
+  ROOK_CSI_REGISTRAR_IMAGE: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
+  # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.0.0"
+  ROOK_CSI_RESIZER_IMAGE: "{{ csi_resizer_repository }}:{{ csi_resizer_version }}"
+  # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.0.0"
+  ROOK_CSI_PROVISIONER_IMAGE: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
+  # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.0"
+  ROOK_CSI_SNAPSHOTTER_IMAGE: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
+  # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.0.0"
+  ROOK_CSI_ATTACHER_IMAGE: "{{ csi_attacher_repository }}:{{ csi_attacher_version }}"
+
+  # (Optional) set user created priorityclassName for csi plugin pods.
+  # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
+
+  # (Optional) set user created priorityclassName for csi provisioner pods.
+  # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
+
+  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+  # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
+  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
+
+  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
+  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
+  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
+  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
+
+  # (Optional) Ceph Provisioner NodeAffinity.
+  CSI_PROVISIONER_NODE_AFFINITY: "{{ rook_storage_label }}=true"
+  # (Optional) Ceph CSI provisioner tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
+  CSI_PROVISIONER_TOLERATIONS: |
+      - key: "{{ rook_storage_label }}"
+        operator: Exists
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+  # (Optional) Ceph CSI plugin NodeAffinity.
+  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  CSI_PLUGIN_NODE_AFFINITY: "{{ rook_storage_label }}=false"
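+  # NOTE: with the label scheme above, provisioners are pinned to the storage
+  # nodes ({{ rook_storage_label }}=true), while the plugins run on the workload
+  # nodes ({{ rook_storage_label }}=false) where the volumes are actually mounted.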
+  # (Optional) Ceph CSI plugin tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+  # CSI_PLUGIN_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+
+  # (Optional) Ceph CSI RBD provisioner resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_RBD_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-snapshotter
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) Ceph CSI RBD plugin resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_RBD_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) Ceph CSI CephFS provisioner resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_CEPHFS_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) Ceph CSI CephFS plugin resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_CEPHFS_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+
+  # Configure the CSI CephFS gRPC and liveness metrics ports
+  # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
+  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
+  # Configure the CSI RBD gRPC and liveness metrics ports
+  # CSI_RBD_GRPC_METRICS_PORT: "9090"
+  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
+
+  # Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
+  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
+
+  # (Optional) Admission controller NodeAffinity.
+  # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  # (Optional) Admission controller tolerations list. Put here the list of taints you want to tolerate in YAML format.
+  # The admission controller is best started on the same nodes as the other Ceph daemons.
+  # ADMISSION_CONTROLLER_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+
+# Some other config values need to be set in this ConfigMap
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-config-override
+  namespace: "{{ rook_namespace }}" # namespace:cluster
+data:
+  config: |
+    [global]
+    osd_pool_default_size = {{ rook_ceph_osd_pool_default_size }}
+    osd_pool_default_min_size = {{ rook_ceph_osd_pool_default_min_size }}
+    mon_warn_on_pool_no_redundancy = {{ rook_ceph_mon_warn_on_no_pool_redundancy }}
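+# For a single-replica test deployment the block above renders, illustratively, as:
+#   [global]
+#   osd_pool_default_size = 1
+#   osd_pool_default_min_size = 1
+#   mon_warn_on_pool_no_redundancy = false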
+---
 # OLM: BEGIN OPERATOR DEPLOYMENT
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: rook-ceph-operator
-  namespace: "{{ rook_namespace }}"
+  namespace: "{{ rook_namespace }}" # namespace:operator
   labels:
     operator: rook
     storage-backend: ceph
@@ -49,7 +352,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
       - name: rook-ceph-operator
-        image: "{{ rook_repository }}:{{ rook_version }}"
+        image: {{ rook_repository }}:{{ rook_version }}
         args: ["ceph", "operator"]
         volumeMounts:
         - mountPath: /var/lib/rook
@@ -72,17 +375,22 @@ spec:
         # - name: AGENT_TOLERATION_KEY
         #   value: "<KeyOfTheTaintToTolerate>"
         # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # - name: AGENT_TOLERATIONS
-        #   value: |
+        - name: AGENT_TOLERATIONS
+          value: |
+              - key: "{{ rook_storage_label }}"
+                operator: Exists
         #     - effect: NoSchedule
         #       key: node-role.kubernetes.io/controlplane
         #       operator: Exists
         #     - effect: NoExecute
         #       key: node-role.kubernetes.io/etcd
         #       operator: Exists
+        # (Optional) Rook Agent priority class name to set on the pod(s)
+        # - name: AGENT_PRIORITY_CLASS_NAME
+        #   value: "<PriorityClassName>"
         # (Optional) Rook Agent NodeAffinity.
-        - name: AGENT_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook,ceph"
+        - name: AGENT_NODE_AFFINITY
+          value: "{{ rook_storage_label }}=true"
         # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`.
         # `Any` uses Ceph admin credentials by default/fallback.
         # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and
@@ -107,17 +415,25 @@ spec:
         # - name: DISCOVER_TOLERATION_KEY
         #   value: "<KeyOfTheTaintToTolerate>"
         # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # - name: DISCOVER_TOLERATIONS
-        #   value: |
+        - name: DISCOVER_TOLERATIONS
+          value: |
+            - key: "{{ rook_storage_label }}"
+              operator: Exists
         #     - effect: NoSchedule
         #       key: node-role.kubernetes.io/controlplane
         #       operator: Exists
         #     - effect: NoExecute
         #       key: node-role.kubernetes.io/etcd
         #       operator: Exists
+        # (Optional) Rook Discover priority class name to set on the pod(s)
+        # - name: DISCOVER_PRIORITY_CLASS_NAME
+        #   value: "<PriorityClassName>"
         # (Optional) Discover Agent NodeAffinity.
-        # - name: DISCOVER_AGENT_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
+        - name: DISCOVER_AGENT_NODE_AFFINITY
+          value: "{{ rook_storage_label }}=true"
+        # (Optional) Discover Agent Pod Labels.
+        # - name: DISCOVER_AGENT_POD_LABELS
+        #   value: "key1=value1,key2=value2"
         # Allow rook to create multiple file systems. Note: This is considered
         # an experimental feature in Ceph as described at
         # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
@@ -129,25 +445,12 @@ spec:
         - name: ROOK_LOG_LEVEL
           value: "INFO"
 
-        # The interval to check the health of the ceph cluster and update the status in the custom resource.
-        - name: ROOK_CEPH_STATUS_CHECK_INTERVAL
-          value: "60s"
-
-        # The interval to check if every mon is in the quorum.
-        - name: ROOK_MON_HEALTHCHECK_INTERVAL
-          value: "45s"
-
-        # The duration to wait before trying to failover or remove/replace the
-        # current mon with a new mon (useful for compensating flapping network).
-        - name: ROOK_MON_OUT_TIMEOUT
-          value: "600s"
-
         # The duration between discovering devices in the rook-discover daemonset.
         - name: ROOK_DISCOVER_DEVICES_INTERVAL
           value: "60m"
 
         # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
-        # This is necessary to workaround the anyuid issues when running on OpenShift.
+        # Set this to true if SELinux is enabled (e.g. OpenShift) to work around the anyuid issues.
         # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
         - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
           value: "false"
@@ -167,78 +470,39 @@ spec:
         - name: ROOK_DISABLE_DEVICE_HOTPLUG
           value: "false"
 
+        # Provide customised regexes as comma-separated values. For example, the regex for
+        # rbd-based volumes is "(?i)rbd[0-9]+"; use a comma to separate multiple regexes.
+        # The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
+        # Append a regex after a comma to blacklist an additional disk pattern.
+        # If the value is empty, the default regex will be used.
+        - name: DISCOVER_DAEMON_UDEV_BLACKLIST
+          value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
+
         # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
         # in favor of the CSI driver.
         - name: ROOK_ENABLE_FLEX_DRIVER
-          value: "true"
+          value: "false"
 
         # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
         # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
         - name: ROOK_ENABLE_DISCOVERY_DAEMON
           value: "false"
 
-        # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below.
-        - name: ROOK_CSI_ENABLE_CEPHFS
-          value: "true"
+        # Time to wait until the node controller moves Rook pods to other
+        # nodes after detecting an unreachable node.
+        # Pods affected by this setting are:
+        # mgr, rbd, mds, rgw, nfs, PVC-based mons and osds, and the ceph toolbox.
+        # The value of this variable replaces the default of 300 secs that k8s
+        # adds automatically as a toleration for
+        # <node.kubernetes.io/unreachable>.
+        # The total time before Rook pods are rescheduled onto healthy nodes
+        # after a <not ready node> condition is detected is the sum of:
+        #  --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
+        #  --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
+        - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
+          value: "5"
 
-        # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
-        - name: ROOK_CSI_ENABLE_RBD
-          value: "true"
-        - name: ROOK_CSI_ENABLE_GRPC_METRICS
-          value: "true"
-        # The default version of CSI supported by Rook will be started. To change the version
-        # of the CSI driver to something other than what is officially supported, change
-        # these images to the desired release of the CSI driver.
-        #- name: ROOK_CSI_CEPH_IMAGE
-        #  value: "quay.io/cephcsi/cephcsi:v1.2.1"
-        #- name: ROOK_CSI_REGISTRAR_IMAGE
-        #  value: "quay.io/k8scsi/csi-node-driver-registrar:v1.1.0"
-        #- name: ROOK_CSI_PROVISIONER_IMAGE
-        #  value: "quay.io/k8scsi/csi-provisioner:v1.3.0"
-        #- name: ROOK_CSI_SNAPSHOTTER_IMAGE
-        #  value: "quay.io/k8scsi/csi-snapshotter:v1.2.0"
-        #- name: ROOK_CSI_ATTACHER_IMAGE
-        #  value: "quay.io/k8scsi/csi-attacher:v1.2.0"
-        # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
-        #- name: ROOK_CSI_KUBELET_DIR_PATH
-        #  value: "/var/lib/kubelet"
-        # (Optional) Ceph Provisioner NodeAffinity.
-        # - name: CSI_PROVISIONER_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
-        # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
-        #  CSI provisioner would be best to start on the same nodes as other ceph daemons.
-        # - name: CSI_PROVISIONER_TOLERATIONS
-        #   value: |
-        #     - effect: NoSchedule
-        #       key: node-role.kubernetes.io/controlplane
-        #       operator: Exists
-        #     - effect: NoExecute
-        #       key: node-role.kubernetes.io/etcd
-        #       operator: Exists
-        # (Optional) Ceph CSI plugin NodeAffinity.
-        # - name: CSI_PLUGIN_NODE_AFFINITY
-        #   value: "role=storage-node; storage=rook, ceph"
-        # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
-        # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
-        # - name: CSI_PLUGIN_TOLERATIONS
-        #   value: |
-        #     - effect: NoSchedule
-        #       key: node-role.kubernetes.io/controlplane
-        #       operator: Exists
-        #     - effect: NoExecute
-        #       key: node-role.kubernetes.io/etcd
-        #       operator: Exists
         # The name of the node to pass with the downward API
-        - name: ROOK_CSI_CEPH_IMAGE
-          value: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
-        - name: ROOK_CSI_REGISTRAR_IMAGE
-          value: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
-        - name: ROOK_CSI_PROVISIONER_IMAGE
-          value: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
-        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
-          value: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
-        - name: ROOK_CSI_ATTACHER_IMAGE
-          value: "{{ csi_attacherr_repository }}:{{ csi_attacher_version }}"
         - name: NODE_NAME
           valueFrom:
             fieldRef:
@@ -253,6 +517,13 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+
+        # Uncomment to run the lib bucket provisioner in multithreaded mode
+        #- name: LIB_BUCKET_PROVISIONER_THREADS
+        #  value: "5"
+
+      # Uncomment to run the rook operator on the host network
+      #hostNetwork: true
       volumes:
       - name: rook-config
         emptyDir: {}
index 7a3969a..5941c35 100644 (file)
@@ -19,6 +19,9 @@ spec:
       # remove it if your masters can't run pods
       - key: node-role.kubernetes.io/master
         effect: NoSchedule
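+      # also tolerate the {{ rook_storage_label }} taint so this daemonset keeps
+      # running on the tainted storage nodes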
+      - key: "{{ rook_storage_label }}"
+        effect: NoSchedule
+        operator: Exists
       nodeSelector:
         kubernetes.io/os: linux
       hostNetwork: true
index 21ada26..41e9434 100644 (file)
@@ -22,19 +22,17 @@ metadata:
   name: "{{ rook_block_storage_name }}"
   annotations:
     storageclass.kubernetes.io/is-default-class: "true"
-provisioner: ceph.rook.io/block
+provisioner: "{{ rook_namespace }}.rbd.csi.ceph.com"
 # Works for Kubernetes 1.14+
 allowVolumeExpansion: true
 parameters:
-  blockPool: "{{ rook_block_pool_name }}"
-  # Specify the namespace of the rook cluster from which to create volumes.
-  # If not specified, it will use `rook` as the default namespace of the cluster.
-  # This is also the namespace where the cluster will be
-  clusterNamespace: "{{ rook_namespace }}"
+  pool: "{{ rook_block_pool_name }}"
+  clusterID: "{{ rook_namespace }}"
   # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
-  fstype: "{{ rook_block_storage_fs }}"
-  # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass.
-  #mountUser: user1
-  # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret.
-  # The secret must exist in each namespace(s) where the storage will be consumed.
-  #mountSecret: ceph-user1-secret
+  csi.storage.k8s.io/fstype: "{{ rook_block_storage_fs }}"
+  csi.storage.k8s.io/provisioner-secret-name: "rook-csi-rbd-provisioner"
+  csi.storage.k8s.io/provisioner-secret-namespace: "{{ rook_namespace }}"
+  csi.storage.k8s.io/controller-expand-secret-name: "rook-csi-rbd-provisioner"
+  csi.storage.k8s.io/controller-expand-secret-namespace: "{{ rook_namespace }}"
+  csi.storage.k8s.io/node-stage-secret-name: "rook-csi-rbd-node"
+  csi.storage.k8s.io/node-stage-secret-namespace: "{{ rook_namespace }}"
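+
+# Example claim against this class (a minimal sketch; the PVC name and size are illustrative):
+# ---
+# apiVersion: v1
+# kind: PersistentVolumeClaim
+# metadata:
+#   name: example-block-pvc
+# spec:
+#   storageClassName: "{{ rook_block_storage_name }}"
+#   accessModes:
+#     - ReadWriteOnce
+#   resources:
+#     requests:
+#       storage: 1Gi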
index 0dd3c0f..7e1abf8 100644 (file)
@@ -23,14 +23,21 @@ spec:
         args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
         imagePullPolicy: IfNotPresent
         env:
-          - name: ROOK_ADMIN_SECRET
+          - name: ROOK_CEPH_USERNAME
             valueFrom:
               secretKeyRef:
                 name: rook-ceph-mon
-                key: admin-secret
+                key: ceph-username
+          - name: ROOK_CEPH_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: ceph-secret
         securityContext:
           privileged: true
         volumeMounts:
+          - mountPath: /etc/ceph
+            name: ceph-config
           - mountPath: /dev
             name: dev
           - mountPath: /sys/bus
@@ -57,3 +64,10 @@ spec:
             items:
             - key: data
               path: mon-endpoints
+        - name: ceph-config
+          emptyDir: {}
+      tolerations:
+        - key: "node.kubernetes.io/unreachable"
+          operator: "Exists"
+          effect: "NoExecute"
+          tolerationSeconds: 5
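+          # 5s matches ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS in the operator
+          # template, so the toolbox is rescheduled as quickly as the other Rook pods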
index 841f878..6ca5854 100644 (file)
@@ -22,8 +22,9 @@ rook_repository: "{{ server_fqdn }}/rook/ceph"
 busybox_repository: "{{ server_fqdn }}/busybox"
 cephcsi_repository: "{{ server_fqdn }}/cephcsi/cephcsi"
 csi_node_driver_registrar_repository: "{{ server_fqdn }}/k8scsi/csi-node-driver-registrar"
+csi_resizer_repository: "{{ server_fqdn }}/k8scsi/csi-resizer"
 csi_provisioner_repository: "{{ server_fqdn }}/k8scsi/csi-provisioner"
 csi_snapshotter_repository: "{{ server_fqdn }}/k8scsi/csi-snapshotter"
-csi_attacherr_repository: "{{ server_fqdn }}/k8scsi/csi-attacher"
+csi_attacher_repository: "{{ server_fqdn }}/k8scsi/csi-attacher"
 
 # vim: set ts=2 sw=2 expandtab:
index 76caf86..ff36be5 100644 (file)
@@ -22,8 +22,9 @@ rook_repository: "rook/ceph"
 busybox_repository: "docker.io/library/busybox"
 cephcsi_repository: "quay.io/cephcsi/cephcsi"
 csi_node_driver_registrar_repository: "quay.io/k8scsi/csi-node-driver-registrar"
+csi_resizer_repository: "quay.io/k8scsi/csi-resizer"
 csi_provisioner_repository: "quay.io/k8scsi/csi-provisioner"
 csi_snapshotter_repository: "quay.io/k8scsi/csi-snapshotter"
-csi_attacherr_repository: "quay.io/k8scsi/csi-attacher"
+csi_attacher_repository: "quay.io/k8scsi/csi-attacher"
 
 # vim: set ts=2 sw=2 expandtab:
index 3844479..48c5318 100644 (file)
@@ -21,6 +21,7 @@
   action: >
     {{ ansible_pkg_mgr }} name={{ item }} state=present update_cache=yes
   with_items:
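+    # lvm2 is required by ceph-volume when Rook provisions OSDs on raw devices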
+    - "lvm2"
     - "xfsprogs"
     - "gdisk"
 
index 58a96cb..ad4b181 100644 (file)
@@ -570,7 +570,9 @@ nodeExporter:
   ## Node tolerations for node-exporter scheduling to nodes with taints
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   ##
-  tolerations: []
+  tolerations:
+    - effect: NoExecute
+      operator: Exists
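+    # tolerate all NoExecute taints so node-exporter keeps running on nodes the
+    # node controller has marked unreachable or not-ready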
     # - key: "key"
     #   operator: "Equal|Exists"
     #   value: "value"
index dbc1041..def9636 100644 (file)
@@ -81,6 +81,9 @@ other_images:
   csi-provisioner:
     repo: quay.io/k8scsi/csi-provisioner
     tag: "{{ csi_provisioner_version }}"
+  csi-resizer:
+    repo: quay.io/k8scsi/csi-resizer
+    tag: "{{ csi_resizer_version }}"
   csi-snapshotter:
     repo: quay.io/k8scsi/csi-snapshotter
     tag: "{{ csi_snapshotter_version }}"
index a5d1a2f..fe4a200 100644 (file)
@@ -68,14 +68,15 @@ kubectl_version: "{{ kubernetes_version }}"
 # -------------------------------------------------------------------------------
 # Kubernetes: Versions of rook, ceph and their dependencies
 # -------------------------------------------------------------------------------
-rook_version: "v1.1.2"
+rook_version: "v1.5.9"
 busybox_version: "1.32.0"
-ceph_version: "v14.2.4-20190917"
-cephcsi_version: "v1.2.1"
-csi_node_driver_registrar_version: "v1.1.0"
-csi_attacher_version: "v1.2.0"
-csi_provisioner_version: "v1.3.0"
-csi_snapshotter_version: "v1.2.0"
+ceph_version: "v14.2.18-20210316"
+cephcsi_version: "v3.2.0"
+csi_node_driver_registrar_version: "v2.0.1"
+csi_resizer_version: "v1.0.0"
+csi_attacher_version: "v3.0.0"
+csi_provisioner_version: "v2.0.0"
+csi_snapshotter_version: "v3.0.0"
 
 # -------------------------------------------------------------------------------
 # Kubernetes: Versions of prometheus and its dependencies