# ============LICENSE_START=======================================================
# Copyright (C) 2019 The Nordix Foundation. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: "{{ rook_namespace }}"
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: "{{ ceph_repository }}:{{ ceph_version }}"
    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
    # After nautilus is released, Rook will be updated to support nautilus.
    # Do not set to true in production.
    allowUnsupported: false
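    # For instance, a fully pinned image tag for the setting above (value borrowed from
    # an upstream Rook sample and shown only as an illustration; the real repository and
    # tag come from the Ansible variables):
    # image: ceph/ceph:v13.2.2-20181023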
  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: "{{ rook_data_dir_path }}"
  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
  skipUpgradeChecks: false
  # set the amount of mons to be started
  mon:
    count: 3
    allowMultiplePerNode: true
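    # In production one would typically keep an odd mon count and spread the mons across
    # hosts, e.g. (illustrative): count: 3 with allowMultiplePerNode: false.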
  mgr:
    modules:
    # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
    # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
    # - name: pg_autoscaler
    #   enabled: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # serve the dashboard using SSL
    # ssl: true
  monitoring:
    # requires Prometheus to be pre-installed
    enabled: false
    # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
    # Recommended:
    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
    rulesNamespace: {{ rook_namespace }}
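    # For example, with two rook-ceph clusters and Prometheus deployed in a (hypothetical)
    # "monitoring" namespace, both cluster CRs would set:
    # rulesNamespace: monitoring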
  network:
    # toggle to use hostNetwork
    hostNetwork: {{ rook_use_host_network }}
  rbdMirroring:
    # The number of daemons that will perform the rbd mirroring.
    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
    workers: 0
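    # For example, once workers is raised above 0, mirroring is enabled per pool from the
    # toolbox pod (the pool name "replicapool" is only an illustration):
    #   rbd mirror pool enable replicapool image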
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
#    all:
#      nodeAffinity:
#        requiredDuringSchedulingIgnoredDuringExecution:
#          nodeSelectorTerms:
#          - matchExpressions:
#            - key: role
#              operator: In
#              values:
#              - storage-node
#      podAffinity:
#      podAntiAffinity:
#      tolerations:
#      - key: storage-node
#        operator: Exists
# The above placement information can also be specified for mon, osd, and mgr components
#    mon:
# Monitor deployments may contain an anti-affinity rule for avoiding monitor
# collocation on the same node. This is a required rule when host network is used
# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# preferred rule with weight: 50.
#    osd:
#    mgr:
  annotations:
#    all:
#    mon:
#    osd:
# If no mgr annotations are set, prometheus scrape annotations will be set by default.
#    mgr:
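#      # Illustration only: these mirror the scrape annotations the operator sets by
#      # default (the ceph-mgr prometheus module listens on port 9283):
#      prometheus.io/scrape: "true"
#      prometheus.io/port: "9283"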
  resources:
# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
#    mgr:
#      limits:
#        cpu: "500m"
#        memory: "1024Mi"
#      requests:
#        cpu: "500m"
#        memory: "1024Mi"
# The above example requests/limits can also be added to the mon and osd components
#    mon:
#    osd:
  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: false
    deviceFilter:
    config:
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      # storeType: bluestore
      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
      journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger)
      osdsPerDevice: "1" # this value can be overridden at the node or device level
    # Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
    directories:
    - path: "{{ rook_storage_dir_path }}"
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
#    nodes:
#    - name: "172.17.4.101"
#      directories: # specific directories to use for storage can be specified for each node
#      - path: "/rook/storage-dir"
#      resources:
#        limits:
#          cpu: "500m"
#          memory: "1024Mi"
#        requests:
#          cpu: "500m"
#          memory: "1024Mi"
#    - name: "172.17.4.201"
#      devices: # specific devices to use for storage can be specified for each node
#      - name: "sdb"
#      - name: "nvme01" # multiple osds can be created on high performance devices
#        config:
#          osdsPerDevice: "5"
#      config: # configuration can be specified at the node level which overrides the cluster level config
#        storeType: filestore
#    - name: "172.17.4.301"
#      deviceFilter: "^sd."
  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: false
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
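    # While a failure domain is draining with `noout` held, the state is visible in
    # `ceph status` / `ceph health detail` run from the toolbox pod until the timeout expires.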
    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
    # Only available on OpenShift.
    manageMachineDisruptionBudgets: false
    # Namespace in which to watch for the MachineDisruptionBudgets.
    machineDisruptionBudgetNamespace: openshift-machine-api
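# Once Ansible has rendered this template (filling in the {{ ... }} variables from the
# inventory), the result is applied like any other manifest, e.g. (the output filename
# here is hypothetical):
#   kubectl apply -f cluster.yaml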