X-Git-Url: https://gerrit.nordix.org/gitweb?p=infra%2Fstack%2Fkubernetes.git;a=blobdiff_plain;f=apps%2Fceph%2Fkubespray%2Fplaybooks%2Froles%2Finstall%2Ftemplates%2Fcluster.yaml.j2;fp=apps%2Fceph%2Fkubespray%2Fplaybooks%2Froles%2Finstall%2Ftemplates%2Fcluster.yaml.j2;h=60c6665b88e11aa9c0d56c58c2e6d39c9d3e7703;hp=0000000000000000000000000000000000000000;hb=a1e1f40e71a48d8c5315f37999b9123c7ea908ab;hpb=20d34e772e021fabdee0aa9b50e9804a80d5108a

diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
new file mode 100644
index 0000000..60c6665
--- /dev/null
+++ b/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
@@ -0,0 +1,173 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: "{{ rook_namespace }}"
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of a general tag such as v13, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: "{{ ceph_repository }}:{{ ceph_version }}"
+    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
+    # After nautilus is released, Rook will be updated to support nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
+  dataDirHostPath: "{{ rook_data_dir_path }}"
+  # Whether or not an upgrade should continue even if a check fails.
+  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
+  # Use at your OWN risk.
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
+  skipUpgradeChecks: false
+  # set the number of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  mgr:
+    modules:
+    # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
+    # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
+    # - name: pg_autoscaler
+    #   enabled: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    ssl: true
+  monitoring:
+    # requires Prometheus to be pre-installed
+    enabled: false
+    # namespace to deploy prometheusRule in. If empty, the namespace of the cluster will be used.
+    # Recommended:
+    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
+    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, the namespace with prometheus
+    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+    rulesNamespace: "{{ rook_namespace }}"
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: {{ rook_use_host_network }}
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+# placement:
+#   all:
+#     nodeAffinity:
+#       requiredDuringSchedulingIgnoredDuringExecution:
+#         nodeSelectorTerms:
+#         - matchExpressions:
+#           - key: role
+#             operator: In
+#             values:
+#             - storage-node
+#     podAffinity:
+#     podAntiAffinity:
+#     tolerations:
+#     - key: storage-node
+#       operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#   mon:
+# Monitor deployments may contain an anti-affinity rule for avoiding monitor
+# collocation on the same node. This is a required rule when host network is used
+# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+# preferred rule with weight: 50.
+#   osd:
+#   mgr:
+  annotations:
+#   all:
+#   mon:
+#   osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#   mgr:
+#     limits:
+#       cpu: "500m"
+#       memory: "1024Mi"
+#     requests:
+#       cpu: "500m"
+#       memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#   mon:
+#   osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
+      journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger)
+      osdsPerDevice: "1" # this value can be overridden at the node or device level
+# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
+    directories:
+      - path: "{{ rook_storage_dir_path }}"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+#   nodes:
+#   - name: "172.17.4.101"
+#     directories: # specific directories to use for storage can be specified for each node
+#     - path: "/rook/storage-dir"
+#     resources:
+#       limits:
+#         cpu: "500m"
+#         memory: "1024Mi"
+#       requests:
+#         cpu: "500m"
+#         memory: "1024Mi"
+#   - name: "172.17.4.201"
+#     devices: # specific devices to use for storage can be specified for each node
+#     - name: "sdb"
+#     - name: "nvme01" # multiple osds can be created on high performance devices
+#       config:
+#         osdsPerDevice: "5"
+#     config: # configuration can be specified at the node level which overrides the cluster level config
+#       storeType: filestore
+#   - name: "172.17.4.301"
+#     deviceFilter: ^vdb
+  # The section for configuring management of daemon disruptions during upgrade or fencing.
+  disruptionManagement:
+    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph-managed-disruptionbudgets.md). The operator will
+    # block eviction of OSDs by default and unblock them safely when drains are detected.
+    managePodBudgets: false
+    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
+    osdMaintenanceTimeout: 30
+    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
+    # Only available on OpenShift.
+    manageMachineDisruptionBudgets: false
+    # Namespace in which to watch for the MachineDisruptionBudgets.
+    machineDisruptionBudgetNamespace: openshift-machine-api
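
The template above is driven entirely by a handful of Ansible variables: rook_namespace, ceph_repository, ceph_version, rook_data_dir_path, rook_use_host_network and rook_storage_dir_path. A minimal sketch of what these could look like in the role defaults or group_vars follows; the variable names are taken from the template itself, but the values shown are illustrative assumptions rather than the defaults shipped with this stack.

# Hypothetical values for the variables consumed by cluster.yaml.j2; adjust to the stack's actual defaults.
rook_namespace: rook-ceph
ceph_repository: ceph/ceph
ceph_version: v14.2.4              # pin a specific image tag, as the template comments recommend
rook_data_dir_path: /var/lib/rook
rook_use_host_network: false
rook_storage_dir_path: /rook/storage-dir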
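
Rendering and applying the manifest is the job of the surrounding install role rather than of this template. A minimal sketch of how that could be done is shown below, assuming a kubectl binary on the target host and a hypothetical rook_manifest_dir variable; the actual role may render and apply the file differently.

# Hypothetical tasks; cluster.yaml.j2 is the template added by this change, everything else is an assumption.
- name: Render Rook CephCluster manifest
  template:
    src: cluster.yaml.j2
    dest: "{{ rook_manifest_dir }}/cluster.yaml"

- name: Apply Rook CephCluster manifest
  command: "kubectl apply -f {{ rook_manifest_dir }}/cluster.yaml"
  changed_when: true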