apiVersion: v1
kind: Namespace
metadata:
  name: "{{ rook_cluster_namespace }}"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-osd
  namespace: "{{ rook_cluster_namespace }}"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-osd
  namespace: "{{ rook_cluster_namespace }}"
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
---
# Aspects of ceph-mgr that require access to the system (operator) namespace.
# Note: the matching RoleBinding below is created in the operator namespace, and
# a RoleBinding can only reference a Role in its own namespace, so this Role
# must be created there as well.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-mgr-system
  namespace: "{{ rook_operator_namespace }}"
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
- apiGroups:
  - ceph.rook.io
  resources:
  - "*"
  verbs:
  - "*"
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: "{{ rook_cluster_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: "{{ rook_operator_namespace }}"
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-osd
  namespace: "{{ rook_cluster_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-osd
subjects:
- kind: ServiceAccount
  name: rook-ceph-osd
  namespace: "{{ rook_cluster_namespace }}"
---
# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-mgr
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
---
# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-mgr-system
  namespace: "{{ rook_operator_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-mgr-system
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
---
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-mgr-cluster
  namespace: "{{ rook_cluster_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-mgr-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-mgr
  namespace: "{{ rook_cluster_namespace }}"
---
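# Once rendered and applied, the RBAC wiring above can be sanity-checked with
# kubectl's impersonation support (a sketch; "rook-ceph" stands in for the
# rendered value of rook_cluster_namespace):
#   kubectl auth can-i create configmaps -n rook-ceph \
#     --as=system:serviceaccount:rook-ceph:rook-ceph-osd
#   kubectl auth can-i create jobs.batch -n rook-ceph \
#     --as=system:serviceaccount:rook-ceph:rook-ceph-mgr
# Both should print "yes" if the Roles and RoleBindings resolved correctly.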
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: "{{ rook_cluster_namespace }}"
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 tag, which pulls the latest release and could result in different
    # versions running within the cluster. See the tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: "ceph/ceph:{{ ceph_version }}"
    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
    # After nautilus is released, Rook will be updated to support nautilus.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: "{{ rook_data_dir_path }}"
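  # For example, before reinstalling, the old data dir could be removed on every
  # host with an ad-hoc Ansible run (a sketch; the inventory group "all" is an
  # assumption about your inventory layout):
  #   ansible all --become -m file -a "path={{ rook_data_dir_path }} state=absent"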
  # set the number of mons to be started; an odd count such as 3 keeps quorum
  # through the loss of a single mon
  mon:
    count: 3
    allowMultiplePerNode: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # serve the dashboard using SSL
    # ssl: true
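  # With no Ingress in front, port-forwarding is the quickest way to reach the
  # dashboard (a sketch; "rook-ceph-mgr-dashboard" is the service name Rook
  # creates by default, and "rook-ceph" is an example rendered namespace):
  #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8443:8443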
  network:
    # toggle to use hostNetwork
    hostNetwork: {{ rook_use_host_network }}
  rbdMirroring:
    # The number of daemons that will perform the rbd mirroring.
    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
    workers: 0
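  # For example, once workers is raised above 0, mirroring is enabled per pool
  # from the toolbox pod (a sketch; "replicapool" is an illustrative pool name):
  #   rbd mirror pool enable replicapool image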
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'. A labeling/tainting example follows this block.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #         - matchExpressions:
  #           - key: role
  #             operator: In
  #             values:
  #             - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     tolerations:
  #     - key: storage-node
  #       operator: Exists
  # The above placement information can also be specified for the mon, osd, and mgr components
  #   mon:
  #   osd:
  #   mgr:
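  # For the placement example above to select anything, the nodes must carry the
  # matching label and taint (a sketch; "worker-1" is an illustrative node name):
  #   kubectl label node worker-1 role=storage-node
  #   kubectl taint node worker-1 storage-node=true:NoSchedule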
  resources:
  # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
  #   mgr:
  #     limits:
  #       cpu: "500m"
  #       memory: "1024Mi"
  #     requests:
  #       cpu: "500m"
  #       memory: "1024Mi"
  # The above example requests/limits can also be added to the mon and osd components
  #   mon:
  #   osd:
  storage: # cluster level storage configuration and selection
    useAllNodes: false
    useAllDevices: false
    nodes:
{% for nodename, hostname in idf[installer_type].hostnames.items() %}
{% set node = nodes | selectattr('name', 'equalto', nodename) | first %}
    - name: "{{ hostname }}"
{% if node.node.type == 'baremetal' %}
      directories:
      - path: "{{ rook_storage_dir_path }}"
{% else %}
      deviceFilter: "^{{ rook_node_device_filter }}"
{% endif %}
{% endfor %}
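    # For illustration, the loop above might render to entries like the
    # following (hostnames, paths, and filters depend on the PDF/IDF values):
    #   - name: "node1"
    #     directories:
    #     - path: "/rook/storage"
    #   - name: "node2"
    #     deviceFilter: "^sdb"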
    location:
    config:
      # The default and recommended storeType is chosen automatically: bluestore for devices and filestore for directories.
      # Set storeType explicitly only if you need to override that default.
      # storeType: bluestore
      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
      osdsPerDevice: "1"     # this value can be overridden at the node or device level
    # Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
    # directories:
    # - path: /rook/storage-dir
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
    # nodes:
    # - name: "172.17.4.101"
    #   directories: # specific directories to use for storage can be specified for each node
    #   - path: "/rook/storage-dir"
    #   resources:
    #     limits:
    #       cpu: "500m"
    #       memory: "1024Mi"
    #     requests:
    #       cpu: "500m"
    #       memory: "1024Mi"
    # - name: "172.17.4.201"
    #   devices: # specific devices to use for storage can be specified for each node
    #   - name: "sdb"
    #   - name: "nvme01" # multiple osds can be created on high performance devices
    #     config:
    #       osdsPerDevice: "5"
    #   config: # configuration can be specified at the node level which overrides the cluster level config
    #     storeType: filestore
    # - name: "172.17.4.301"
    #   deviceFilter: "^vdb"
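# After the operator reconciles the CephCluster above, overall health can be
# checked from the rook-ceph-tools (toolbox) pod with the standard Ceph CLI:
#   ceph status
#   ceph osd tree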