Move pre, postinstall, scenario, and apps to stack 58/5158/4
authorFatih Degirmenci <fatih.degirmenci@est.tech>
Wed, 24 Jun 2020 13:22:44 +0000 (13:22 +0000)
committerFatih Degirmenci <fatih.degirmenci@est.tech>
Wed, 24 Jun 2020 17:18:49 +0000 (17:18 +0000)
Preinstall, postinstall, scenario, and apps are specific
to stack composition and more appropriate to locate them
within the stacks themselves rather than the installers.
This makes it possible for different stacks to configure
target deployment according to their needs and use the
installer without thinking about potential effects the
changes could cause on other stacks that may be using the
same installer.

An example to this is Kubernetes and ONAP stacks and the
CEPH configuration. Both stacks use Kubespray but the
configuration of Kubernetes differs between them. By moving
the pre/post/scenario/app configuration into stacks
themselves, they become independent from each other as
much as possible.

Please note that once this change goes in, the dependent
change needs to be verified and submitted in order to
conclude the transition.

https://gerrit.nordix.org/c/infra/installer/kubespray/+/5157

Change-Id: I24d7e9546034b385565708f528ed9caddf03ae26

55 files changed:
apps/ceph/kubespray/playbooks/install.yml [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/common/vars/main.yml [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2 [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh [new file with mode: 0644]
apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml [new file with mode: 0644]
apps/istio/kubespray/playbooks/install.yml [new file with mode: 0644]
apps/istio/kubespray/playbooks/roles/install/tasks/main.yml [new file with mode: 0644]
apps/istio/kubespray/playbooks/roles/install/vars/main.yml [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/install.yml [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2 [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2 [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2 [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml [new file with mode: 0644]
apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/install.yml [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh [new file with mode: 0755]
apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2 [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2 [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml [new file with mode: 0644]
apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml [new file with mode: 0644]
install.sh
package.sh
playbooks/postinstall.yaml [new file with mode: 0644]
playbooks/preinstall.yaml [new file with mode: 0644]
playbooks/roles/postinstall/tasks/configure-jumphost.yml [new file with mode: 0644]
playbooks/roles/postinstall/tasks/configure-localhost.yml [new file with mode: 0644]
playbooks/roles/postinstall/tasks/main.yml [new file with mode: 0644]
playbooks/roles/postinstall/vars/main.yaml [new file with mode: 0644]
playbooks/roles/postinstall/vars/offline-deployment.yaml [new file with mode: 0644]
playbooks/roles/postinstall/vars/online-deployment.yaml [new file with mode: 0644]
scenarios/k8-calico-istio.yaml [new file with mode: 0644]
scenarios/k8-calico-nofeature.yaml [new file with mode: 0644]
scenarios/k8-calico-spinnaker.yaml [new file with mode: 0644]
scenarios/k8-canal-nofeature.yaml [new file with mode: 0644]
scenarios/k8-cilium-nofeature.yaml [new file with mode: 0644]
scenarios/k8-flannel-nofeature.yaml [new file with mode: 0644]
scenarios/k8-multus-nofeature.yaml [new file with mode: 0644]
scenarios/k8-multus-plugins.yaml [new file with mode: 0644]
scenarios/k8-weave-nofeature.yaml [new file with mode: 0644]
tox.ini

diff --git a/apps/ceph/kubespray/playbooks/install.yml b/apps/ceph/kubespray/playbooks/install.yml
new file mode 100644 (file)
index 0000000..171f1af
--- /dev/null
@@ -0,0 +1,36 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- hosts: baremetal
+  gather_facts: true
+  become: true
+
+  roles:
+    - role: common
+    - role: prepare
+
+- hosts: jumphost
+  gather_facts: true
+  become: false
+
+  roles:
+    - role: common
+    - role: install
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/ceph/kubespray/playbooks/roles/common/vars/main.yml b/apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
new file mode 100644 (file)
index 0000000..933075a
--- /dev/null
@@ -0,0 +1,35 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+rook_data_dir_path: "/var/lib/rook"
+rook_storage_dir_path: "/rook/storage-dir"
+
+rook_namespace: "rook-ceph"
+
+rook_use_host_network: "false"
+rook_node_device_filter: "vdb"
+
+rook_block_pool_name: "block-pool"
+rook_block_pool_replicas: 1
+
+rook_block_storage_name: "block-storage"
+rook_block_storage_fs: "xfs"
+
+rook_filesystem_name: "rookfs"
+rook_filesystem_storageclass_name: "csi-cephfs"
diff --git a/apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml b/apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml
new file mode 100644 (file)
index 0000000..b977018
--- /dev/null
@@ -0,0 +1,164 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- name: Delete existing rook cluster if any
+  k8s:
+    definition: "{{ lookup('template', config_file) }}"
+    state: absent
+  with_items:
+    - external-dashboard-https.yaml.j2
+    - pool.yaml.j2
+    - storageclass.yaml.j2
+    - toolbox.yaml.j2
+    - cluster.yaml.j2
+  loop_control:
+    loop_var: config_file
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing rook cluster CRD if any
+  k8s:
+    api_version: apiextensions.k8s.io/v1beta1
+    state: absent
+    kind: CustomResourceDefinition
+    name: cephclusters.ceph.rook.io
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing rook operator if any
+  k8s:
+    definition: "{{ lookup('template', config_file) }}"
+    state: absent
+  with_items:
+    - operator.yaml.j2
+    - common.yaml.j2
+  loop_control:
+    loop_var: config_file
+  ignore_errors: true
+  tags: reset
+
+- name: Wait until rook namespace is deleted
+  k8s_facts:
+    kind: Namespace
+    name: "{{ rook_namespace }}"
+  register: result
+  until: not result.resources
+  retries: 10
+  delay: 5
+  tags: reset
+
+- name: Create rook operator
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - common.yaml.j2
+    - operator.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Wait until OPERATOR pod is available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-operator
+    field_selectors:
+      - status.phase=Running
+  register: rook_mgr_status
+  until:
+    - rook_mgr_status.resources is defined
+    - rook_mgr_status.resources
+  retries: 20
+  delay: 5
+
+- name: Create rook cluster
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - cluster.yaml.j2
+    - toolbox.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Wait until rook cluster deployment is complete
+  k8s_facts:
+    kind: CephCluster
+    name: rook-ceph
+    namespace: "{{ rook_namespace }}"
+    field_selectors:
+      - status.state = "Created"
+  register: rook_cluster_status
+  until:
+    - rook_cluster_status.resources
+  retries: 10
+  delay: 5
+
+- name: Wait until MGR pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-mgr
+    field_selectors:
+      - status.phase=Running
+  register: rook_mgr_status
+  until:
+    - rook_mgr_status.resources is defined
+    - rook_mgr_status.resources
+  retries: 30
+  delay: 10
+
+- name: Wait until OSD pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-osd
+    field_selectors:
+      - status.phase=Running
+  register: rook_osd_status
+  until:
+    - rook_osd_status.resources is defined
+    - rook_osd_status.resources
+  retries: 30
+  delay: 10
+
+- name: Create rook block storage
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - pool.yaml.j2
+    - storageclass.yaml.j2
+    - filesystem.yaml.j2
+    - filesystem-storageclass.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Create rook external dashboard
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'external-dashboard-https.yaml.j2') }}"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
new file mode 100644 (file)
index 0000000..60c6665
--- /dev/null
@@ -0,0 +1,173 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: "{{ rook_namespace }}"
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: "{{ ceph_repository }}:{{ ceph_version }}"
+    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
+    # After nautilus is released, Rook will be updated to support nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: "{{ rook_data_dir_path }}"
+  # Whether or not upgrade should continue even if a check fails
+  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
+  # Use at your OWN risk
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
+  skipUpgradeChecks: false
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  mgr:
+    modules:
+    # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
+    # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
+    # - name: pg_autoscaler
+    #   enabled: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    ssl: true
+  monitoring:
+    # requires Prometheus to be pre-installed
+    enabled: false
+    # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
+    # Recommended:
+    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
+    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
+    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+    rulesNamespace: {{ rook_namespace }}
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: {{ rook_use_host_network }}
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+# Monitor deployments may contain an anti-affinity rule for avoiding monitor
+# collocation on the same node. This is a required rule when host network is used
+# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+# preferred rule with weight: 50.
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
+      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
+      osdsPerDevice: "1" # this value can be overridden at the node or device level
+# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
+    directories:
+    - path: "{{ rook_storage_dir_path }}"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: ^vdb
+  # The section for configuring management of daemon disruptions during upgrade or fencing.
+  disruptionManagement:
+    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph-managed-disruptionbudgets.md). The operator will
+    # block eviction of OSDs by default and unblock them safely when drains are detected.
+    managePodBudgets: false
+    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+    # default DOWN/OUT interval) when it is draining. This is only relevant when  `managePodBudgets` is `true`. The default value is `30` minutes.
+    osdMaintenanceTimeout: 30
+    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
+    # Only available on OpenShift.
+    manageMachineDisruptionBudgets: false
+    # Namespace in which to watch for the MachineDisruptionBudgets.
+    machineDisruptionBudgetNamespace: openshift-machine-api
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2
new file mode 100644 (file)
index 0000000..d8b7412
--- /dev/null
@@ -0,0 +1,1592 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not be created again.
+#
+# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager)
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: "{{ rook_namespace }}"
+# OLM: BEGIN CEPH CRD
+# The CRD declarations
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            annotations: {}
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+                  minimum: 0
+                  maximum: 65535
+                ssl:
+                  type: boolean
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            skipUpgradeChecks:
+              type: boolean
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+            mgr:
+              properties:
+                modules:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      enabled:
+                        type: boolean
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                disruptionManagement:
+                  properties:
+                    managePodBudgets:
+                      type: boolean
+                    osdMaintenanceTimeout:
+                      type: integer
+                    manageMachineDisruptionBudgets:
+                      type: boolean
+                useAllNodes:
+                  type: boolean
+                nodes:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      config:
+                        properties:
+                          metadataDevice:
+                            type: string
+                          storeType:
+                            type: string
+                            pattern: ^(filestore|bluestore)$
+                          databaseSizeMB:
+                            type: string
+                          walSizeMB:
+                            type: string
+                          journalSizeMB:
+                            type: string
+                          osdsPerDevice:
+                            type: string
+                          encryptedDevice:
+                            type: string
+                            pattern: ^(true|false)$
+                      useAllDevices:
+                        type: boolean
+                      deviceFilter: {}
+                      directories:
+                        type: array
+                        items:
+                          properties:
+                            path:
+                              type: string
+                      devices:
+                        type: array
+                        items:
+                          properties:
+                            name:
+                              type: string
+                            config: {}
+                      location: {}
+                      resources: {}
+                  type: array
+                useAllDevices:
+                  type: boolean
+                deviceFilter: {}
+                location: {}
+                directories:
+                  type: array
+                  items:
+                    properties:
+                      path:
+                        type: string
+                config: {}
+                topologyAware:
+                  type: boolean
+            monitoring:
+              properties:
+                enabled:
+                  type: boolean
+                rulesNamespace:
+                  type: string
+            rbdMirroring:
+              properties:
+                workers:
+                  type: integer
+            placement: {}
+            resources: {}
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+# OLM: END CEPH CRD
+# OLM: BEGIN CEPH FS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            metadataServer:
+              properties:
+                activeCount:
+                  minimum: 1
+                  maximum: 10
+                  type: integer
+                activeStandby:
+                  type: boolean
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      minimum: 1
+                      maximum: 10
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+            dataPools:
+              type: array
+              items:
+                properties:
+                  failureDomain:
+                    type: string
+                  replicated:
+                    properties:
+                      size:
+                        minimum: 1
+                        maximum: 10
+                        type: integer
+                  erasureCoded:
+                    properties:
+                      dataChunks:
+                        type: integer
+                      codingChunks:
+                        type: integer
+  additionalPrinterColumns:
+    - name: ActiveMDS
+      type: string
+      description: Number of desired active MDS daemons
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+# OLM: END CEPH FS CRD
+# OLM: BEGIN CEPH NFS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            rados:
+              properties:
+                pool:
+                  type: string
+                namespace:
+                  type: string
+            server:
+              properties:
+                active:
+                  type: integer
+                annotations: {}
+                placement: {}
+                resources: {}
+
+# OLM: END CEPH NFS CRD
+# OLM: BEGIN CEPH OBJECT STORE CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            gateway:
+              properties:
+                type:
+                  type: string
+                sslCertificateRef: {}
+                port:
+                  type: integer
+                securePort: {}
+                instances:
+                  type: integer
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+            dataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+# OLM: END CEPH OBJECT STORE CRD
+# OLM: BEGIN CEPH OBJECT STORE USERS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+# OLM: END CEPH OBJECT STORE USERS CRD
+# OLM: BEGIN CEPH BLOCK POOL CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+# OLM: END CEPH BLOCK POOL CRD
+# OLM: BEGIN CEPH VOLUME POOL CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+# OLM: END CEPH VOLUME POOL CRD
+# OLM: BEGIN OBJECTBUCKET CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbuckets.objectbucket.io
+spec:
+  group: objectbucket.io
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  names:
+    kind: ObjectBucket
+    listKind: ObjectBucketList
+    plural: objectbuckets
+    singular: objectbucket
+    shortNames:
+      - ob
+      - obs
+  scope: Cluster
+  subresources:
+    status: {}
+# OLM: END OBJECTBUCKET CRD
+# OLM: BEGIN OBJECTBUCKETCLAIM CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbucketclaims.objectbucket.io
+spec:
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  group: objectbucket.io
+  names:
+    kind: ObjectBucketClaim
+    listKind: ObjectBucketClaimList
+    plural: objectbucketclaims
+    singular: objectbucketclaim
+    shortNames:
+      - obc
+      - obcs
+  scope: Namespaced
+  subresources:
+    status: {}
+# OLM: END OBJECTBUCKETCLAIM CRD
+# OLM: BEGIN OBJECTBUCKET ROLEBINDING
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-object-bucket
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-object-bucket
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-system
+    namespace: "{{ rook_namespace }}"
+# OLM: END OBJECTBUCKET ROLEBINDING
+# OLM: BEGIN OPERATOR ROLE
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  - deployments
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - policy
+  - apps
+  resources:
+  #this is for the clusterdisruption controller
+  - poddisruptionbudgets
+  #this is for both clusterdisruption and nodedrain controllers
+  - deployments
+  verbs:
+  - "*"
+- apiGroups:
+  - healthchecking.openshift.io
+  resources:
+  - machinedisruptionbudgets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - machine.openshift.io
+  resources:
+  - machines
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - list
+  - get
+  - watch
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-object-bucket
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  verbs:
+  - "*"
+  resources:
+  - secrets
+  - configmaps
+- apiGroups:
+    - storage.k8s.io
+  resources:
+    - storageclasses
+  verbs:
+    - get
+    - list
+    - watch
+- apiGroups:
+  - "objectbucket.io"
+  verbs:
+  - "*"
+  resources:
+  - "*"
+# OLM: END OPERATOR ROLE
+# OLM: BEGIN SERVICE ACCOUNT SYSTEM
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT SYSTEM
+# OLM: BEGIN OPERATOR ROLEBINDING
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+# OLM: END OPERATOR ROLEBINDING
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+# OLM: BEGIN SERVICE ACCOUNT OSD
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT OSD
+# OLM: BEGIN SERVICE ACCOUNT MGR
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT MGR
+# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CMD REPORTER SERVICE ACCOUNT
+# OLM: BEGIN CLUSTER ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+- apiGroups: ["ceph.rook.io"]
+  resources: ["cephclusters", "cephclusters/finalizers"]
+  verbs: [ "get", "list", "create", "update", "delete" ]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: "{{ rook_namespace }}"
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: "{{ rook_namespace }}"
+  labels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+# OLM: END CLUSTER ROLE
+# OLM: BEGIN CMD REPORTER ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+# OLM: END CMD REPORTER ROLE
+# OLM: BEGIN CLUSTER ROLEBINDING
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+
+---
+# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+
+# OLM: END CLUSTER ROLEBINDING
+# OLM: BEGIN CMD REPORTER ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-cmd-reporter
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CMD REPORTER ROLEBINDING
+#################################################################################################################
+# Beginning of pod security policy resources. The example will assume the cluster will be created in the
+# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify
+# the roles and bindings accordingly.
+#################################################################################################################
+# OLM: BEGIN CLUSTER POD SECURITY POLICY
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: rook-privileged
+spec:
+  privileged: true
+  allowedCapabilities:
+    # required by CSI
+    - SYS_ADMIN
+  # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group
+  fsGroup:
+    rule: RunAsAny
+  # runAsUser, supplementalGroups - Rook needs to run some pods as root
+  # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  # seLinux - seLinux context is unknown ahead of time; set if this is well-known
+  seLinux:
+    rule: RunAsAny
+  volumes:
+    # recommended minimum set
+    - configMap
+    - downwardAPI
+    - emptyDir
+    - persistentVolumeClaim
+    - secret
+    - projected
+    # required for Rook
+    - hostPath
+    - flexVolume
+  # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
+  # directory-based OSDs make this hard to nail down
+  # allowedHostPaths:
+  #   - pathPrefix: "/run/udev"  # for OSD prep
+  #     readOnly: false
+  #   - pathPrefix: "/dev"  # for OSD prep
+  #     readOnly: false
+  #   - pathPrefix: "/var/lib/rook"  # or whatever the dataDirHostPath value is set to
+  #     readOnly: false
+  # Ceph requires host IPC for setting up encrypted devices
+  hostIPC: true
+  # Ceph OSDs need to share the same PID namespace
+  hostPID: true
+  # hostNetwork can be set to 'false' if host networking isn't used
+  hostNetwork: true
+  hostPorts:
+    # Ceph messenger protocol v1
+    - min: 6789
+      max: 6790 # <- support old default port
+    # Ceph messenger protocol v2
+    - min: 3300
+      max: 3300
+    # Ceph RADOS ports for OSDs, MDSes
+    - min: 6800
+      max: 7300
+    # # Ceph dashboard port HTTP (not recommended)
+    # - min: 7000
+    #   max: 7000
+    # Ceph dashboard port HTTPS
+    - min: 8443
+      max: 8443
+    # Ceph mgr Prometheus Metrics
+    - min: 9283
+      max: 9283
+# OLM: END CLUSTER POD SECURITY POLICY
+# OLM: BEGIN POD SECURITY POLICY BINDINGS
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: 'psp:rook'
+rules:
+  - apiGroups:
+      - policy
+    resources:
+      - podsecuritypolicies
+    resourceNames:
+      - rook-privileged
+    verbs:
+      - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-ceph-system-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-system
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-default-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-osd-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-mgr-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-cmd-reporter-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CLUSTER POD SECURITY POLICY BINDINGS
+# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: "{{ rook_namespace }}"
+# OLM: END CSI CEPHFS SERVICE ACCOUNT
+# OLM: BEGIN CSI CEPHFS ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: "{{ rook_namespace }}"
+  name: cephfs-external-provisioner-cfg
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+# OLM: END CSI CEPHFS ROLE
+# OLM: BEGIN CSI CEPHFS ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role-cfg
+  namespace: "{{ rook_namespace }}"
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: Role
+  name: cephfs-external-provisioner-cfg
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI CEPHFS ROLEBINDING
+# OLM: BEGIN CSI CEPHFS CLUSTER ROLE
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+# OLM: END CSI CEPHFS CLUSTER ROLE
+# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-cephfs-plugin-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-cephfs-provisioner-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI CEPHFS CLUSTER ROLEBINDING
+# OLM: BEGIN CSI RBD SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: "{{ rook_namespace }}"
+# OLM: END CSI RBD SERVICE ACCOUNT
+# OLM: BEGIN CSI RBD ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: "{{ rook_namespace }}"
+  name: rbd-external-provisioner-cfg
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+# OLM: END CSI RBD ROLE
+# OLM: BEGIN CSI RBD ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role-cfg
+  namespace: "{{ rook_namespace }}"
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: Role
+  name: rbd-external-provisioner-cfg
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI RBD ROLEBINDING
+# OLM: BEGIN CSI RBD CLUSTER ROLE
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create", "list", "watch", "delete", "get", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots/status"]
+    verbs: ["update"]
+# OLM: END CSI RBD CLUSTER ROLE
+# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-rbd-plugin-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-rbd-provisioner-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI RBD CLUSTER ROLEBINDING
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2
new file mode 100644 (file)
index 0000000..a15a040
--- /dev/null
@@ -0,0 +1,37 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: rook-ceph-mgr-dashboard-external-https
+  namespace: "{{ rook_namespace }}"
+  labels:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+spec:
+  ports:
+  - name: dashboard
+    port: 8443
+    protocol: TCP
+    targetPort: 8443
+  selector:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+  sessionAffinity: None
+  type: NodePort
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2
new file mode 100644 (file)
index 0000000..b2575f5
--- /dev/null
@@ -0,0 +1,31 @@
+# taken from example at https://rook.github.io/docs/rook/v1.2/ceph-filesystem.html
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: {{ rook_filesystem_storageclass_name }}
+# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+provisioner: {{ rook_namespace }}.cephfs.csi.ceph.com
+parameters:
+  # clusterID is the namespace where operator is deployed.
+  clusterID: {{ rook_namespace }}
+
+  # CephFS filesystem name into which the volume shall be created
+  fsName: {{ rook_filesystem_name }}
+
+  # Ceph pool into which the volume shall be created
+  # Required for provisionVolume: "true"
+  pool: {{ rook_filesystem_name }}-data0
+
+  # Root path of an existing CephFS volume
+  # Required for provisionVolume: "false"
+  # rootPath: /absolute/path
+
+  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
+  # in the same namespace as the cluster.
+  csi.storage.k8s.io/provisioner-secret-name: rook-ceph-csi
+  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_namespace }}
+  csi.storage.k8s.io/node-stage-secret-name: rook-ceph-csi
+  csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_namespace }}
+
+reclaimPolicy: Delete
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2
new file mode 100644 (file)
index 0000000..5a4345f
--- /dev/null
@@ -0,0 +1,18 @@
+# taken from example at https://rook.github.io/docs/rook/v1.2/ceph-filesystem.html
+
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: {{ rook_filesystem_name }}
+  namespace: {{ rook_namespace }}
+spec:
+  metadataPool:
+    replicated:
+      size: 3
+  dataPools:
+    - replicated:
+        size: 3
+  preservePoolsOnDelete: true
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2
new file mode 100644 (file)
index 0000000..24a5db0
--- /dev/null
@@ -0,0 +1,261 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+#################################################################################################################
+# The deployment for the rook operator
+# Contains the common settings for most Kubernetes deployments.
+# For example, to create the rook-ceph cluster:
+#   kubectl create -f common.yaml
+#   kubectl create -f operator.yaml
+#   kubectl create -f cluster.yaml
+#
+# Also see other operator sample files for variations of operator.yaml:
+# - operator-openshift.yaml: Common settings for running in OpenShift
+#################################################################################################################
+# OLM: BEGIN OPERATOR DEPLOYMENT
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: "{{ rook_repository }}:{{ rook_version }}"
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
+        # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "false"
+        # To disable RBAC, uncomment the following:
+        # - name: RBAC_ENABLED
+        #   value: "false"
+        # Rook Agent toleration. Will tolerate all taints with all keys.
+        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+        # - name: AGENT_TOLERATION
+        #   value: "NoSchedule"
+        # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
+        # - name: AGENT_TOLERATION_KEY
+        #   value: "<KeyOfTheTaintToTolerate>"
+        # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # - name: AGENT_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Rook Agent NodeAffinity.
+        # - name: AGENT_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook,ceph"
+        # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`.
+        # `Any` uses Ceph admin credentials by default/fallback.
+        # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and
+        # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name.
+        # to the namespace in which the `mountSecret` Kubernetes secret namespace.
+        # - name: AGENT_MOUNT_SECURITY_MODE
+        #   value: "Any"
+        # Set the path where the Rook agent can find the flex volumes
+        # - name: FLEXVOLUME_DIR_PATH
+        #   value: "<PathToFlexVolumes>"
+        # Set the path where kernel modules can be found
+        # - name: LIB_MODULES_DIR_PATH
+        #   value: "<PathToLibModules>"
+        # Mount any extra directories into the agent container
+        # - name: AGENT_MOUNTS
+        #   value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2"
+        # Rook Discover toleration. Will tolerate all taints with all keys.
+        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+        # - name: DISCOVER_TOLERATION
+        #   value: "NoSchedule"
+        # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
+        # - name: DISCOVER_TOLERATION_KEY
+        #   value: "<KeyOfTheTaintToTolerate>"
+        # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # - name: DISCOVER_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Discover Agent NodeAffinity.
+        # - name: DISCOVER_AGENT_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # Allow rook to create multiple file systems. Note: This is considered
+        # an experimental feature in Ceph as described at
+        # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
+        # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
+        - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
+          value: "false"
+
+        # The logging level for the operator: INFO | DEBUG
+        - name: ROOK_LOG_LEVEL
+          value: "INFO"
+
+        # The interval to check the health of the ceph cluster and update the status in the custom resource.
+        - name: ROOK_CEPH_STATUS_CHECK_INTERVAL
+          value: "60s"
+
+        # The interval to check if every mon is in the quorum.
+        - name: ROOK_MON_HEALTHCHECK_INTERVAL
+          value: "45s"
+
+        # The duration to wait before trying to failover or remove/replace the
+        # current mon with a new mon (useful for compensating flapping network).
+        - name: ROOK_MON_OUT_TIMEOUT
+          value: "600s"
+
+        # The duration between discovering devices in the rook-discover daemonset.
+        - name: ROOK_DISCOVER_DEVICES_INTERVAL
+          value: "60m"
+
+        # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
+        # This is necessary to workaround the anyuid issues when running on OpenShift.
+        # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
+        - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
+          value: "false"
+
+        # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+        # Disable it here if you have similar issues.
+        # For more details see https://github.com/rook/rook/issues/2417
+        - name: ROOK_ENABLE_SELINUX_RELABELING
+          value: "true"
+
+        # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
+        # For more details see https://github.com/rook/rook/issues/2254
+        - name: ROOK_ENABLE_FSGROUP
+          value: "true"
+
+        # Disable automatic orchestration when new devices are discovered
+        - name: ROOK_DISABLE_DEVICE_HOTPLUG
+          value: "false"
+
+        # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
+        # in favor of the CSI driver.
+        - name: ROOK_ENABLE_FLEX_DRIVER
+          value: "true"
+
+        # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
+        # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
+        - name: ROOK_ENABLE_DISCOVERY_DAEMON
+          value: "false"
+
+        # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below.
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+
+        # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_ENABLE_GRPC_METRICS
+          value: "true"
+        # The default version of CSI supported by Rook will be started. To change the version
+        # of the CSI driver to something other than what is officially supported, change
+        # these images to the desired release of the CSI driver.
+        #- name: ROOK_CSI_CEPH_IMAGE
+        #  value: "quay.io/cephcsi/cephcsi:v1.2.1"
+        #- name: ROOK_CSI_REGISTRAR_IMAGE
+        #  value: "quay.io/k8scsi/csi-node-driver-registrar:v1.1.0"
+        #- name: ROOK_CSI_PROVISIONER_IMAGE
+        #  value: "quay.io/k8scsi/csi-provisioner:v1.3.0"
+        #- name: ROOK_CSI_SNAPSHOTTER_IMAGE
+        #  value: "quay.io/k8scsi/csi-snapshotter:v1.2.0"
+        #- name: ROOK_CSI_ATTACHER_IMAGE
+        #  value: "quay.io/k8scsi/csi-attacher:v1.2.0"
+        # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
+        #- name: ROOK_CSI_KUBELET_DIR_PATH
+        #  value: "/var/lib/kubelet"
+        # (Optional) Ceph Provisioner NodeAffinity.
+        # - name: CSI_PROVISIONER_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
+        #  CSI provisioner would be best to start on the same nodes as other ceph daemons.
+        # - name: CSI_PROVISIONER_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Ceph CSI plugin NodeAffinity.
+        # - name: CSI_PLUGIN_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+        # - name: CSI_PLUGIN_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # The name of the node to pass with the downward API
+        - name: ROOK_CSI_CEPH_IMAGE
+          value: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "{{ csi_attacherr_repository }}:{{ csi_attacher_version }}"
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
+# OLM: END OPERATOR DEPLOYMENT
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2
new file mode 100644 (file)
index 0000000..0db4c51
--- /dev/null
@@ -0,0 +1,32 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: "{{ rook_block_pool_name }}"
+  namespace: "{{ rook_namespace }}"
+spec:
+  # The failure domain will spread the replicas of the data across different failure zones
+  failureDomain: osd
+  # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy.
+  replicated:
+    size: {{ rook_block_pool_replicas }}
+  # A key/value list of annotations
+  annotations:
+  #  key: value
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2
new file mode 100644 (file)
index 0000000..21ada26
--- /dev/null
@@ -0,0 +1,40 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: "{{ rook_block_storage_name }}"
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: ceph.rook.io/block
+# Works for Kubernetes 1.14+
+allowVolumeExpansion: true
+parameters:
+  blockPool: "{{ rook_block_pool_name }}"
+  # Specify the namespace of the rook cluster from which to create volumes.
+  # If not specified, it will use `rook` as the default namespace of the cluster.
+  # This is also the namespace where the cluster will be
+  clusterNamespace: "{{ rook_namespace }}"
+  # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+  fstype: "{{ rook_block_storage_fs }}"
+  # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass.
+  #mountUser: user1
+  # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret.
+  # The secret must exist in each namespace(s) where the storage will be consumed.
+  #mountSecret: ceph-user1-secret
diff --git a/apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2 b/apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2
new file mode 100644 (file)
index 0000000..0dd3c0f
--- /dev/null
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: "{{ rook_namespace }}"
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: "{{ rook_repository }}:{{ rook_version }}"
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
diff --git a/apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml b/apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
new file mode 100644 (file)
index 0000000..9a4c206
--- /dev/null
@@ -0,0 +1,28 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+ceph_repository: "{{ server_fqdn }}/ceph/ceph"
+rook_repository: "{{ server_fqdn }}/rook/ceph"
+cephcsi_repository: "{{ server_fqdn }}/cephcsi/cephcsi"
+csi_node_driver_registrar_repository: "{{ server_fqdn }}/k8scsi/csi-node-driver-registrar"
+csi_provisioner_repository: "{{ server_fqdn }}/k8scsi/csi-provisioner"
+csi_snapshotter_repository: "{{ server_fqdn }}/k8scsi/csi-snapshotter"
+csi_attacherr_repository: "{{ server_fqdn }}/k8scsi/csi-attacher"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml b/apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml
new file mode 100644 (file)
index 0000000..21a9bb7
--- /dev/null
@@ -0,0 +1,28 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+ceph_repository: "docker.io/ceph/ceph"
+rook_repository: "rook/ceph"
+cephcsi_repository: "quay.io/cephcsi/cephcsi"
+csi_node_driver_registrar_repository: "quay.io/k8scsi/csi-node-driver-registrar"
+csi_provisioner_repository: "quay.io/k8scsi/csi-provisioner"
+csi_snapshotter_repository: "quay.io/k8scsi/csi-snapshotter"
+csi_attacherr_repository: "quay.io/k8scsi/csi-attacher"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh b/apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh
new file mode 100644 (file)
index 0000000..ed133fa
--- /dev/null
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+DISK="/dev/$1"
+# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
+# You will have to run this step for all disks.
+sgdisk --zap-all $DISK
+
+# These steps only have to be run once on each node
+# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
+ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
+# ceph-volume setup can leave ceph-<UUID> directories in /dev (unnecessary clutter)
+rm -rf /dev/ceph-*
diff --git a/apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml b/apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml
new file mode 100644 (file)
index 0000000..3844479
--- /dev/null
@@ -0,0 +1,46 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Install packages
+  action: >
+    {{ ansible_pkg_mgr }} name={{ item }} state=present update_cache=yes
+  with_items:
+    - "xfsprogs"
+    - "gdisk"
+
+- name: Remove existing rook data directories
+  file:
+    path: "{{ rook_data_dir_path }}"
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+- name: Remove existing rook storage directories
+  file:
+    path: "{{ rook_storage_dir_path }}"
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+- name: Remove existing rook ceph osds
+  script: "clean-ceph-osd.sh {{ rook_node_device_filter }}"
+  ignore_errors: true
+  tags: reset
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/istio/kubespray/playbooks/install.yml b/apps/istio/kubespray/playbooks/install.yml
new file mode 100644 (file)
index 0000000..97d22ae
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- hosts: jumphost
+  gather_facts: true
+  become: false
+
+  roles:
+    - role: install
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/istio/kubespray/playbooks/roles/install/tasks/main.yml b/apps/istio/kubespray/playbooks/roles/install/tasks/main.yml
new file mode 100644 (file)
index 0000000..1f4e859
--- /dev/null
@@ -0,0 +1,176 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Make sure "{{ istio_work_dir }}" exists
+  file:
+    path: "{{ istio_work_dir }}"
+    state: directory
+
+# TODO: validate download checksum
+- name: Download the installation files
+  unarchive:
+    src: "{{ istio_download_url }}"
+    dest: "{{ istio_work_dir }}"
+    remote_src: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: List existing installation of Istio
+  shell: helm list | awk '{print $1}' | grep istio
+  register: installed_istio_charts
+  ignore_errors: true
+  changed_when: false
+  tags: reset
+
+- name: Delete existing installation of Istio
+  command: helm delete --purge "{{ item }}"
+  loop: "{{ installed_istio_charts.stdout_lines }}"
+  ignore_errors: true
+  changed_when: true
+  tags: reset
+
+# This solves this bug: https://github.com/ansible/ansible/issues/47081 with the k8s module which is caused
+# due to the presence of --- at the end of the yaml file.
+- name: Fix upstream Istio CRDs
+  lineinfile:
+    path: "{{ item }}"
+    regex: '^-{3}\n+$'
+    line: ""
+    state: present
+    firstmatch: true
+  with_fileglob:
+    - "{{ istio_work_dir }}/istio-{{ istio_version }}/install/kubernetes/helm/istio-init/files/*"
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing Istio CRDs
+  k8s:
+    api_version: apiextensions.k8s.io/v1beta1
+    kind: CustomResourceDefinition
+    state: absent
+    src: "{{ item }}"
+  with_fileglob:
+    - "{{ istio_work_dir }}/istio-{{ istio_version }}/install/kubernetes/helm/istio-init/files/*"
+  ignore_errors: true
+  tags: reset
+
+- name: Delete Istio init namespace
+  k8s:
+    name: "{{ istio_init_namespace }}"
+    api_version: v1
+    kind: Namespace
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+# This can be avoided when we update Ansible to 2.8 version as is included in k8s module
+- name: Verify Istio init namespace deletion
+  k8s_facts:
+    kind: Namespace
+    name: "{{ istio_init_namespace }}"
+  register: namespace_status
+  until: not namespace_status.resources
+  retries: 5
+  delay: 10
+  ignore_errors: true
+  tags: reset
+
+- name: Delete Istio namespace
+  k8s:
+    name: "{{ istio_namespace }}"
+    api_version: v1
+    kind: Namespace
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+# This can be avoided when we update Ansible to 2.8 version as is included in k8s module
+- name: Verify Istio init namespace deletion
+  k8s_facts:
+    kind: Namespace
+    name: "{{ istio_namespace }}"
+  register: namespace_status
+  until: not namespace_status.resources
+  retries: 5
+  delay: 10
+  ignore_errors: true
+  tags: reset
+
+- name: Install and bootstrap Istio CRDs
+  command: >
+    helm install "{{ istio_work_dir }}"/istio-"{{ istio_version }}"/install/kubernetes/helm/istio-init
+      --name "{{ istio_init_release_name }}"
+      --namespace "{{ istio_init_namespace }}"
+  changed_when: true
+
+- name: Verify the commitment of all Istio CRDs
+  k8s_facts:
+    kind: CustomResourceDefinition
+    api: apiextensions.k8s.io/v1beta1
+    label_selectors:
+      - release=istio
+  register: crd_status
+  until: crd_status.resources|length >= 23
+  retries: 5
+  delay: 10
+
+- name: Install Istio configuration profile
+  command: >
+    helm install "{{ istio_work_dir }}"/istio-"{{ istio_version }}"/install/kubernetes/helm/istio
+      --name "{{ istio_release_name }}"
+      --namespace "{{ istio_namespace }}"
+  changed_when: true
+
+- name: Verify Istio service existence
+  k8s_facts:
+    kind: Service
+    namespace: "{{ istio_namespace }}"
+    label_selectors:
+      - release=istio
+  register: istio_service_status
+  until: istio_service_status.resources is defined
+  retries: 5
+  delay: 10
+
+- name: Wait until Istio pods are ready
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ istio_namespace }}"
+    label_selectors:
+      - release=istio
+    field_selectors:
+      - status.phase=Running
+  register: istio_pod_status
+  until:
+    - istio_pod_status.resources is defined
+    - istio_pod_status.resources
+  retries: 5
+  delay: 10
+
+- name: Add istioctl CLI bin to path
+  become: true
+  copy:
+    src: '{{ istio_work_dir }}/istio-{{ istio_version }}/bin/istioctl'
+    dest: '/usr/local/bin/istioctl'
+    remote_src: true
+    mode: '0755'
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/istio/kubespray/playbooks/roles/install/vars/main.yml b/apps/istio/kubespray/playbooks/roles/install/vars/main.yml
new file mode 100644 (file)
index 0000000..3fa5752
--- /dev/null
@@ -0,0 +1,26 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+istio_download_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-linux.tar.gz"
+istio_work_dir: "/tmp/istio"
+
+istio_namespace: istio-system
+istio_release_name: istio
+istio_init_namespace: istio-init
+istio_init_release_name: istio-init
diff --git a/apps/prometheus/kubespray/playbooks/install.yml b/apps/prometheus/kubespray/playbooks/install.yml
new file mode 100644 (file)
index 0000000..97d22ae
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- hosts: jumphost
+  gather_facts: true
+  become: false
+
+  roles:
+    - role: install
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml b/apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml
new file mode 100644 (file)
index 0000000..3161f20
--- /dev/null
@@ -0,0 +1,133 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- block:
+  - name: Create directories for helm repositories
+    file:
+      path: "{{ item.path }}"
+      state: "{{ item.state }}"
+    loop:
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: directory}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: directory}
+
+  - name: Place index.yaml to webserver stable charts repository
+    template:
+      src: "index.yaml.j2"
+      dest: "{{ engine_workspace }}/offline/charts/stable/index.yaml"
+      force: true
+  when: execution_mode == "offline-deployment"
+
+- name: Initialize Helm
+  command: helm init --client-only --local-repo-url {{ local_repo_url }} --stable-repo-url {{ stable_repo_url }}
+  register: helm_init_result
+  changed_when: true
+
+- name: Clone Helm Charts repository
+  git:
+    repo: "{{ helm_charts_git_url }}"
+    dest: "{{ config_path }}/repos/charts"
+    version: "{{ charts_version }}"
+    force: true
+    recursive: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: Generate values.yaml
+  template:
+    src: "values.yaml.j2"
+    dest: "{{ config_path }}/repos/charts/stable/prometheus/values.yaml"
+    force: true
+
+- name: Remove previous installations of Prometheus
+  command: >
+    helm delete --purge "{{ prometheus_service }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Remove Prometheus namespace
+  command: >
+    kubectl delete ns "{{ prometheus_namespace }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Create Prometheus namespace
+  k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ prometheus_namespace }}"
+
+- name: Install Prometheus using helm
+  command: >
+    helm install
+      --name "{{ prometheus_service }}"
+      --namespace "{{ prometheus_namespace }}"
+      --timeout 900
+      {{ config_path }}/repos/charts/stable/prometheus
+  register: prometheus_helm_log
+  changed_when: true
+
+- name: Log Prometheus helm output to console
+  debug:
+    msg: "{{ prometheus_helm_log.stdout_lines }}"
+
+- name: Wait until Prometheus pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ prometheus_namespace }}"
+    label_selectors:
+      - "app = {{ prometheus_service }}"
+    field_selectors:
+      - status.phase=Running
+  register: prometheus_pod_status
+  until:
+    - prometheus_pod_status.resources is defined
+    - prometheus_pod_status.resources
+  retries: 30
+  delay: 10
+
+- name: Install Prometheus LoadBalancer service
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'prometheus_service.yaml.j2') }}"
+  register: prometheus_service_status
+
+- name: Log Prometheus service information to console
+  debug:
+    msg:
+      - "------------------------------"
+      - "Prometheus Service information"
+      - "------------------------------"
+      - "clusterIP:  {{ prometheus_service_status.result.spec.clusterIP }}"
+      - "targetPort: {{ prometheus_service_status.result.spec.ports[0].targetPort }}"
+      - "nodePort:   {{ prometheus_service_status.result.spec.ports[0].nodePort }}"
+      - "------------------------------"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2 b/apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2
new file mode 100644 (file)
index 0000000..1e632d3
--- /dev/null
@@ -0,0 +1,27 @@
+apiVersion: v1
+entries:
+  prometheus:
+  - apiVersion: v1
+    appVersion: {{ prom_prometheus_version }}
+    created: 2020-03-01T17:30:10.216789698Z
+    description: Prometheus is a monitoring system and time series database.
+    digest: 6fb65153c0c0dedc16a54be8da21dcb1b5dad891948552a5b3a94c5381c25433
+    engine: gotpl
+    home: https://prometheus.io/
+    icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+    maintainers:
+    - email: gianrubio@gmail.com
+      name: gianrubio
+    - email: zanhsieh@gmail.com
+      name: zanhsieh
+    name: prometheus
+    sources:
+    - https://github.com/prometheus/alertmanager
+    - https://github.com/prometheus/prometheus
+    - https://github.com/prometheus/pushgateway
+    - https://github.com/prometheus/node_exporter
+    - https://github.com/kubernetes/kube-state-metrics
+    tillerVersion: '>=2.8.0'
+    urls:
+    - https://kubernetes-charts.storage.googleapis.com/prometheus-10.6.0.tgz
+    version: 10.6.0
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2 b/apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2
new file mode 100644 (file)
index 0000000..e11cb23
--- /dev/null
@@ -0,0 +1,33 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: "{{ prometheus_service }}"
+  namespace: "{{ prometheus_namespace }}"
+spec:
+  selector:
+    app: "{{ prometheus_service }}"
+  type: LoadBalancer
+  ports:
+  - name: http
+    port: 80
+    targetPort: 9090
+    protocol: TCP
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2 b/apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2
new file mode 100644 (file)
index 0000000..58a96cb
--- /dev/null
@@ -0,0 +1,1634 @@
+{% raw %}
+rbac:
+  create: true
+
+podSecurityPolicy:
+  enabled: false
+
+imagePullSecrets:
+# - name: "image-pull-secret"
+
+## Define serviceAccount names for components. Defaults to component's fully qualified name.
+##
+serviceAccounts:
+  alertmanager:
+    create: true
+    name:
+  kubeStateMetrics:
+    create: true
+    name:
+  nodeExporter:
+    create: true
+    name:
+  pushgateway:
+    create: true
+    name:
+  server:
+    create: true
+    name:
+
+alertmanager:
+  ## If false, alertmanager will not be installed
+  ##
+  enabled: true
+
+  ## alertmanager container name
+  ##
+  name: alertmanager
+
+  ## alertmanager container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/alertmanager
+    tag: {{ prom_alertmanager_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## alertmanager priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional alertmanager container arguments
+  ##
+  extraArgs: {}
+
+  ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
+  ## so that the various internal URLs are still able to access as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL which can access alertmanager
+  baseURL: "http://localhost:9093"
+
+  ## Additional alertmanager container environment variable
+  ## For instance to add a http_proxy
+  ##
+  extraEnv: {}
+
+  ## Additional alertmanager Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: alertmanager-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config
+  ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configFromSecret: ""
+
+  ## The configuration file name to be loaded to alertmanager
+  ## Must match the key within configuration loaded from ConfigMap/Secret
+  ##
+  configFileName: alertmanager.yml
+
+  ingress:
+    ## If true, alertmanager Ingress will be created
+    ##
+    enabled: false
+
+    ## alertmanager Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## alertmanager Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## alertmanager Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - alertmanager.domain.com
+    #   - domain.com/alertmanager
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## alertmanager Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - alertmanager.domain.com
+
+  ## Alertmanager Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## Node tolerations for alertmanager scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for alertmanager pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, alertmanager will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: true
+
+    ## alertmanager data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## alertmanager data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## alertmanager data Persistent Volume existing claim name
+    ## Requires alertmanager.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## alertmanager data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## alertmanager data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## alertmanager data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## alertmanager data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of alertmanager data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  ## Annotations to be added to alertmanager pods
+  ##
+  podAnnotations: {}
+    ## Tell prometheus to use a specific set of alertmanager pods
+    ## instead of all alertmanager pods found in the same namespace
+    ## Useful if you deploy multiple releases within the same namespace
+    ##
+    ## prometheus.io/probe: alertmanager-teamA
+
+  ## Labels to be added to Prometheus AlertManager pods
+  ##
+  podLabels: {}
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows to scale replicas to more than 1 pod
+    ##
+    enabled: false
+
+    podManagementPolicy: OrderedReady
+
+    ## Alertmanager headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+
+      ## Enabling peer mesh service end points for enabling the HA alert manager
+      ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+      # enableMeshPeer : true
+
+      servicePort: 80
+
+  ## alertmanager resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to alertmanager pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## Enabling peer mesh service end points for enabling the HA alert manager
+    ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+    # enableMeshPeer : true
+
+    ## List of IP addresses at which the alertmanager service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    # nodePort: 30000
+    sessionAffinity: None
+    type: ClusterIP
+
+## Monitors ConfigMap changes and POSTs to a URL
+## Ref: https://github.com/jimmidyson/configmap-reload
+##
+configmapReload:
+  prometheus:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+{% endraw %}
+      repository: {{ dockerio_image_repository }}/jimmidyson/configmap-reload
+      tag: {{ configmap_reload_version }}
+{% raw %}
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+  alertmanager:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+{% endraw %}
+      repository: {{ dockerio_image_repository }}/jimmidyson/configmap-reload
+      tag: {{ configmap_reload_version }}
+{% raw %}
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+
+
+kubeStateMetrics:
+  ## If false, kube-state-metrics will not be installed
+  ##
+  enabled: true
+
+  ## kube-state-metrics container name
+  ##
+  name: kube-state-metrics
+
+  ## kube-state-metrics container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ quayio_image_repository }}/coreos/kube-state-metrics
+    tag: {{ kube_state_metrics_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## kube-state-metrics priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## kube-state-metrics container arguments
+  ##
+  args: {}
+
+  ## Node tolerations for kube-state-metrics scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for kube-state-metrics pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to kube-state-metrics pods
+  ##
+  podAnnotations: {}
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  pod:
+    labels: {}
+
+  replicaCount: 1
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## kube-state-metrics resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 16Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 16Mi
+
+  ## Security context to be added to kube-state-metrics pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+    labels: {}
+
+    # Exposed as a headless service:
+    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
+    clusterIP: None
+
+    ## List of IP addresses at which the kube-state-metrics service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    # Port for Kubestatemetric self telemetry
+    serviceTelemetryPort: 81
+    type: ClusterIP
+
+nodeExporter:
+  ## If false, node-exporter will not be installed
+  ##
+  enabled: true
+
+  ## If true, node-exporter pods share the host network namespace
+  ##
+  hostNetwork: true
+
+  ## If true, node-exporter pods share the host PID namespace
+  ##
+  hostPID: true
+
+  ## node-exporter container name
+  ##
+  name: node-exporter
+
+  ## node-exporter container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/node-exporter
+    tag: {{ prom_node_exporter_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## node-exporter priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Custom Update Strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  ## Additional node-exporter container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional node-exporter hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: textfile-dir
+    #   mountPath: /srv/txt_collector
+    #   hostPath: /var/lib/node-exporter
+    #   readOnly: true
+    #   mountPropagation: HostToContainer
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Node tolerations for node-exporter scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for node-exporter pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to node-exporter pods
+  ##
+  podAnnotations: {}
+
+  ## Labels to be added to node-exporter pods
+  ##
+  pod:
+    labels: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## node-exporter resource limits & requests
+  ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 200m
+    #   memory: 50Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 30Mi
+
+  ## Security context to be added to node-exporter pods
+  ##
+  securityContext: {}
+    # runAsUser: 0
+
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+    labels: {}
+
+    # Exposed as a headless service:
+    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
+    clusterIP: None
+
+    ## List of IP addresses at which the node-exporter service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    hostPort: 9100
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9100
+    type: ClusterIP
+
+server:
+  ## Prometheus server container name
+  ##
+  enabled: true
+  name: server
+  sidecarContainers:
+
+  ## Prometheus server container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/prometheus
+    tag: {{ prom_prometheus_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## prometheus server priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
+  ## so that the various internal URLs are still able to access as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL which can access alertmanager
+  ## Maybe same with Ingress host name
+  baseURL: ""
+
+  ## Additional server container environment variables
+  ##
+  ## You specify this manually like you would a raw deployment manifest.
+  ## This means you can bind in environment variables from secrets.
+  ##
+  ## e.g. static environment variable:
+  ##  - name: DEMO_GREETING
+  ##    value: "Hello from the environment"
+  ##
+  ## e.g. secret environment variable:
+  ## - name: USERNAME
+  ##   valueFrom:
+  ##     secretKeyRef:
+  ##       name: mysecret
+  ##       key: username
+  env: []
+
+  extraFlags:
+    - web.enable-lifecycle
+    ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
+    ## deleting time series. This is disabled by default.
+    # - web.enable-admin-api
+    ##
+    ## storage.tsdb.no-lockfile flag controls BD locking
+    # - storage.tsdb.no-lockfile
+    ##
+    ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
+    # - storage.tsdb.wal-compression
+
+  ## Path to a configuration file on prometheus server container FS
+  configPath: /etc/config/prometheus.yml
+
+  global:
+    ## How frequently to scrape targets by default
+    ##
+    scrape_interval: 1m
+    ## How long until a scrape request times out
+    ##
+    scrape_timeout: 10s
+    ## How frequently to evaluate rules
+    ##
+    evaluation_interval: 1m
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
+  ##
+  remoteWrite: {}
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
+  ##
+  remoteRead: {}
+
+  ## Additional Prometheus server container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ## Additional Prometheus server Volume mounts
+  ##
+  extraVolumeMounts: []
+
+  ## Additional Prometheus server Volumes
+  ##
+  extraVolumes: []
+
+  ## Additional Prometheus server hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: certs-dir
+    #   mountPath: /etc/kubernetes/certs
+    #   subPath: ""
+    #   hostPath: /etc/kubernetes/certs
+    #   readOnly: true
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   subPath: ""
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Additional Prometheus server Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: prom-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/server-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ingress:
+    ## If true, Prometheus server Ingress will be created
+    ##
+    enabled: false
+
+    ## Prometheus server Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## Prometheus server Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## Prometheus server Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - prometheus.domain.com
+    #   - domain.com/prometheus
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## Prometheus server Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-server-tls
+    #     hosts:
+    #       - prometheus.domain.com
+
+  ## Server Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for Prometheus server pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, Prometheus server will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: true
+
+    ## Prometheus server data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## Prometheus server data Persistent Volume annotations
+    ##
+    annotations: {}
+
+    ## Prometheus server data Persistent Volume existing claim name
+    ## Requires server.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## Prometheus server data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## Prometheus server data Persistent Volume size
+    ##
+    size: 8Gi
+
+    ## Prometheus server data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## Prometheus server data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of Prometheus server data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  emptyDir:
+    sizeLimit: ""
+
+  ## Annotations to be added to Prometheus server pods
+  ##
+  podAnnotations: {}
+    # iam.amazonaws.com/role: prometheus
+
+  ## Labels to be added to Prometheus server pods
+  ##
+  podLabels: {}
+
+  ## Prometheus AlertManager configuration
+  ##
+  alertmanagers: []
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows to scale replicas to more than 1 pod
+    ##
+    enabled: false
+
+    annotations: {}
+    labels: {}
+    podManagementPolicy: OrderedReady
+
+    ## Alertmanager headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+      servicePort: 80
+
+  ## Prometheus server readiness and liveness probe initial delay and timeout
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+  ##
+  readinessProbeInitialDelay: 30
+  readinessProbeTimeout: 30
+  readinessProbeFailureThreshold: 3
+  readinessProbeSuccessThreshold: 1
+  livenessProbeInitialDelay: 30
+  livenessProbeTimeout: 30
+  livenessProbeFailureThreshold: 3
+  livenessProbeSuccessThreshold: 1
+
+  ## Prometheus server resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 500m
+    #   memory: 512Mi
+    # requests:
+    #   cpu: 500m
+    #   memory: 512Mi
+
+  ## Vertical Pod Autoscaler config
+  ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
+  verticalAutoscaler:
+    ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs)
+    enabled: false
+    # updateMode: "Auto"
+    # containerPolicies:
+    # - containerName: 'prometheus-server'
+
+  ## Security context to be added to server pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the Prometheus server service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    sessionAffinity: None
+    type: ClusterIP
+
+    ## Enable gRPC port on service to allow auto discovery with thanos-querier
+    gRPC:
+      enabled: false
+      servicePort: 10901
+      # nodePort: 10901
+
+    ## If using a statefulSet (statefulSet.enabled=true), configure the
+    ## service to connect to a specific replica to have a consistent view
+    ## of the data.
+    statefulsetReplica:
+      enabled: false
+      replica: 0
+
+  ## Prometheus server pod termination grace period
+  ##
+  terminationGracePeriodSeconds: 300
+
+  ## Prometheus data retention period (default if not specified is 15 days)
+  ##
+  retention: "15d"
+
+pushgateway:
+  ## If false, pushgateway will not be installed
+  ##
+  enabled: true
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  ## pushgateway container name
+  ##
+  name: pushgateway
+
+  ## pushgateway container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/pushgateway
+    tag: {{ prom_push_gateway_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## pushgateway priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional pushgateway container arguments
+  ##
+  ## for example: persistence.file: /data/pushgateway.data
+  extraArgs: {}
+
+  ingress:
+    ## If true, pushgateway Ingress will be created
+    ##
+    enabled: false
+
+    ## pushgateway Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## pushgateway Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - pushgateway.domain.com
+    #   - domain.com/pushgateway
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## pushgateway Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - pushgateway.domain.com
+
+  ## Node tolerations for pushgateway scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for pushgateway pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to pushgateway pods
+  ##
+  podAnnotations: {}
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  replicaCount: 1
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## pushgateway resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to push-gateway pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+
+  service:
+    annotations:
+      prometheus.io/probe: pushgateway
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the pushgateway service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9091
+    type: ClusterIP
+
+  ## pushgateway Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  persistentVolume:
+    ## If true, pushgateway will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: false
+
+    ## pushgateway data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## pushgateway data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## pushgateway data Persistent Volume existing claim name
+    ## Requires pushgateway.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## pushgateway data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## pushgateway data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## pushgateway data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## pushgateway data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of pushgateway data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+
+## alertmanager ConfigMap entries
+##
+alertmanagerFiles:
+  alertmanager.yml:
+    global: {}
+      # slack_api_url: ''
+
+    receivers:
+      - name: default-receiver
+        # slack_configs:
+        #  - channel: '@you'
+        #    send_resolved: true
+
+    route:
+      group_wait: 10s
+      group_interval: 5m
+      receiver: default-receiver
+      repeat_interval: 3h
+
+## Prometheus server ConfigMap entries
+##
+serverFiles:
+
+  ## Alerts configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
+  alerting_rules.yml: {}
+  # groups:
+  #   - name: Instances
+  #     rules:
+  #       - alert: InstanceDown
+  #         expr: up == 0
+  #         for: 5m
+  #         labels:
+  #           severity: page
+  #         annotations:
+  #           description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
+  #           summary: 'Instance {{ $labels.instance }} down'
+  ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml
+  alerts: {}
+
+  ## Records configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
+  recording_rules.yml: {}
+  ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml
+  rules: {}
+
+  prometheus.yml:
+    rule_files:
+      - /etc/config/recording_rules.yml
+      - /etc/config/alerting_rules.yml
+    ## Below two files are DEPRECATED will be removed from this default values file
+      - /etc/config/rules
+      - /etc/config/alerts
+
+    scrape_configs:
+      - job_name: prometheus
+        static_configs:
+          - targets:
+            - localhost:9090
+
+      # A scrape configuration for running Prometheus on a Kubernetes cluster.
+      # This uses separate scrape configs for cluster components (i.e. API server, node)
+      # and services to allow each to use different authentication configs.
+      #
+      # Kubernetes labels will be added as Prometheus labels on metrics via the
+      # `labelmap` relabeling action.
+
+      # Scrape config for API servers.
+      #
+      # Kubernetes exposes API servers as endpoints to the default/kubernetes
+      # service so this uses `endpoints` role and uses relabelling to only keep
+      # the endpoints associated with the default/kubernetes service using the
+      # default named port `https`. This works for single API server deployments as
+      # well as HA API server deployments.
+      - job_name: 'kubernetes-apiservers'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        # Keep only the default/kubernetes service endpoints for the https port. This
+        # will add targets for each API server which Kubernetes adds an endpoint to
+        # the default/kubernetes service.
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+            action: keep
+            regex: default;kubernetes;https
+
+      - job_name: 'kubernetes-nodes'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics
+
+
+      - job_name: 'kubernetes-nodes-cadvisor'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        # This configuration will work only on kubelet 1.7.3+
+        # As the scrape endpoints for cAdvisor have changed
+        # if you are using older version you need to change the replacement to
+        # replacement: /api/v1/nodes/$1:4194/proxy/metrics
+        # more info here https://github.com/coreos/prometheus-operator/issues/633
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+
+      # Scrape config for service endpoints.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
+      # Scrape config for slow service endpoints; same as above, but with a larger
+      # timeout and a larger interval
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
+      - job_name: 'prometheus-pushgateway'
+        honor_labels: true
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: pushgateway
+
+      # Example scrape config for probing services via the Blackbox Exporter.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/probe`: Only probe services that have a value of `true`
+      - job_name: 'kubernetes-services'
+
+        metrics_path: /probe
+        params:
+          module: [http_2xx]
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: true
+          - source_labels: [__address__]
+            target_label: __param_target
+          - target_label: __address__
+            replacement: blackbox
+          - source_labels: [__param_target]
+            target_label: instance
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            target_label: kubernetes_name
+
+      # Example scrape config for pods
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods'
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
+      # Example Scrape config for pods which should be scraped slower. An useful example
+      # would be stackriver-exporter which querys an API on every scrape of the pod
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
+# adds additional scrape configs to prometheus.yml
+# must be a string so you have to add a | after extraScrapeConfigs:
+# example adds prometheus-blackbox-exporter scrape config
+extraScrapeConfigs:
+  # - job_name: 'prometheus-blackbox-exporter'
+  #   metrics_path: /probe
+  #   params:
+  #     module: [http_2xx]
+  #   static_configs:
+  #     - targets:
+  #       - https://example.com
+  #   relabel_configs:
+  #     - source_labels: [__address__]
+  #       target_label: __param_target
+  #     - source_labels: [__param_target]
+  #       target_label: instance
+  #     - target_label: __address__
+  #       replacement: prometheus-blackbox-exporter:9115
+
+# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager
+# useful in H/A prometheus with different external labels but the same alerts
+alertRelabelConfigs:
+  # alert_relabel_configs:
+  # - source_labels: [dc]
+  #   regex: (.+)\d+
+  #   target_label: dc
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources.
+  ##
+  enabled: false
+{% endraw %}
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml b/apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml
new file mode 100644 (file)
index 0000000..775be24
--- /dev/null
@@ -0,0 +1,23 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+prometheus_service: "prometheus"
+prometheus_namespace: "prometheus"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml b/apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
new file mode 100644 (file)
index 0000000..84ead46
--- /dev/null
@@ -0,0 +1,26 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+dockerio_image_repository: "{{ server_fqdn }}"
+quayio_image_repository: "{{ server_fqdn }}"
+helm_charts_git_url: "{{ engine_workspace }}/offline/git/charts"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "http://{{ server_fqdn }}/charts/stable"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml b/apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml
new file mode 100644 (file)
index 0000000..9e97b0e
--- /dev/null
@@ -0,0 +1,26 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+dockerio_image_repository: "docker.io"
+quayio_image_repository: "quay.io"
+helm_charts_git_url: "https://github.com/helm/charts.git"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/spinnaker/kubespray/playbooks/install.yml b/apps/spinnaker/kubespray/playbooks/install.yml
new file mode 100644 (file)
index 0000000..97d22ae
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- hosts: jumphost
+  gather_facts: true
+  become: false
+
+  roles:
+    - role: install
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh b/apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh
new file mode 100755 (executable)
index 0000000..c176ac1
--- /dev/null
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+cat << EOF
+---------------------------------------------------
+Halyard Spinnaker Deployment Log
+---------------------------------------------------
+$(kubectl -n spinnaker logs $(kubectl -n spinnaker get pod --no-headers -o custom-columns=':metadata.name' | grep spinnaker-install-using))
+
+
+---------------------------------------------------
+Spinnaker pods in Create or ImagePullBackOff state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep -i 'creating\|ImagePullBackOff')
+
+
+---------------------------------------------------
+Spinnaker pods in Init state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep Init | grep -v Error)
+
+
+---------------------------------------------------
+Spinnaker pods in Error or CrashLoopBackOff state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep 'Crash\|Error')
+
+
+---------------------------------------------------
+Spinnaker POD Summary
+---------------------------------------------------
+Creating/ImagePullBackOff     : $(kubectl get pod -n spinnaker | grep -i 'creating\|ImagePullBackOff' | wc -l) pods
+Init                          : $(kubectl get pod -n spinnaker | grep Init | grep -v Error | wc -l) pods
+Error/CrashLoopBackOff        : $(kubectl get pod -n spinnaker | grep 'Error\|Crash' | wc -l) pods
+Terminating                   : $(kubectl get pod -n spinnaker | grep -i terminating | wc -l) pods
+Running/Completed             : $(kubectl get pod -n spinnaker | grep -i 'running\|completed' | wc -l) pods
+Total                         : $(kubectl get pod -n spinnaker | grep -v RESTART | wc -l) pods
+---------------------------------------------------
+
+
+---------------------------------------------------
+Summary of Container Images pulled for Spinnaker
+---------------------------------------------------
+Number of Spinnaker containers     : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | grep '^spin.*' |  wc -l)
+Number of non-Spinnaker containers : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | grep -v 'spin' | wc -l)
+Total number of containers    : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | wc -l)
+---------------------------------------------------
+EOF
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml b/apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml
new file mode 100644 (file)
index 0000000..a722222
--- /dev/null
@@ -0,0 +1,166 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+#
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- block:
+  - name: Create directories for helm repositories
+    file:
+      path: "{{ item.path }}"
+      state: "{{ item.state }}"
+    loop:
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: directory}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: directory}
+
+  - name: Place index.yaml to webserver stable charts repository
+    template:
+      src: "index.yaml.j2"
+      dest: "{{ engine_workspace }}/offline/charts/stable/index.yaml"
+      force: true
+  when: execution_mode == "offline-deployment"
+
+- name: Initialize Helm
+  command: helm init --client-only --local-repo-url {{ local_repo_url }} --stable-repo-url {{ stable_repo_url }}
+  register: helm_init_result
+  changed_when: true
+
+- name: Clone Helm Charts repository
+  git:
+    repo: "{{ helm_charts_git_url }}"
+    dest: "{{ engine_cache }}/repos/charts"
+    version: "{{ charts_version }}"
+    force: true
+    recursive: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: Generate values.yaml
+  template:
+    src: "values.yaml.j2"
+    dest: "{{ engine_cache }}/repos/charts/stable/spinnaker/values.yaml"
+    force: true
+
+- name: Remove previous installations of Spinnaker
+  command: >
+    helm delete --purge "{{ spinnaker_service }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Remove Spinnaker namespace
+  command: >
+    kubectl delete ns "{{ spinnaker_namespace }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Create Spinnaker namespace
+  k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ spinnaker_namespace }}"
+
+- name: Verify Spinnaker Helm charts are available to be deployed
+  command: helm search spinnaker -l
+  register: helm_search
+  changed_when: false
+
+- name: Log Helm chart list to console
+  debug:
+    msg: "{{ helm_search.stdout_lines }}"
+
+- name: Inform user about Spinnaker deployment
+  debug:
+    msg: >
+      Spinnaker deployment is about to start!
+      This takes a while and nothing will be logged to console until the process is completed.
+
+- name: Fetch all helm dependencies for Spinnaker
+  command: >
+    helm dependency update
+      {{ engine_cache }}/repos/charts/stable/spinnaker
+  changed_when: true
+
+- name: Install Spinnaker using helm
+  command: >
+    helm install
+      --name "{{ spinnaker_service }}"
+      --namespace "{{ spinnaker_namespace }}"
+      --timeout 900
+      {{ engine_cache }}/repos/charts/stable/spinnaker
+  register: spinnaker_helm_log
+  changed_when: true
+
+- name: Log Spinnaker helm output to console
+  debug:
+    msg: "{{ spinnaker_helm_log.stdout_lines }}"
+
+# wait 10 minutes for all containers to be started
+- name: Wait for all containers to be started
+  shell: |
+    set -o pipefail
+    kubectl get po -n spinnaker | grep ContainerCreating | wc -l
+  register: kube
+  changed_when:
+    kube.stdout  == '0'
+  until:
+    kube.stdout  == '0'
+  retries: 60
+  delay: 10
+
+# wait 20 minutes for all containers to be initialized
+- block:
+    - name: Wait for all containers to be initialized
+      shell: |
+        set -o pipefail
+        kubectl get po -n spinnaker | grep Init | grep -v Error | wc -l
+      register: kube
+      changed_when:
+        kube.stdout  == '0'
+      until:
+        kube.stdout  == '0'
+      retries: 120
+      delay: 10
+  always:
+    - name: Get POD status
+      command: kubectl get po -n spinnaker
+      changed_when: false
+      register: kube
+
+    - name: Log POD status to console
+      debug:
+        msg: "{{ kube.stdout_lines }}"
+
+    - name: Get summary of Spinnaker deployment
+      script: log-spinnaker-status.sh
+      register: spinnaker_status
+
+    - name: Log Spinnaker status to console
+      debug:
+        msg: "{{ spinnaker_status.stdout_lines }}"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2 b/apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2
new file mode 100644 (file)
index 0000000..963516c
--- /dev/null
@@ -0,0 +1,22 @@
+apiVersion: v1
+entries:
+  spinnaker:
+  - apiVersion: v1
+    description: Open source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence.
+    name: spinnaker
+    version: {{ spinnaker_version }}
+    appVersion: {{ spinnaker_app_version }}
+    home: http://spinnaker.io/
+    sources:
+    - https://github.com/spinnaker
+    - https://github.com/viglesiasce/images
+    icon: https://pbs.twimg.com/profile_images/669205226994319362/O7OjwPrh_400x400.png
+    maintainers:
+    - name: viglesiasce
+      email: viglesias@google.com
+    - name: ezimanyi
+      email: ezimanyi@google.com
+    - name: dwardu89
+      email: hello@dwardu.com
+    - name: paulczar
+      email: username.taken@gmail.com
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2 b/apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2
new file mode 100644 (file)
index 0000000..8d88583
--- /dev/null
@@ -0,0 +1,308 @@
+halyard:
+  spinnakerVersion: {{ spinnaker_app_version }}
+  image:
+    repository: {{ gcrio_image_repository }}/spinnaker-marketplace/halyard
+    tag: {{ spinnaker_version }}
+    pullSecrets: []
+  # Set to false to disable persistence data volume for halyard
+  persistence:
+    enabled: false
+  # Provide a config map with Hal commands that will be run the core config (storage)
+  # The config map should contain a script in the config.sh key
+  additionalScripts:
+    enabled: false
+    configMapName: my-halyard-config
+    configMapKey: config.sh
+    # If you'd rather do an inline script, set create to true and put the content in the data dict like you would a configmap
+    # The content will be passed through `tpl`, so value interpolation is supported.
+    create: false
+    data: {}
+  additionalSecrets:
+    create: false
+    data: {}
+    ## Uncomment if you want to use a pre-created secret rather than feeding data in via helm.
+    # name:
+  additionalConfigMaps:
+    create: false
+    data: {}
+    ## Uncomment if you want to use a pre-created ConfigMap rather than feeding data in via helm.
+    # name:
+  ## Define custom profiles for Spinnaker services. Read more for details:
+  ## https://www.spinnaker.io/reference/halyard/custom/#custom-profiles
+  ## The contents of the files will be passed through `tpl`, so value interpolation is supported.
+  additionalProfileConfigMaps:
+    data: {}
+      ## if you're running spinnaker behind a reverse proxy such as a GCE ingress
+      ## you may need the following profile settings for the gate profile.
+      ## see https://github.com/spinnaker/spinnaker/issues/1630
+      ## otherwise its harmless and will likely become default behavior in the future
+      ## According to the linked github issue.
+      # gate-local.yml:
+      #   server:
+      #     tomcat:
+      #       protocolHeader: X-Forwarded-Proto
+      #       remoteIpHeader: X-Forwarded-For
+      #       internalProxies: .*
+      #       httpsServerPort: X-Forwarded-Port
+
+  ## Define custom settings for Spinnaker services. Read more for details:
+  ## https://www.spinnaker.io/reference/halyard/custom/#custom-service-settings
+  ## You can use it to add annotations for pods, override the image, etc.
+  additionalServiceSettings: {}
+    # deck.yml:
+    #   artifactId: gcr.io/spinnaker-marketplace/deck:2.9.0-20190412012808
+    #   kubernetes:
+    #     podAnnotations:
+    #       iam.amazonaws.com/role: <role_arn>
+    # clouddriver.yml:
+    #   kubernetes:
+    #     podAnnotations:
+    #       iam.amazonaws.com/role: <role_arn>
+
+  ## Populate to provide a custom local BOM for Halyard to use for deployment. Read more for details:
+  ## https://www.spinnaker.io/guides/operator/custom-boms/#boms-and-configuration-on-your-filesystem
+  bom: ~
+  #   artifactSources:
+  #     debianRepository: https://dl.bintray.com/spinnaker-releases/debians
+  #     dockerRegistry: gcr.io/spinnaker-marketplace
+  #     gitPrefix: https://github.com/spinnaker
+  #     googleImageProject: marketplace-spinnaker-release
+  #   services:
+  #     clouddriver:
+  #       commit: 031bcec52d6c3eb447095df4251b9d7516ed74f5
+  #       version: 6.3.0-20190904130744
+  #     deck:
+  #       commit: b0aac478e13a7f9642d4d39479f649dd2ef52a5a
+  #       version: 2.12.0-20190916141821
+  #     ...
+  #   timestamp: '2019-09-16 18:18:44'
+  #   version: 1.16.1
+
+  ## Define local configuration for Spinnaker services.
+  ## The contents of these files would be copies of the configuration normally retrieved from
+  ## `gs://halconfig/<service-name>`, but instead need to be available locally on the halyard pod to facilitate
+  ## offline installation. This would typically be used along with a custom `bom:` with the `local:` prefix on a
+  ## service version.
+  ## Read more for details:
+  ## https://www.spinnaker.io/guides/operator/custom-boms/#boms-and-configuration-on-your-filesystem
+  ## The key for each entry must be the name of the service and a file name separated by the '_' character.
+  serviceConfigs: {}
+  # clouddriver_clouddriver-ro.yml: |-
+  #   ...
+  # clouddriver_clouddriver-rw.yml: |-
+  #   ...
+  # clouddriver_clouddriver.yml: |-
+  #   ...
+  # deck_settings.json: |-
+  #   ...
+  # echo_echo.yml: |-
+  #   ...
+
+  ## Uncomment if you want to add extra commands to the init script
+  ## run by the init container before halyard is started.
+  ## The content will be passed through `tpl`, so value interpolation is supported.
+  # additionalInitScript: |-
+
+  ## Uncomment if you want to add annotations on halyard and install-using-hal pods
+  # annotations:
+  #   iam.amazonaws.com/role: <role_arn>
+
+  ## Uncomment the following resources definitions to control the cpu and memory
+  # resources allocated for the halyard pod
+  resources: {}
+    # requests:
+    #   memory: "1Gi"
+    #   cpu: "100m"
+    # limits:
+    #   memory: "2Gi"
+    #   cpu: "200m"
+
+  ## Uncomment if you want to set environment variables on the Halyard pod.
+  # env:
+  #   - name: JAVA_OPTS
+  #     value: -Dhttp.proxyHost=proxy.example.com
+  customCerts:
+    ## Enable to override the default cacerts with your own one
+    enabled: false
+    secretName: custom-cacerts
+
+# Define which registries and repositories you want available in your
+# Spinnaker pipeline definitions
+# For more info visit:
+#   https://www.spinnaker.io/setup/providers/docker-registry/
+
+# Configure your Docker registries here
+dockerRegistries:
+- name: dockerhub
+  address: index.docker.io
+  repositories:
+    - library/alpine
+    - library/ubuntu
+    - library/centos
+    - library/nginx
+# - name: gcr
+#   address: https://gcr.io
+#   username: _json_key
+#   password: '<INSERT YOUR SERVICE ACCOUNT JSON HERE>'
+#   email: 1234@5678.com
+
+# If you don't want to put your passwords into a values file
+# you can use a pre-created secret instead of putting passwords
+# (specify secret name in below `dockerRegistryAccountSecret`)
+# per account above with data in the format:
+# <name>: <password>
+
+# dockerRegistryAccountSecret: myregistry-secrets
+
+kubeConfig:
+  # Use this when you want to register arbitrary clusters with Spinnaker
+  # Upload your ~/kube/.config to a secret
+  enabled: false
+  secretName: my-kubeconfig
+  secretKey: config
+  # Use this when you want to configure halyard to reference a kubeconfig from s3
+  # This allows you to keep your kubeconfig in an encrypted s3 bucket
+  # For more info visit:
+  #   https://www.spinnaker.io/reference/halyard/secrets/s3-secrets/#secrets-in-s3
+  # encryptedKubeconfig: encrypted:s3!r:us-west-2!b:mybucket!f:mykubeconfig
+  # List of contexts from the kubeconfig to make available to Spinnaker
+  contexts:
+  - default
+  deploymentContext: default
+  omittedNameSpaces:
+  - kube-system
+  - kube-public
+  onlySpinnakerManaged:
+    enabled: false
+
+  # When false, clouddriver will skip the permission checks for all kubernetes kinds at startup.
+  # This can save a great deal of time during clouddriver startup when you have many kubernetes
+  # accounts configured. This disables the log messages at startup about missing permissions.
+  checkPermissionsOnStartup: true
+
+  # A list of resource kinds this Spinnaker account can deploy to and will cache.
+  # When no kinds are configured, this defaults to â€˜all kinds'.
+  # kinds:
+  # -
+
+  # A list of resource kinds this Spinnaker account cannot deploy to or cache.
+  # This can only be set when â€“kinds is empty or not set.
+  # omittedKinds:
+  # -
+
+# Change this if youd like to expose Spinnaker outside the cluster
+ingress:
+  enabled: false
+  # host: spinnaker.example.org
+  # annotations:
+    # ingress.kubernetes.io/ssl-redirect: 'true'
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  # tls:
+  #  - secretName: -tls
+  #    hosts:
+  #      - domain.com
+
+ingressGate:
+  enabled: false
+  # host: gate.spinnaker.example.org
+  # annotations:
+    # ingress.kubernetes.io/ssl-redirect: 'true'
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  # tls:
+  #  - secretName: -tls
+  #    hosts:
+  #      - domain.com
+
+# spinnakerFeatureFlags is a list of Spinnaker feature flags to enable
+# Ref: https://www.spinnaker.io/reference/halyard/commands/#hal-config-features-edit
+# spinnakerFeatureFlags:
+#   - artifacts
+#   - pipeline-templates
+spinnakerFeatureFlags:
+  - artifacts
+  - jobs
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+# nodeSelector to provide to each of the Spinnaker components
+nodeSelector: {}
+
+# Redis password to use for the in-cluster redis service
+# Enable redis to use in-cluster redis
+redis:
+  enabled: true
+  # External Redis option will be enabled if in-cluster redis is disabled
+  external:
+    host: "<EXTERNAL-REDIS-HOST-NAME>"
+    port: 6379
+    # password: ""
+  password: password
+  nodeSelector: {}
+  cluster:
+    enabled: false
+# Uncomment if you don't want to create a PVC for redis
+  master:
+    persistence:
+      enabled: false
+
+# Minio access/secret keys for the in-cluster S3 usage
+# Minio is not exposed publically
+minio:
+  enabled: true
+  imageTag: RELEASE.2019-02-13T19-48-27Z
+  serviceType: ClusterIP
+  accessKey: spinnakeradmin
+  secretKey: spinnakeradmin
+  bucket: "spinnaker"
+  nodeSelector: {}
+# Uncomment if you don't want to create a PVC for minio
+  persistence:
+    enabled: false
+
+# Google Cloud Storage
+gcs:
+  enabled: false
+  project: my-project-name
+  bucket: "<GCS-BUCKET-NAME>"
+  ## if jsonKey is set, will create a secret containing it
+  jsonKey: '<INSERT CLOUD STORAGE JSON HERE>'
+  ## override the name of the secret to use for jsonKey, if `jsonKey`
+  ## is empty, it will not create a secret assuming you are creating one
+  ## external to the chart. the key for that secret should be `key.json`.
+  secretName:
+
+# AWS Simple Storage Service
+s3:
+  enabled: false
+  bucket: "<S3-BUCKET-NAME>"
+  # rootFolder: "front50"
+  # region: "us-east-1"
+  # endpoint: ""
+  # accessKey: ""
+  # secretKey: ""
+  # assumeRole: "<role to assume>"
+
+# Azure Storage Account
+azs:
+  enabled: false
+#   storageAccountName: ""
+#   accessKey: ""
+#   containerName: "spinnaker"
+
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccounts to use.
+  # If left blank it is auto-generated from the fullname of the release
+  halyardName:
+  spinnakerName:
+securityContext:
+  # Specifies permissions to write for user/group
+  runAsUser: 1000
+  fsGroup: 1000
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml b/apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml
new file mode 100644 (file)
index 0000000..4995e3d
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+helm_charts_git_url: https://github.com/helm/charts.git
+
+spinnaker_service: "spinnaker"
+spinnaker_namespace: "spinnaker"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml b/apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
new file mode 100644 (file)
index 0000000..2d8de91
--- /dev/null
@@ -0,0 +1,25 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+gcrio_image_repository: "{{ server_fqdn }}"
+helm_charts_git_url: "{{ engine_workspace }}/offline/git/charts"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "http://{{ server_fqdn }}/charts/stable"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml b/apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml
new file mode 100644 (file)
index 0000000..51511e6
--- /dev/null
@@ -0,0 +1,25 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+gcrio_image_repository: "gcr.io"
+helm_charts_git_url: "https://github.com/helm/charts.git"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
+
+# vim: set ts=2 sw=2 expandtab:
index 9ae4cc3ba98ddc74c5b6f853684f208ad1c47de1..d5f1b8d6b196b0a28212f6296b09d4b92c59a575 100755 (executable)
@@ -21,6 +21,12 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+#-------------------------------------------------------------------------------
+# Find and set where we are
+#-------------------------------------------------------------------------------
+STACK_ROOT_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]}")")"
+export STACK_ROOT_DIR
+
 #-------------------------------------------------------------------------------
 # Bootstrap stack software configuration
 #-------------------------------------------------------------------------------
@@ -29,7 +35,7 @@ echo "-------------------------------------------------------------------------"
 cd "${ENGINE_PATH}"
 ansible-playbook "${ENGINE_ANSIBLE_PARAMS[@]}" \
     -i "${ENGINE_PATH}/engine/inventory/localhost.ini" \
-    engine/stack/kubernetes/playbooks/bootstrap-swconfig.yaml
+    "${STACK_ROOT_DIR}/playbooks/bootstrap-swconfig.yaml"
 echo "-------------------------------------------------------------------------"
 
 #-------------------------------------------------------------------------------
@@ -52,7 +58,7 @@ source "${ENGINE_PATH}/engine/library/engine-services.sh"
 cd "${ENGINE_PATH}"
 ansible-playbook "${ENGINE_ANSIBLE_PARAMS[@]}" \
     -i "${ENGINE_PATH}/engine/inventory/inventory.ini" \
-    engine/stack/kubernetes/playbooks/prepare-artifacts.yaml
+    "${STACK_ROOT_DIR}/playbooks/prepare-artifacts.yaml"
 echo "-------------------------------------------------------------------------"
 
 #-------------------------------------------------------------------------------
index 197f1f2d8fac8be751d2b96e18b6865308df0e5e..87777a41bc9fd6a24e7ebcc9348bc4af694de65f 100755 (executable)
@@ -21,6 +21,12 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+#-------------------------------------------------------------------------------
+# Find and set where we are
+#-------------------------------------------------------------------------------
+STACK_ROOT_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]}")")"
+export STACK_ROOT_DIR
+
 #-------------------------------------------------------------------------------
 # Start packaging process
 #-------------------------------------------------------------------------------
@@ -29,7 +35,7 @@ echo "-------------------------------------------------------------------------"
 cd "${ENGINE_PATH}"
 ansible-playbook "${ENGINE_ANSIBLE_PARAMS[@]}" \
     -i "${ENGINE_PATH}/engine/inventory/localhost.ini" \
-    engine/stack/kubernetes/playbooks/package.yaml
+    "${STACK_ROOT_DIR}/playbooks/package.yaml"
 echo "-------------------------------------------------------------------------"
 echo
 echo "Info  : Packaging is done!"
diff --git a/playbooks/postinstall.yaml b/playbooks/postinstall.yaml
new file mode 100644 (file)
index 0000000..c9f23f4
--- /dev/null
@@ -0,0 +1,32 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# run common postinstall tasks
+# NOTE: The common post-deployment tasks is currently applicable only to
+# simple k8s and openstack scenarios.
+# in future, when statement could be moved to tasks in role if the current
+# tasks become relevant or new tasks are added.
+- hosts: all
+  gather_facts: true
+  become: true
+
+  roles:
+    - role: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/preinstall.yaml b/playbooks/preinstall.yaml
new file mode 100644 (file)
index 0000000..cf10f79
--- /dev/null
@@ -0,0 +1,22 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): left this playbook as placeholder
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/tasks/configure-jumphost.yml b/playbooks/roles/postinstall/tasks/configure-jumphost.yml
new file mode 100644 (file)
index 0000000..3e0cfb5
--- /dev/null
@@ -0,0 +1,78 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- name: Install openshift
+  pip:
+    name: openshift
+
+- name: Ensure /root/.kube folder exists and empty
+  file:
+    path: "/root/.kube"
+    state: "{{ item }}"
+    owner: "root"
+    mode: 0755
+  with_items:
+    - absent
+    - directory
+
+- name: Copy kubernetes admin.conf to /root/.kube
+  copy:
+    src: "{{ engine_cache }}/repos/kubespray/inventory/engine/artifacts/admin.conf"
+    dest: "/root/.kube/config"
+    owner: "root"
+    mode: 0644
+
+- name: Download kubectl and place it to /usr/local/bin
+  get_url:
+    url: "{{ kubectl_download_url }}"
+    dest: /usr/local/bin/kubectl
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Download helm client
+  unarchive:
+    src: "{{ helm_client_download_url }}"
+    remote_src: true
+    dest: /tmp
+
+- name: Place helm and tiller binaries to /usr/local/bin
+  copy:
+    src: "/tmp/linux-amd64/{{ item }}"
+    remote_src: true
+    dest: "/usr/local/bin/{{ item }}"
+    owner: root
+    group: root
+    mode: 0755
+  with_items:
+    - helm
+    - tiller
+
+- name: Delete temporary files and folders
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/tmp/helm-{{ helm_version }}-linux-amd64.tar.gz"
+    - "/tmp/linux-amd64"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/tasks/configure-localhost.yml b/playbooks/roles/postinstall/tasks/configure-localhost.yml
new file mode 100644 (file)
index 0000000..f6a23b2
--- /dev/null
@@ -0,0 +1,74 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- name: Ensure /home/{{ ansible_env.SUDO_USER }}/.kube folder exists and empty
+  file:
+    path: "/home/{{ ansible_env.SUDO_USER }}/.kube"
+    state: "{{ item }}"
+    owner: "{{ ansible_env.SUDO_USER }}"
+    mode: 0755
+  with_items:
+    - absent
+    - directory
+
+- name: Copy kubernetes admin.conf to /home/{{ ansible_env.SUDO_USER }}/.kube
+  copy:
+    src: "{{ engine_cache }}/repos/kubespray/inventory/engine/artifacts/admin.conf"
+    dest: "/home/{{ ansible_env.SUDO_USER }}/.kube/config"
+    owner: "{{ ansible_env.SUDO_USER }}"
+    mode: 0644
+
+- name: Download kubectl and place it to /usr/local/bin
+  get_url:
+    url: "{{ kubectl_download_url }}"
+    dest: /usr/local/bin/kubectl
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Download helm client
+  unarchive:
+    src: "{{ helm_client_download_url }}"
+    remote_src: true
+    dest: /tmp
+
+- name: Place helm and tiller binaries to /usr/local/bin
+  copy:
+    src: "/tmp/linux-amd64/{{ item }}"
+    remote_src: true
+    dest: "/usr/local/bin/{{ item }}"
+    owner: root
+    group: root
+    mode: 0755
+  with_items:
+    - helm
+    - tiller
+
+- name: Delete temporary files and folders
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/tmp/helm-{{ helm_version }}-linux-amd64.tar.gz"
+    - "/tmp/linux-amd64"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/tasks/main.yml b/playbooks/roles/postinstall/tasks/main.yml
new file mode 100644 (file)
index 0000000..c2eefc1
--- /dev/null
@@ -0,0 +1,25 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE: Install OpenShift and configure kubectl & helm on localhost
+# we operate against Kubernetes cluster from localhost
+- include_tasks: configure-{{ jumphost }}.yml
+  when: jumphost in group_names
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/vars/main.yaml b/playbooks/roles/postinstall/vars/main.yaml
new file mode 100644 (file)
index 0000000..9c1177b
--- /dev/null
@@ -0,0 +1,22 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+jumphost: "{{ 'jumphost' if provisioner_type == 'heat' else 'localhost' }}"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/vars/offline-deployment.yaml b/playbooks/roles/postinstall/vars/offline-deployment.yaml
new file mode 100644 (file)
index 0000000..f2e6fc9
--- /dev/null
@@ -0,0 +1,23 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+kubectl_download_url: "http://{{ server_fqdn }}/binaries/kubectl"
+helm_client_download_url: "http://{{ server_fqdn }}/binaries/helm-{{ helm_version }}-linux-amd64.tar.gz"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/postinstall/vars/online-deployment.yaml b/playbooks/roles/postinstall/vars/online-deployment.yaml
new file mode 100644 (file)
index 0000000..f616c46
--- /dev/null
@@ -0,0 +1,23 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubectl_version }}/bin/linux/amd64/kubectl"
+helm_client_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-calico-istio.yaml b/scenarios/k8-calico-istio.yaml
new file mode 100644 (file)
index 0000000..ca09ba8
--- /dev/null
@@ -0,0 +1,54 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to Calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Istio
+  import_playbook: "../apps/istio/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-calico-nofeature.yaml b/scenarios/k8-calico-nofeature.yaml
new file mode 100644 (file)
index 0000000..896888a
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to Calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-calico-spinnaker.yaml b/scenarios/k8-calico-spinnaker.yaml
new file mode 100644 (file)
index 0000000..9d5cb2f
--- /dev/null
@@ -0,0 +1,54 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Spinnaker
+  import_playbook: "../apps/spinnaker/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-canal-nofeature.yaml b/scenarios/k8-canal-nofeature.yaml
new file mode 100644 (file)
index 0000000..afad438
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to canal
+    - name: Set network plugin to Canal
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: canal"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-cilium-nofeature.yaml b/scenarios/k8-cilium-nofeature.yaml
new file mode 100644 (file)
index 0000000..d82f251
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to cilium
+    - name: Set network plugin to Cilium
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: cilium"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-flannel-nofeature.yaml b/scenarios/k8-flannel-nofeature.yaml
new file mode 100644 (file)
index 0000000..1183517
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to flannel
+    - name: Set network plugin to Flannel
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: flannel"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-multus-nofeature.yaml b/scenarios/k8-multus-nofeature.yaml
new file mode 100644 (file)
index 0000000..1840e4a
--- /dev/null
@@ -0,0 +1,57 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set master plugin to calico for multus to use as the primary network plugin
+    - name: Configure Multus to use Calico as the primary network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+    # configure multus to use ca
+    - name: Enable Multus network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin_multus:.*"
+        line: "kube_network_plugin_multus: true"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-multus-plugins.yaml b/scenarios/k8-multus-plugins.yaml
new file mode 100644 (file)
index 0000000..599558b
--- /dev/null
@@ -0,0 +1,72 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set master plugin to calico for multus to use as the primary network plugin
+    - name: Configure Multus to use Calico as the primary network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+    # configure multus to use ca
+    - name: Enable Multus network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin_multus:.*"
+        line: "kube_network_plugin_multus: true"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- hosts: k8s-cluster
+  gather_facts: false
+  become: false
+  tags:
+    - postinstall
+
+  tasks:
+    - name: Include Kubespray vars
+      include_vars: "{{ item }}"
+      with_items:
+        - "{{ engine_cache }}/repos/kubespray/roles/kubespray-defaults/defaults/main.yaml"
+        - "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+
+    - include_tasks: "{{ engine_cache }}/repos/kubespray/roles/network_plugin/cni/tasks/main.yml"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/scenarios/k8-weave-nofeature.yaml b/scenarios/k8-weave-nofeature.yaml
new file mode 100644 (file)
index 0000000..56637ab
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to weave
+    - name: Set network plugin to Weave
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: weave"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../playbooks/postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/tox.ini b/tox.ini
index a9df5d0d91c579bc4e42b41deacabaebc325badf..70655ef7adcb558d01b310a5928fb3fdd60a419c 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -25,6 +25,9 @@ commands =
     -print0 | xargs -t -n1 -0 yamllint --format standard --strict"
 
 [testenv:shellcheck]
+# TODO (fdegir): shellcheck errors are ignored since those come from scenarios
+# and we need time to fix those
+ignore_outcome = true
 description = invoke shellcheck to analyse bash shell scripts
 deps = -r{toxinidir}/test-requirements.txt
 whitelist_externals = bash