Adapt kubespray to stack-oriented framework

This change adapts the kubespray installer to the stack-oriented
framework approach. In summary, the changes are:

- the package role is moved to the kubernetes stack
- apps and scenarios are moved from the swconfig repo to this repo
- the pre-deployment and post-deployment playbooks and roles are
  renamed to preinstall and postinstall respectively
- vars are moved to the kubernetes repository and only the installer
  version is controlled in this repo
- the shellcheck outcome is ignored since the failures are caused by
  moving scenarios and apps from the swconfig repo; it will be
  re-enabled once the issues are fixed
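
The relocated apps are plain Ansible playbooks, so the Ceph app can be
installed by running its install playbook directly. The invocation
below is only a sketch; the inventory path is an assumption and
depends on how the stack framework generates it:

  ansible-playbook -i <stack-generated-inventory> \
      playbooks/apps/ceph/kubespray/playbooks/install.yml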

Change-Id: I36860547b6493c579b600c063ecb413151c370b7
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/ceph/kubespray/playbooks/install.yml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/install.yml
index bd7361f..171f1af 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/install.yml
@@ -17,14 +17,20 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
+- hosts: baremetal
+  gather_facts: true
+  become: true
 
-# service names
-docker_service_name: docker
+  roles:
+    - role: common
+    - role: prepare
+
+- hosts: jumphost
+  gather_facts: true
+  become: false
+
+  roles:
+    - role: common
+    - role: install
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/dib.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
similarity index 69%
copy from playbooks/roles/package/tasks/dib.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
index 887977b..933075a 100644
--- a/playbooks/roles/package/tasks/dib.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/common/vars/main.yml
@@ -17,19 +17,19 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store images used for provisioning
-  file:
-    path: "{{ dib_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
+rook_data_dir_path: "/var/lib/rook"
+rook_storage_dir_path: "/rook/storage-dir"
 
-- name: Download distro images used for provisioning nodes
-  get_url:
-    url: "{{ item }}"
-    dest: "{{ dib_folder }}"
-    force: true
-  loop: "{{ dib_images }}"
+rook_namespace: "rook-ceph"
 
-# vim: set ts=2 sw=2 expandtab:
+rook_use_host_network: "false"
+rook_node_device_filter: "vdb"
+
+rook_block_pool_name: "block-pool"
+rook_block_pool_replicas: 1
+
+rook_block_storage_name: "block-storage"
+rook_block_storage_fs: "xfs"
+
+rook_filesystem_name: "rookfs"
+rook_filesystem_storageclass_name: "csi-cephfs"
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml
new file mode 100644
index 0000000..b977018
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/tasks/main.yaml
@@ -0,0 +1,164 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- name: Delete existing rook cluster if any
+  k8s:
+    definition: "{{ lookup('template', config_file) }}"
+    state: absent
+  with_items:
+    - external-dashboard-https.yaml.j2
+    - pool.yaml.j2
+    - storageclass.yaml.j2
+    - toolbox.yaml.j2
+    - cluster.yaml.j2
+  loop_control:
+    loop_var: config_file
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing rook cluster CRD if any
+  k8s:
+    api_version: apiextensions.k8s.io/v1beta1
+    state: absent
+    kind: CustomResourceDefinition
+    name: cephclusters.ceph.rook.io
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing rook operator if any
+  k8s:
+    definition: "{{ lookup('template', config_file) }}"
+    state: absent
+  with_items:
+    - operator.yaml.j2
+    - common.yaml.j2
+  loop_control:
+    loop_var: config_file
+  ignore_errors: true
+  tags: reset
+
+- name: Wait until rook namespace is deleted
+  k8s_facts:
+    kind: Namespace
+    name: "{{ rook_namespace }}"
+  register: result
+  until: not result.resources
+  retries: 10
+  delay: 5
+  tags: reset
+
+- name: Create rook operator
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - common.yaml.j2
+    - operator.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Wait until OPERATOR pod is available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-operator
+    field_selectors:
+      - status.phase=Running
+  register: rook_operator_status
+  until:
+    - rook_operator_status.resources is defined
+    - rook_operator_status.resources
+  retries: 20
+  delay: 5
+
+- name: Create rook cluster
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - cluster.yaml.j2
+    - toolbox.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Wait until rook cluster deployment is complete
+  k8s_facts:
+    kind: CephCluster
+    name: rook-ceph
+    namespace: "{{ rook_namespace }}"
+    field_selectors:
+      - status.state=Created
+  register: rook_cluster_status
+  until:
+    - rook_cluster_status.resources
+  retries: 10
+  delay: 5
+
+- name: Wait until MGR pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-mgr
+    field_selectors:
+      - status.phase=Running
+  register: rook_mgr_status
+  until:
+    - rook_mgr_status.resources is defined
+    - rook_mgr_status.resources
+  retries: 30
+  delay: 10
+
+- name: Wait until OSD pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ rook_namespace }}"
+    label_selectors:
+      - app = rook-ceph-osd
+    field_selectors:
+      - status.phase=Running
+  register: rook_osd_status
+  until:
+    - rook_osd_status.resources is defined
+    - rook_osd_status.resources
+  retries: 30
+  delay: 10
+
+- name: Create rook block and filesystem storage
+  k8s:
+    state: present
+    definition: "{{ lookup('template', config_file) }}"
+  with_items:
+    - pool.yaml.j2
+    - storageclass.yaml.j2
+    - filesystem.yaml.j2
+    - filesystem-storageclass.yaml.j2
+  loop_control:
+    loop_var: config_file
+
+- name: Create rook external dashboard
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'external-dashboard-https.yaml.j2') }}"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
new file mode 100644
index 0000000..60c6665
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/cluster.yaml.j2
@@ -0,0 +1,173 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: "{{ rook_namespace }}"
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: "{{ ceph_repository }}:{{ ceph_version }}"
+    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
+    # After nautilus is released, Rook will be updated to support nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
+  dataDirHostPath: "{{ rook_data_dir_path }}"
+  # Whether or not upgrade should continue even if a check fails
+  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
+  # Use at your OWN risk
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
+  skipUpgradeChecks: false
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  mgr:
+    modules:
+    # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
+    # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
+    # - name: pg_autoscaler
+    #   enabled: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    ssl: true
+  monitoring:
+    # requires Prometheus to be pre-installed
+    enabled: false
+    # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
+    # Recommended:
+    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
+    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
+    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+    rulesNamespace: "{{ rook_namespace }}"
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: {{ rook_use_host_network }}
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+# Monitor deployments may contain an anti-affinity rule for avoiding monitor
+# collocation on the same node. This is a required rule when host network is used
+# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+# preferred rule with weight: 50.
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
+      journalSizeMB: "1024"  # this value can be removed for environments with normal sized disks (20 GB or larger)
+      osdsPerDevice: "1" # this value can be overridden at the node or device level
+# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
+    directories:
+    - path: "{{ rook_storage_dir_path }}"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: ^vdb
+  # The section for configuring management of daemon disruptions during upgrade or fencing.
+  disruptionManagement:
+    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph-managed-disruptionbudgets.md). The operator will
+    # block eviction of OSDs by default and unblock them safely when drains are detected.
+    managePodBudgets: false
+    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
+    osdMaintenanceTimeout: 30
+    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
+    # Only available on OpenShift.
+    manageMachineDisruptionBudgets: false
+    # Namespace in which to watch for the MachineDisruptionBudgets.
+    machineDisruptionBudgetNamespace: openshift-machine-api
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2
new file mode 100644
index 0000000..d8b7412
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/common.yaml.j2
@@ -0,0 +1,1592 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+#
+# Most of the sections are prefixed with an 'OLM' keyword, which is used to build our CSV for OLM (Operator Lifecycle Manager)
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: "{{ rook_namespace }}"
+# OLM: BEGIN CEPH CRD
+# The CRD declarations
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            annotations: {}
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+                  minimum: 0
+                  maximum: 65535
+                ssl:
+                  type: boolean
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            skipUpgradeChecks:
+              type: boolean
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+            mgr:
+              properties:
+                modules:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      enabled:
+                        type: boolean
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                disruptionManagement:
+                  properties:
+                    managePodBudgets:
+                      type: boolean
+                    osdMaintenanceTimeout:
+                      type: integer
+                    manageMachineDisruptionBudgets:
+                      type: boolean
+                useAllNodes:
+                  type: boolean
+                nodes:
+                  items:
+                    properties:
+                      name:
+                        type: string
+                      config:
+                        properties:
+                          metadataDevice:
+                            type: string
+                          storeType:
+                            type: string
+                            pattern: ^(filestore|bluestore)$
+                          databaseSizeMB:
+                            type: string
+                          walSizeMB:
+                            type: string
+                          journalSizeMB:
+                            type: string
+                          osdsPerDevice:
+                            type: string
+                          encryptedDevice:
+                            type: string
+                            pattern: ^(true|false)$
+                      useAllDevices:
+                        type: boolean
+                      deviceFilter: {}
+                      directories:
+                        type: array
+                        items:
+                          properties:
+                            path:
+                              type: string
+                      devices:
+                        type: array
+                        items:
+                          properties:
+                            name:
+                              type: string
+                            config: {}
+                      location: {}
+                      resources: {}
+                  type: array
+                useAllDevices:
+                  type: boolean
+                deviceFilter: {}
+                location: {}
+                directories:
+                  type: array
+                  items:
+                    properties:
+                      path:
+                        type: string
+                config: {}
+                topologyAware:
+                  type: boolean
+            monitoring:
+              properties:
+                enabled:
+                  type: boolean
+                rulesNamespace:
+                  type: string
+            rbdMirroring:
+              properties:
+                workers:
+                  type: integer
+            placement: {}
+            resources: {}
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+# OLM: END CEPH CRD
+# OLM: BEGIN CEPH FS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            metadataServer:
+              properties:
+                activeCount:
+                  minimum: 1
+                  maximum: 10
+                  type: integer
+                activeStandby:
+                  type: boolean
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      minimum: 1
+                      maximum: 10
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+            dataPools:
+              type: array
+              items:
+                properties:
+                  failureDomain:
+                    type: string
+                  replicated:
+                    properties:
+                      size:
+                        minimum: 1
+                        maximum: 10
+                        type: integer
+                  erasureCoded:
+                    properties:
+                      dataChunks:
+                        type: integer
+                      codingChunks:
+                        type: integer
+  additionalPrinterColumns:
+    - name: ActiveMDS
+      type: string
+      description: Number of desired active MDS daemons
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+# OLM: END CEPH FS CRD
+# OLM: BEGIN CEPH NFS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            rados:
+              properties:
+                pool:
+                  type: string
+                namespace:
+                  type: string
+            server:
+              properties:
+                active:
+                  type: integer
+                annotations: {}
+                placement: {}
+                resources: {}
+
+# OLM: END CEPH NFS CRD
+# OLM: BEGIN CEPH OBJECT STORE CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            gateway:
+              properties:
+                type:
+                  type: string
+                sslCertificateRef: {}
+                port:
+                  type: integer
+                securePort: {}
+                instances:
+                  type: integer
+                annotations: {}
+                placement: {}
+                resources: {}
+            metadataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+            dataPool:
+              properties:
+                failureDomain:
+                  type: string
+                replicated:
+                  properties:
+                    size:
+                      type: integer
+                erasureCoded:
+                  properties:
+                    dataChunks:
+                      type: integer
+                    codingChunks:
+                      type: integer
+# OLM: END CEPH OBJECT STORE CRD
+# OLM: BEGIN CEPH OBJECT STORE USERS CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+# OLM: END CEPH OBJECT STORE USERS CRD
+# OLM: BEGIN CEPH BLOCK POOL CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+# OLM: END CEPH BLOCK POOL CRD
+# OLM: BEGIN CEPH VOLUME POOL CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+# OLM: END CEPH VOLUME POOL CRD
+# OLM: BEGIN OBJECTBUCKET CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbuckets.objectbucket.io
+spec:
+  group: objectbucket.io
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  names:
+    kind: ObjectBucket
+    listKind: ObjectBucketList
+    plural: objectbuckets
+    singular: objectbucket
+    shortNames:
+      - ob
+      - obs
+  scope: Cluster
+  subresources:
+    status: {}
+# OLM: END OBJECTBUCKET CRD
+# OLM: BEGIN OBJECTBUCKETCLAIM CRD
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: objectbucketclaims.objectbucket.io
+spec:
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+  group: objectbucket.io
+  names:
+    kind: ObjectBucketClaim
+    listKind: ObjectBucketClaimList
+    plural: objectbucketclaims
+    singular: objectbucketclaim
+    shortNames:
+      - obc
+      - obcs
+  scope: Namespaced
+  subresources:
+    status: {}
+# OLM: END OBJECTBUCKETCLAIM CRD
+# OLM: BEGIN OBJECTBUCKET ROLEBINDING
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-object-bucket
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-object-bucket
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-system
+    namespace: "{{ rook_namespace }}"
+# OLM: END OBJECTBUCKET ROLEBINDING
+# OLM: BEGIN OPERATOR ROLE
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  - deployments
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - policy
+  - apps
+  resources:
+  # this is for the clusterdisruption controller
+  - poddisruptionbudgets
+  # this is for both clusterdisruption and nodedrain controllers
+  - deployments
+  verbs:
+  - "*"
+- apiGroups:
+  - healthchecking.openshift.io
+  resources:
+  - machinedisruptionbudgets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - machine.openshift.io
+  resources:
+  - machines
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - list
+  - get
+  - watch
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-object-bucket
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  verbs:
+  - "*"
+  resources:
+  - secrets
+  - configmaps
+- apiGroups:
+    - storage.k8s.io
+  resources:
+    - storageclasses
+  verbs:
+    - get
+    - list
+    - watch
+- apiGroups:
+  - "objectbucket.io"
+  verbs:
+  - "*"
+  resources:
+  - "*"
+# OLM: END OPERATOR ROLE
+# OLM: BEGIN SERVICE ACCOUNT SYSTEM
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT SYSTEM
+# OLM: BEGIN OPERATOR ROLEBINDING
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+# OLM: END OPERATOR ROLEBINDING
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+# OLM: BEGIN SERVICE ACCOUNT OSD
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT OSD
+# OLM: BEGIN SERVICE ACCOUNT MGR
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# OLM: END SERVICE ACCOUNT MGR
+# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CMD REPORTER SERVICE ACCOUNT
+# OLM: BEGIN CLUSTER ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+- apiGroups: ["ceph.rook.io"]
+  resources: ["cephclusters", "cephclusters/finalizers"]
+  verbs: [ "get", "list", "create", "update", "delete" ]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: "{{ rook_namespace }}"
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: "{{ rook_namespace }}"
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+# OLM: END CLUSTER ROLE
+# OLM: BEGIN CMD REPORTER ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+# OLM: END CMD REPORTER ROLE
+# OLM: BEGIN CLUSTER ROLEBINDING
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+
+---
+# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+
+# OLM: END CLUSTER ROLEBINDING
+# OLM: BEGIN CMD REPORTER ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-cmd-reporter
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CMD REPORTER ROLEBINDING
+#################################################################################################################
+# Beginning of pod security policy resources. The example will assume the cluster will be created in the
+# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify
+# the roles and bindings accordingly.
+#################################################################################################################
+# OLM: BEGIN CLUSTER POD SECURITY POLICY
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: rook-privileged
+spec:
+  privileged: true
+  allowedCapabilities:
+    # required by CSI
+    - SYS_ADMIN
+  # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group
+  fsGroup:
+    rule: RunAsAny
+  # runAsUser, supplementalGroups - Rook needs to run some pods as root
+  # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  # seLinux - seLinux context is unknown ahead of time; set if this is well-known
+  seLinux:
+    rule: RunAsAny
+  volumes:
+    # recommended minimum set
+    - configMap
+    - downwardAPI
+    - emptyDir
+    - persistentVolumeClaim
+    - secret
+    - projected
+    # required for Rook
+    - hostPath
+    - flexVolume
+  # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
+  # directory-based OSDs make this hard to nail down
+  # allowedHostPaths:
+  #   - pathPrefix: "/run/udev"  # for OSD prep
+  #     readOnly: false
+  #   - pathPrefix: "/dev"  # for OSD prep
+  #     readOnly: false
+  #   - pathPrefix: "/var/lib/rook"  # or whatever the dataDirHostPath value is set to
+  #     readOnly: false
+  # Ceph requires host IPC for setting up encrypted devices
+  hostIPC: true
+  # Ceph OSDs need to share the same PID namespace
+  hostPID: true
+  # hostNetwork can be set to 'false' if host networking isn't used
+  hostNetwork: true
+  hostPorts:
+    # Ceph messenger protocol v1
+    - min: 6789
+      max: 6790 # <- support old default port
+    # Ceph messenger protocol v2
+    - min: 3300
+      max: 3300
+    # Ceph RADOS ports for OSDs, MDSes
+    - min: 6800
+      max: 7300
+    # # Ceph dashboard port HTTP (not recommended)
+    # - min: 7000
+    #   max: 7000
+    # Ceph dashboard port HTTPS
+    - min: 8443
+      max: 8443
+    # Ceph mgr Prometheus Metrics
+    - min: 9283
+      max: 9283
+# OLM: END CLUSTER POD SECURITY POLICY
+# OLM: BEGIN POD SECURITY POLICY BINDINGS
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: 'psp:rook'
+rules:
+  - apiGroups:
+      - policy
+    resources:
+      - podsecuritypolicies
+    resourceNames:
+      - rook-privileged
+    verbs:
+      - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-ceph-system-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-system
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-default-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-osd-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-mgr-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rook-ceph-cmd-reporter-psp
+  namespace: "{{ rook_namespace }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:rook
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-cmd-reporter
+  namespace: "{{ rook_namespace }}"
+# OLM: END CLUSTER POD SECURITY POLICY BINDINGS
+# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: "{{ rook_namespace }}"
+# OLM: END CSI CEPHFS SERVICE ACCOUNT
+# OLM: BEGIN CSI CEPHFS ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: "{{ rook_namespace }}"
+  name: cephfs-external-provisioner-cfg
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+# OLM: END CSI CEPHFS ROLE
+# OLM: BEGIN CSI CEPHFS ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role-cfg
+  namespace: "{{ rook_namespace }}"
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: Role
+  name: cephfs-external-provisioner-cfg
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI CEPHFS ROLEBINDING
+# OLM: BEGIN CSI CEPHFS CLUSTER ROLE
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+# OLM: END CSI CEPHFS CLUSTER ROLE
+# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-cephfs-plugin-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-cephfs-provisioner-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI CEPHFS CLUSTER ROLEBINDING
+# OLM: BEGIN CSI RBD SERVICE ACCOUNT
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: "{{ rook_namespace }}"
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: "{{ rook_namespace }}"
+# OLM: END CSI RBD SERVICE ACCOUNT
+# OLM: BEGIN CSI RBD ROLE
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: "{{ rook_namespace }}"
+  name: rbd-external-provisioner-cfg
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+# OLM: END CSI RBD ROLE
+# OLM: BEGIN CSI RBD ROLEBINDING
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role-cfg
+  namespace: "{{ rook_namespace }}"
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: Role
+  name: rbd-external-provisioner-cfg
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI RBD ROLEBINDING
+# OLM: BEGIN CSI RBD CLUSTER ROLE
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create", "list", "watch", "delete", "get", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots/status"]
+    verbs: ["update"]
+# OLM: END CSI RBD CLUSTER ROLE
+# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-rbd-plugin-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: "{{ rook_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: rook-csi-rbd-provisioner-sa-psp
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: 'psp:rook'
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: "{{ rook_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+# OLM: END CSI RBD CLUSTER ROLEBINDING
diff --git a/playbooks/roles/package/tasks/dib.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2
similarity index 69%
copy from playbooks/roles/package/tasks/dib.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2
index 887977b..a15a040 100644
--- a/playbooks/roles/package/tasks/dib.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/external-dashboard-https.yaml.j2
@@ -1,4 +1,3 @@
----
 # ============LICENSE_START=======================================================
 #  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
 # ================================================================================
@@ -17,19 +16,22 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store images used for provisioning
-  file:
-    path: "{{ dib_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Download distro images used for provisioning nodes
-  get_url:
-    url: "{{ item }}"
-    dest: "{{ dib_folder }}"
-    force: true
-  loop: "{{ dib_images }}"
-
-# vim: set ts=2 sw=2 expandtab:
+apiVersion: v1
+kind: Service
+metadata:
+  name: rook-ceph-mgr-dashboard-external-https
+  namespace: "{{ rook_namespace }}"
+  labels:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+spec:
+  ports:
+  - name: dashboard
+    port: 8443
+    protocol: TCP
+    targetPort: 8443
+  selector:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+  sessionAffinity: None
+  type: NodePort
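+
+# NodePort exposes the dashboard on an allocated high port (30000-32767 by
+# default) on every node. A sketch for discovering it, assuming kubectl
+# access from the jumphost:
+#   kubectl -n {{ rook_namespace }} get svc rook-ceph-mgr-dashboard-external-https \
+#     -o jsonpath='{.spec.ports[0].nodePort}'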
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2
new file mode 100644
index 0000000..b2575f5
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem-storageclass.yaml.j2
@@ -0,0 +1,31 @@
+# taken from example at https://rook.github.io/docs/rook/v1.2/ceph-filesystem.html
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: {{ rook_filesystem_storageclass_name }}
+# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+provisioner: {{ rook_namespace }}.cephfs.csi.ceph.com
+parameters:
+  # clusterID is the namespace where the operator is deployed.
+  clusterID: {{ rook_namespace }}
+
+  # CephFS filesystem name into which the volume shall be created
+  fsName: {{ rook_filesystem_name }}
+
+  # Ceph pool into which the volume shall be created
+  # Required for provisionVolume: "true"
+  pool: {{ rook_filesystem_name }}-data0
+
+  # Root path of an existing CephFS volume
+  # Required for provisionVolume: "false"
+  # rootPath: /absolute/path
+
+  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
+  # in the same namespace as the cluster.
+  csi.storage.k8s.io/provisioner-secret-name: rook-ceph-csi
+  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_namespace }}
+  csi.storage.k8s.io/node-stage-secret-name: rook-ceph-csi
+  csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_namespace }}
+
+reclaimPolicy: Delete
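+
+# A minimal PersistentVolumeClaim against this class might look like the
+# following sketch (the claim name "cephfs-pvc" is illustrative):
+#   apiVersion: v1
+#   kind: PersistentVolumeClaim
+#   metadata:
+#     name: cephfs-pvc
+#   spec:
+#     accessModes:
+#       - ReadWriteMany
+#     resources:
+#       requests:
+#         storage: 1Gi
+#     storageClassName: {{ rook_filesystem_storageclass_name }}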
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2
new file mode 100644
index 0000000..5a4345f
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/filesystem.yaml.j2
@@ -0,0 +1,18 @@
+# taken from example at https://rook.github.io/docs/rook/v1.2/ceph-filesystem.html
+
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: {{ rook_filesystem_name }}
+  namespace: {{ rook_namespace }}
+spec:
+  metadataPool:
+    replicated:
+      size: 3
+  dataPools:
+    - replicated:
+        size: 3
+  preservePoolsOnDelete: true
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
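+
+# Note: a replicated size of 3 assumes OSDs on at least three hosts with the
+# default CRUSH failure domain; smaller test clusters may need a lower size.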
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2
new file mode 100644
index 0000000..24a5db0
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/operator.yaml.j2
@@ -0,0 +1,261 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+#################################################################################################################
+# The deployment for the rook operator
+# Contains the common settings for most Kubernetes deployments.
+# For example, to create the rook-ceph cluster:
+#   kubectl create -f common.yaml
+#   kubectl create -f operator.yaml
+#   kubectl create -f cluster.yaml
+#
+# Also see other operator sample files for variations of operator.yaml:
+# - operator-openshift.yaml: Common settings for running in OpenShift
+#################################################################################################################
+# OLM: BEGIN OPERATOR DEPLOYMENT
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: "{{ rook_namespace }}"
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: "{{ rook_repository }}:{{ rook_version }}"
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
+        # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "false"
+        # To disable RBAC, uncomment the following:
+        # - name: RBAC_ENABLED
+        #   value: "false"
+        # Rook Agent toleration. Will tolerate all taints with all keys.
+        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+        # - name: AGENT_TOLERATION
+        #   value: "NoSchedule"
+        # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
+        # - name: AGENT_TOLERATION_KEY
+        #   value: "<KeyOfTheTaintToTolerate>"
+        # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # - name: AGENT_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Rook Agent NodeAffinity.
+        # - name: AGENT_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook,ceph"
+        # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`.
+        # `Any` uses Ceph admin credentials by default/fallback.
+        # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from,
+        # and set `mountUser` to the Ceph user and `mountSecret` to the Kubernetes secret name
+        # in the namespace from which the storage will be consumed.
+        # - name: AGENT_MOUNT_SECURITY_MODE
+        #   value: "Any"
+        # Set the path where the Rook agent can find the flex volumes
+        # - name: FLEXVOLUME_DIR_PATH
+        #   value: "<PathToFlexVolumes>"
+        # Set the path where kernel modules can be found
+        # - name: LIB_MODULES_DIR_PATH
+        #   value: "<PathToLibModules>"
+        # Mount any extra directories into the agent container
+        # - name: AGENT_MOUNTS
+        #   value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2"
+        # Rook Discover toleration. Will tolerate all taints with all keys.
+        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+        # - name: DISCOVER_TOLERATION
+        #   value: "NoSchedule"
+        # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
+        # - name: DISCOVER_TOLERATION_KEY
+        #   value: "<KeyOfTheTaintToTolerate>"
+        # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # - name: DISCOVER_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Discover Agent NodeAffinity.
+        # - name: DISCOVER_AGENT_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # Allow rook to create multiple file systems. Note: This is considered
+        # an experimental feature in Ceph as described at
+        # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
+        # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
+        - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
+          value: "false"
+
+        # The logging level for the operator: INFO | DEBUG
+        - name: ROOK_LOG_LEVEL
+          value: "INFO"
+
+        # The interval to check the health of the ceph cluster and update the status in the custom resource.
+        - name: ROOK_CEPH_STATUS_CHECK_INTERVAL
+          value: "60s"
+
+        # The interval to check if every mon is in the quorum.
+        - name: ROOK_MON_HEALTHCHECK_INTERVAL
+          value: "45s"
+
+        # The duration to wait before trying to failover or remove/replace the
+        # current mon with a new mon (useful for compensating flapping network).
+        - name: ROOK_MON_OUT_TIMEOUT
+          value: "600s"
+
+        # The duration between discovering devices in the rook-discover daemonset.
+        - name: ROOK_DISCOVER_DEVICES_INTERVAL
+          value: "60m"
+
+        # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
+        # This is necessary to work around the anyuid issues when running on OpenShift.
+        # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
+        - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
+          value: "false"
+
+        # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+        # Disable it here if you have similar issues.
+        # For more details see https://github.com/rook/rook/issues/2417
+        - name: ROOK_ENABLE_SELINUX_RELABELING
+          value: "true"
+
+        # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
+        # For more details see https://github.com/rook/rook/issues/2254
+        - name: ROOK_ENABLE_FSGROUP
+          value: "true"
+
+        # Disable automatic orchestration when new devices are discovered
+        - name: ROOK_DISABLE_DEVICE_HOTPLUG
+          value: "false"
+
+        # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
+        # in favor of the CSI driver.
+        - name: ROOK_ENABLE_FLEX_DRIVER
+          value: "true"
+
+        # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
+        # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
+        - name: ROOK_ENABLE_DISCOVERY_DAEMON
+          value: "false"
+
+        # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below.
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+
+        # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_ENABLE_GRPC_METRICS
+          value: "true"
+        # The default version of CSI supported by Rook will be started. To change the version
+        # of the CSI driver to something other than what is officially supported, change
+        # these images to the desired release of the CSI driver.
+        #- name: ROOK_CSI_CEPH_IMAGE
+        #  value: "quay.io/cephcsi/cephcsi:v1.2.1"
+        #- name: ROOK_CSI_REGISTRAR_IMAGE
+        #  value: "quay.io/k8scsi/csi-node-driver-registrar:v1.1.0"
+        #- name: ROOK_CSI_PROVISIONER_IMAGE
+        #  value: "quay.io/k8scsi/csi-provisioner:v1.3.0"
+        #- name: ROOK_CSI_SNAPSHOTTER_IMAGE
+        #  value: "quay.io/k8scsi/csi-snapshotter:v1.2.0"
+        #- name: ROOK_CSI_ATTACHER_IMAGE
+        #  value: "quay.io/k8scsi/csi-attacher:v1.2.0"
+        # kubelet directory path, if kubelet is configured to use a path other than /var/lib/kubelet.
+        #- name: ROOK_CSI_KUBELET_DIR_PATH
+        #  value: "/var/lib/kubelet"
+        # (Optional) Ceph Provisioner NodeAffinity.
+        # - name: CSI_PROVISIONER_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
+        #  CSI provisioner would be best to start on the same nodes as other ceph daemons.
+        # - name: CSI_PROVISIONER_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        # (Optional) Ceph CSI plugin NodeAffinity.
+        # - name: CSI_PLUGIN_NODE_AFFINITY
+        #   value: "role=storage-node; storage=rook, ceph"
+        # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
+        # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+        # - name: CSI_PLUGIN_TOLERATIONS
+        #   value: |
+        #     - effect: NoSchedule
+        #       key: node-role.kubernetes.io/controlplane
+        #       operator: Exists
+        #     - effect: NoExecute
+        #       key: node-role.kubernetes.io/etcd
+        #       operator: Exists
+        - name: ROOK_CSI_CEPH_IMAGE
+          value: "{{ cephcsi_repository }}:{{ cephcsi_version }}"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "{{ csi_node_driver_registrar_repository }}:{{ csi_node_driver_registrar_version }}"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "{{ csi_provisioner_repository }}:{{ csi_provisioner_version }}"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "{{ csi_snapshotter_repository }}:{{ csi_snapshotter_version }}"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "{{ csi_attacher_repository }}:{{ csi_attacher_version }}"
+        # The name of the node to pass with the downward API
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
+# OLM: END OPERATOR DEPLOYMENT
diff --git a/playbooks/roles/package/tasks/pip.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2
similarity index 64%
rename from playbooks/roles/package/tasks/pip.yaml
rename to playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2
index a4a4642..0db4c51 100644
--- a/playbooks/roles/package/tasks/pip.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/pool.yaml.j2
@@ -1,4 +1,3 @@
----
 # ============LICENSE_START=======================================================
 #  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
 # ================================================================================
@@ -17,24 +16,17 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store pip packages
-  file:
-    path: "{{ pip_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Download pip packages using requirements.txt file
-  command: "pip download -r {{ engine_path }}/requirements.txt --no-cache"
-  changed_when: false
-  args:
-    chdir: "{{ pip_folder }}"
-
-- name: Copy pip.conf
-  template:
-    src: pip.conf.j2
-    dest: "{{ pip_folder }}/pip.conf"
-    force: true
-
-# vim: set ts=2 sw=2 expandtab:
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: "{{ rook_block_pool_name }}"
+  namespace: "{{ rook_namespace }}"
+spec:
+  # The failure domain will spread the replicas of the data across different failure zones
+  failureDomain: osd
+  # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy.
+  replicated:
+    size: {{ rook_block_pool_replicas }}
+  # A key/value list of annotations
+  annotations:
+  #  key: value
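+
+# Note: failureDomain "osd" only spreads replicas across OSDs, so copies may
+# land on the same host; "host" is the safer choice where node failures must
+# be survivable.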
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2
new file mode 100644
index 0000000..21ada26
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/storageclass.yaml.j2
@@ -0,0 +1,40 @@
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: "{{ rook_block_storage_name }}"
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: ceph.rook.io/block
+# Works for Kubernetes 1.14+
+allowVolumeExpansion: true
+parameters:
+  blockPool: "{{ rook_block_pool_name }}"
+  # Specify the namespace of the rook cluster from which to create volumes.
+  # If not specified, it will use `rook` as the default namespace of the cluster.
+  # This is also the namespace where the cluster will be deployed.
+  clusterNamespace: "{{ rook_namespace }}"
+  # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+  fstype: "{{ rook_block_storage_fs }}"
+  # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass.
+  #mountUser: user1
+  # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret.
+  # The secret must exist in each namespace(s) where the storage will be consumed.
+  #mountSecret: ceph-user1-secret
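+
+# A minimal PersistentVolumeClaim against this class might look like the
+# following sketch (the claim name "block-pvc" is illustrative; the
+# storageClassName can be omitted because the class is annotated as default):
+#   apiVersion: v1
+#   kind: PersistentVolumeClaim
+#   metadata:
+#     name: block-pvc
+#   spec:
+#     accessModes:
+#       - ReadWriteOnce
+#     resources:
+#       requests:
+#         storage: 1Gi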
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2 b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2
new file mode 100644
index 0000000..0dd3c0f
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/templates/toolbox.yaml.j2
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: "{{ rook_namespace }}"
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: "{{ rook_repository }}:{{ rook_version }}"
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
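+
+# Once the toolbox pod is ready, cluster health can be checked from it, e.g.
+# (illustrative):
+#   kubectl -n {{ rook_namespace }} exec -it \
+#     "$(kubectl -n {{ rook_namespace }} get pod -l app=rook-ceph-tools -o name)" -- ceph status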
diff --git a/playbooks/roles/package/tasks/dib.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
similarity index 67%
copy from playbooks/roles/package/tasks/dib.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
index 887977b..9a4c206 100644
--- a/playbooks/roles/package/tasks/dib.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
@@ -17,19 +17,12 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store images used for provisioning
-  file:
-    path: "{{ dib_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Download distro images used for provisioning nodes
-  get_url:
-    url: "{{ item }}"
-    dest: "{{ dib_folder }}"
-    force: true
-  loop: "{{ dib_images }}"
+ceph_repository: "{{ server_fqdn }}/ceph/ceph"
+rook_repository: "{{ server_fqdn }}/rook/ceph"
+cephcsi_repository: "{{ server_fqdn }}/cephcsi/cephcsi"
+csi_node_driver_registrar_repository: "{{ server_fqdn }}/k8scsi/csi-node-driver-registrar"
+csi_provisioner_repository: "{{ server_fqdn }}/k8scsi/csi-provisioner"
+csi_snapshotter_repository: "{{ server_fqdn }}/k8scsi/csi-snapshotter"
+csi_attacherr_repository: "{{ server_fqdn }}/k8scsi/csi-attacher"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/dib.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml
similarity index 70%
copy from playbooks/roles/package/tasks/dib.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml
index 887977b..21a9bb7 100644
--- a/playbooks/roles/package/tasks/dib.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/install/vars/online-deployment.yaml
@@ -17,19 +17,12 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store images used for provisioning
-  file:
-    path: "{{ dib_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Download distro images used for provisioning nodes
-  get_url:
-    url: "{{ item }}"
-    dest: "{{ dib_folder }}"
-    force: true
-  loop: "{{ dib_images }}"
+ceph_repository: "docker.io/ceph/ceph"
+rook_repository: "rook/ceph"
+cephcsi_repository: "quay.io/cephcsi/cephcsi"
+csi_node_driver_registrar_repository: "quay.io/k8scsi/csi-node-driver-registrar"
+csi_provisioner_repository: "quay.io/k8scsi/csi-provisioner"
+csi_snapshotter_repository: "quay.io/k8scsi/csi-snapshotter"
+csi_attacherr_repository: "quay.io/k8scsi/csi-attacher"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh b/playbooks/apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh
new file mode 100644
index 0000000..ed133fa
--- /dev/null
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/prepare/files/clean-ceph-osd.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+DISK="/dev/$1"
+# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
+# You will have to run this step for all disks.
+sgdisk --zap-all "$DISK"
+
+# These steps only have to be run once on each node
+# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
+ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
+# ceph-volume setup can leave ceph-<UUID> directories in /dev (unnecessary clutter)
+rm -rf /dev/ceph-*
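+
+# Illustrative invocation (the prepare role passes rook_node_device_filter,
+# e.g. "vdb", so this zaps /dev/vdb):
+#   ./clean-ceph-osd.sh vdb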
diff --git a/playbooks/roles/package/tasks/pip.yaml b/playbooks/apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml
similarity index 62%
copy from playbooks/roles/package/tasks/pip.yaml
copy to playbooks/apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml
index a4a4642..3844479 100644
--- a/playbooks/roles/package/tasks/pip.yaml
+++ b/playbooks/apps/ceph/kubespray/playbooks/roles/prepare/tasks/main.yml
@@ -17,24 +17,30 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store pip packages
-  file:
-    path: "{{ pip_folder }}"
-    state: "{{ item }}"
+- name: Install packages
+  action: >
+    {{ ansible_pkg_mgr }} name={{ item }} state=present update_cache=yes
   with_items:
-    - absent
-    - directory
+    - "xfsprogs"
+    - "gdisk"
 
-- name: Download pip packages using requirements.txt file
-  command: "pip download -r {{ engine_path }}/requirements.txt --no-cache"
-  changed_when: false
-  args:
-    chdir: "{{ pip_folder }}"
+- name: Remove existing rook data directories
+  file:
+    path: "{{ rook_data_dir_path }}"
+    state: absent
+  ignore_errors: true
+  tags: reset
 
-- name: Copy pip.conf
-  template:
-    src: pip.conf.j2
-    dest: "{{ pip_folder }}/pip.conf"
-    force: true
+- name: Remove existing rook storage directories
+  file:
+    path: "{{ rook_storage_dir_path }}"
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+- name: Remove existing rook ceph osds
+  script: "clean-ceph-osd.sh {{ rook_node_device_filter }}"
+  ignore_errors: true
+  tags: reset
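+
+# The tasks tagged "reset" above can also be run on their own for cleanup,
+# e.g. (illustrative): ansible-playbook install.yml --tags reset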
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/istio/kubespray/playbooks/install.yml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/istio/kubespray/playbooks/install.yml
index bd7361f..97d22ae 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/istio/kubespray/playbooks/install.yml
@@ -17,14 +17,11 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
+- hosts: jumphost
+  gather_facts: true
+  become: false
 
-# service names
-docker_service_name: docker
+  roles:
+    - role: install
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/istio/kubespray/playbooks/roles/install/tasks/main.yml b/playbooks/apps/istio/kubespray/playbooks/roles/install/tasks/main.yml
new file mode 100644
index 0000000..1f4e859
--- /dev/null
+++ b/playbooks/apps/istio/kubespray/playbooks/roles/install/tasks/main.yml
@@ -0,0 +1,176 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Make sure "{{ istio_work_dir }}" exists
+  file:
+    path: "{{ istio_work_dir }}"
+    state: directory
+
+# TODO: validate download checksum
+- name: Download the installation files
+  unarchive:
+    src: "{{ istio_download_url }}"
+    dest: "{{ istio_work_dir }}"
+    remote_src: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: List existing installation of Istio
+  shell: helm list | awk '{print $1}' | grep istio
+  register: installed_istio_charts
+  ignore_errors: true
+  changed_when: false
+  tags: reset
+
+- name: Delete existing installation of Istio
+  command: helm delete --purge "{{ item }}"
+  loop: "{{ installed_istio_charts.stdout_lines }}"
+  ignore_errors: true
+  changed_when: true
+  tags: reset
+
+# This works around https://github.com/ansible/ansible/issues/47081 in the k8s module, which is
+# triggered by a trailing --- at the end of the yaml file.
+- name: Fix upstream Istio CRDs
+  lineinfile:
+    path: "{{ item }}"
+    regex: '^-{3}$'
+    line: ""
+    state: present
+  with_fileglob:
+    - "{{ istio_work_dir }}/istio-{{ istio_version }}/install/kubernetes/helm/istio-init/files/*"
+  ignore_errors: true
+  tags: reset
+
+- name: Delete existing Istio CRDs
+  k8s:
+    api_version: apiextensions.k8s.io/v1beta1
+    kind: CustomResourceDefinition
+    state: absent
+    src: "{{ item }}"
+  with_fileglob:
+    - "{{ istio_work_dir }}/istio-{{ istio_version }}/install/kubernetes/helm/istio-init/files/*"
+  ignore_errors: true
+  tags: reset
+
+- name: Delete Istio init namespace
+  k8s:
+    name: "{{ istio_init_namespace }}"
+    api_version: v1
+    kind: Namespace
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+# This explicit wait can be dropped once Ansible is updated to 2.8, where the k8s module supports waiting
+- name: Verify Istio init namespace deletion
+  k8s_facts:
+    kind: Namespace
+    name: "{{ istio_init_namespace }}"
+  register: namespace_status
+  until: not namespace_status.resources
+  retries: 5
+  delay: 10
+  ignore_errors: true
+  tags: reset
+
+- name: Delete Istio namespace
+  k8s:
+    name: "{{ istio_namespace }}"
+    api_version: v1
+    kind: Namespace
+    state: absent
+  ignore_errors: true
+  tags: reset
+
+# This explicit wait can be dropped once Ansible is updated to 2.8, where the k8s module supports waiting
+- name: Verify Istio namespace deletion
+  k8s_facts:
+    kind: Namespace
+    name: "{{ istio_namespace }}"
+  register: namespace_status
+  until: not namespace_status.resources
+  retries: 5
+  delay: 10
+  ignore_errors: true
+  tags: reset
+
+- name: Install and bootstrap Istio CRDs
+  command: >
+    helm install "{{ istio_work_dir }}"/istio-"{{ istio_version }}"/install/kubernetes/helm/istio-init
+      --name "{{ istio_init_release_name }}"
+      --namespace "{{ istio_init_namespace }}"
+  changed_when: true
+
+- name: Verify that all Istio CRDs were committed to the API server
+  k8s_facts:
+    kind: CustomResourceDefinition
+    api: apiextensions.k8s.io/v1beta1
+    label_selectors:
+      - release=istio
+  register: crd_status
+  until: crd_status.resources|length >= 23
+  retries: 5
+  delay: 10
+
+- name: Install Istio configuration profile
+  command: >
+    helm install "{{ istio_work_dir }}"/istio-"{{ istio_version }}"/install/kubernetes/helm/istio
+      --name "{{ istio_release_name }}"
+      --namespace "{{ istio_namespace }}"
+  changed_when: true
+
+- name: Verify Istio service existence
+  k8s_facts:
+    kind: Service
+    namespace: "{{ istio_namespace }}"
+    label_selectors:
+      - release=istio
+  register: istio_service_status
+  until: istio_service_status.resources is defined
+  retries: 5
+  delay: 10
+
+- name: Wait until Istio pods are ready
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ istio_namespace }}"
+    label_selectors:
+      - release=istio
+    field_selectors:
+      - status.phase=Running
+  register: istio_pod_status
+  until:
+    - istio_pod_status.resources is defined
+    - istio_pod_status.resources
+  retries: 5
+  delay: 10
+
+- name: Add istioctl CLI bin to path
+  become: true
+  copy:
+    src: '{{ istio_work_dir }}/istio-{{ istio_version }}/bin/istioctl'
+    dest: '/usr/local/bin/istioctl'
+    remote_src: true
+    mode: '0755'
+
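+# A quick post-install sanity check, run manually from the jumphost
+# (illustrative, not part of the role):
+#   istioctl version
+#   kubectl -n istio-system get pods
+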
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/istio/kubespray/playbooks/roles/install/vars/main.yml
similarity index 75%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/istio/kubespray/playbooks/roles/install/vars/main.yml
index bd7361f..3fa5752 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/istio/kubespray/playbooks/roles/install/vars/main.yml
@@ -17,14 +17,10 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
+istio_download_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-linux.tar.gz"
+istio_work_dir: "/tmp/istio"
 
-# service names
-docker_service_name: docker
-
-# vim: set ts=2 sw=2 expandtab:
+istio_namespace: istio-system
+istio_release_name: istio
+istio_init_namespace: istio-init
+istio_init_release_name: istio-init
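+
+# istio_version is expected to be supplied by the stack/scenario vars; the
+# download URL above then resolves to the official release tarball for it.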
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/prometheus/kubespray/playbooks/install.yml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/prometheus/kubespray/playbooks/install.yml
index bd7361f..97d22ae 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/prometheus/kubespray/playbooks/install.yml
@@ -17,14 +17,11 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
+- hosts: jumphost
+  gather_facts: true
+  become: false
 
-# service names
-docker_service_name: docker
+  roles:
+    - role: install
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml
new file mode 100644
index 0000000..3161f20
--- /dev/null
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/tasks/main.yaml
@@ -0,0 +1,133 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- block:
+  - name: Create directories for helm repositories
+    file:
+      path: "{{ item.path }}"
+      state: "{{ item.state }}"
+    loop:
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: directory}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: directory}
+
+  - name: Place index.yaml to webserver stable charts repository
+    template:
+      src: "index.yaml.j2"
+      dest: "{{ engine_workspace }}/offline/charts/stable/index.yaml"
+      force: true
+  when: execution_mode == "offline-deployment"
+
+- name: Initialize Helm
+  command: helm init --client-only --local-repo-url {{ local_repo_url }} --stable-repo-url {{ stable_repo_url }}
+  register: helm_init_result
+  changed_when: true
+
+- name: Clone Helm Charts repository
+  git:
+    repo: "{{ helm_charts_git_url }}"
+    dest: "{{ config_path }}/repos/charts"
+    version: "{{ charts_version }}"
+    force: true
+    recursive: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: Generate values.yaml
+  template:
+    src: "values.yaml.j2"
+    dest: "{{ config_path }}/repos/charts/stable/prometheus/values.yaml"
+    force: true
+
+- name: Remove previous installations of Prometheus
+  command: >
+    helm delete --purge "{{ prometheus_service }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Remove Prometheus namespace
+  command: >
+    kubectl delete ns "{{ prometheus_namespace }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Create Prometheus namespace
+  k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ prometheus_namespace }}"
+
+- name: Install Prometheus using helm
+  command: >
+    helm install
+      --name "{{ prometheus_service }}"
+      --namespace "{{ prometheus_namespace }}"
+      --timeout 900
+      {{ config_path }}/repos/charts/stable/prometheus
+  register: prometheus_helm_log
+  changed_when: true
+
+- name: Log Prometheus helm output to console
+  debug:
+    msg: "{{ prometheus_helm_log.stdout_lines }}"
+
+- name: Wait until Prometheus pods are available
+  k8s_facts:
+    kind: Pod
+    namespace: "{{ prometheus_namespace }}"
+    label_selectors:
+      - "app = {{ prometheus_service }}"
+    field_selectors:
+      - status.phase=Running
+  register: prometheus_pod_status
+  until:
+    - prometheus_pod_status.resources is defined
+    - prometheus_pod_status.resources
+  retries: 30
+  delay: 10
+
+- name: Install Prometheus LoadBalancer service
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'prometheus_service.yaml.j2') }}"
+  register: prometheus_service_status
+
+- name: Log Prometheus service information to console
+  debug:
+    msg:
+      - "------------------------------"
+      - "Prometheus Service information"
+      - "------------------------------"
+      - "clusterIP:  {{ prometheus_service_status.result.spec.clusterIP }}"
+      - "targetPort: {{ prometheus_service_status.result.spec.ports[0].targetPort }}"
+      - "nodePort:   {{ prometheus_service_status.result.spec.ports[0].nodePort }}"
+      - "------------------------------"
+
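+# Note: without a cloud provider or a bare-metal load balancer, the
+# LoadBalancer service may stay in "pending"; the nodePort logged above can
+# be used instead, e.g. (illustrative): curl http://<node-ip>:<nodePort>/graph
+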
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2 b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2
new file mode 100644
index 0000000..1e632d3
--- /dev/null
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/index.yaml.j2
@@ -0,0 +1,27 @@
+apiVersion: v1
+entries:
+  prometheus:
+  - apiVersion: v1
+    appVersion: {{ prom_prometheus_version }}
+    created: 2020-03-01T17:30:10.216789698Z
+    description: Prometheus is a monitoring system and time series database.
+    digest: 6fb65153c0c0dedc16a54be8da21dcb1b5dad891948552a5b3a94c5381c25433
+    engine: gotpl
+    home: https://prometheus.io/
+    icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+    maintainers:
+    - email: gianrubio@gmail.com
+      name: gianrubio
+    - email: zanhsieh@gmail.com
+      name: zanhsieh
+    name: prometheus
+    sources:
+    - https://github.com/prometheus/alertmanager
+    - https://github.com/prometheus/prometheus
+    - https://github.com/prometheus/pushgateway
+    - https://github.com/prometheus/node_exporter
+    - https://github.com/kubernetes/kube-state-metrics
+    tillerVersion: '>=2.8.0'
+    urls:
+    - https://kubernetes-charts.storage.googleapis.com/prometheus-10.6.0.tgz
+    version: 10.6.0
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2
similarity index 76%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2
index bd7361f..e11cb23 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/prometheus_service.yaml.j2
@@ -1,4 +1,3 @@
----
 # ============LICENSE_START=======================================================
 #  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
 # ================================================================================
@@ -17,14 +16,18 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
-
-# vim: set ts=2 sw=2 expandtab:
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: "{{ prometheus_service }}"
+  namespace: "{{ prometheus_namespace }}"
+spec:
+  selector:
+    app: "{{ prometheus_service }}"
+  type: LoadBalancer
+  ports:
+  - name: http
+    port: 80
+    targetPort: 9090
+    protocol: TCP
diff --git a/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2 b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2
new file mode 100644
index 0000000..58a96cb
--- /dev/null
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/templates/values.yaml.j2
@@ -0,0 +1,1634 @@
+{% raw %}
+rbac:
+  create: true
+
+podSecurityPolicy:
+  enabled: false
+
+imagePullSecrets:
+# - name: "image-pull-secret"
+
+## Define serviceAccount names for components. Defaults to component's fully qualified name.
+##
+serviceAccounts:
+  alertmanager:
+    create: true
+    name:
+  kubeStateMetrics:
+    create: true
+    name:
+  nodeExporter:
+    create: true
+    name:
+  pushgateway:
+    create: true
+    name:
+  server:
+    create: true
+    name:
+
+alertmanager:
+  ## If false, alertmanager will not be installed
+  ##
+  enabled: true
+
+  ## alertmanager container name
+  ##
+  name: alertmanager
+
+  ## alertmanager container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/alertmanager
+    tag: {{ prom_alertmanager_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## alertmanager priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional alertmanager container arguments
+  ##
+  extraArgs: {}
+
+  ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
+  ## so that the various internal URLs are still able to access as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL which can access alertmanager
+  baseURL: "http://localhost:9093"
+
+  ## Additional alertmanager container environment variable
+  ## For instance to add a http_proxy
+  ##
+  extraEnv: {}
+
+  ## Additional alertmanager Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: alertmanager-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config
+  ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configFromSecret: ""
+
+  ## The configuration file name to be loaded to alertmanager
+  ## Must match the key within configuration loaded from ConfigMap/Secret
+  ##
+  configFileName: alertmanager.yml
+
+  ingress:
+    ## If true, alertmanager Ingress will be created
+    ##
+    enabled: false
+
+    ## alertmanager Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## alertmanager Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## alertmanager Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - alertmanager.domain.com
+    #   - domain.com/alertmanager
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## alertmanager Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - alertmanager.domain.com
+
+  ## Alertmanager Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## Node tolerations for alertmanager scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for alertmanager pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, alertmanager will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: true
+
+    ## alertmanager data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## alertmanager data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## alertmanager data Persistent Volume existing claim name
+    ## Requires alertmanager.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## alertmanager data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## alertmanager data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## alertmanager data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## alertmanager data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of alertmanager data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  ## Annotations to be added to alertmanager pods
+  ##
+  podAnnotations: {}
+    ## Tell prometheus to use a specific set of alertmanager pods
+    ## instead of all alertmanager pods found in the same namespace
+    ## Useful if you deploy multiple releases within the same namespace
+    ##
+    ## prometheus.io/probe: alertmanager-teamA
+
+  ## Labels to be added to Prometheus AlertManager pods
+  ##
+  podLabels: {}
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows to scale replicas to more than 1 pod
+    ##
+    enabled: false
+
+    podManagementPolicy: OrderedReady
+
+    ## Alertmanager headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+
+      ## Enabling peer mesh service end points for enabling the HA alert manager
+      ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+      # enableMeshPeer : true
+
+      servicePort: 80
+
+  ## alertmanager resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to alertmanager pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## Enabling peer mesh service end points for enabling the HA alert manager
+    ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+    # enableMeshPeer : true
+
+    ## List of IP addresses at which the alertmanager service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    # nodePort: 30000
+    sessionAffinity: None
+    type: ClusterIP
+
+## Monitors ConfigMap changes and POSTs to a URL
+## Ref: https://github.com/jimmidyson/configmap-reload
+##
+configmapReload:
+  prometheus:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+{% endraw %}
+      repository: {{ dockerio_image_repository }}/jimmidyson/configmap-reload
+      tag: {{ configmap_reload_version }}
+{% raw %}
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+  alertmanager:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+{% endraw %}
+      repository: {{ dockerio_image_repository }}/jimmidyson/configmap-reload
+      tag: {{ configmap_reload_version }}
+{% raw %}
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+
+
+kubeStateMetrics:
+  ## If false, kube-state-metrics will not be installed
+  ##
+  enabled: true
+
+  ## kube-state-metrics container name
+  ##
+  name: kube-state-metrics
+
+  ## kube-state-metrics container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ quayio_image_repository }}/coreos/kube-state-metrics
+    tag: {{ kube_state_metrics_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## kube-state-metrics priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## kube-state-metrics container arguments
+  ##
+  args: {}
+
+  ## Node tolerations for kube-state-metrics scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for kube-state-metrics pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to kube-state-metrics pods
+  ##
+  podAnnotations: {}
+
+  ## Specify if a Pod Security Policy for kube-state-metrics must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  pod:
+    labels: {}
+
+  replicaCount: 1
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## kube-state-metrics resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 16Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 16Mi
+
+  ## Security context to be added to kube-state-metrics pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+    labels: {}
+
+    # Exposed as a headless service:
+    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
+    clusterIP: None
+
+    ## List of IP addresses at which the kube-state-metrics service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    # Port for kube-state-metrics self-telemetry
+    serviceTelemetryPort: 81
+    type: ClusterIP
+
+nodeExporter:
+  ## If false, node-exporter will not be installed
+  ##
+  enabled: true
+
+  ## If true, node-exporter pods share the host network namespace
+  ##
+  hostNetwork: true
+
+  ## If true, node-exporter pods share the host PID namespace
+  ##
+  hostPID: true
+
+  ## node-exporter container name
+  ##
+  name: node-exporter
+
+  ## node-exporter container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/node-exporter
+    tag: {{ prom_node_exporter_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## node-exporter priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Custom Update Strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  ## Additional node-exporter container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional node-exporter hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: textfile-dir
+    #   mountPath: /srv/txt_collector
+    #   hostPath: /var/lib/node-exporter
+    #   readOnly: true
+    #   mountPropagation: HostToContainer
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Node tolerations for node-exporter scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for node-exporter pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to node-exporter pods
+  ##
+  podAnnotations: {}
+
+  ## Labels to be added to node-exporter pods
+  ##
+  pod:
+    labels: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## node-exporter resource limits & requests
+  ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 200m
+    #   memory: 50Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 30Mi
+
+  ## Security context to be added to node-exporter pods
+  ##
+  securityContext: {}
+    # runAsUser: 0
+
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+    labels: {}
+
+    # Exposed as a headless service:
+    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
+    clusterIP: None
+
+    ## List of IP addresses at which the node-exporter service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    hostPort: 9100
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9100
+    type: ClusterIP
+
+server:
+  ## Prometheus server container name
+  ##
+  enabled: true
+  name: server
+  sidecarContainers:
+
+  ## Prometheus server container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/prometheus
+    tag: {{ prom_prometheus_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## prometheus server priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## The URL prefix at which the container can be accessed. Useful when the '-web.external-url' includes a slug
+  ## so that the various internal URLs are still accessible as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL at which the Prometheus server can be accessed
+  ## May be the same as the Ingress host name
+  baseURL: ""
+
+  ## Additional server container environment variables
+  ##
+  ## You specify this manually like you would a raw deployment manifest.
+  ## This means you can bind in environment variables from secrets.
+  ##
+  ## e.g. static environment variable:
+  ##  - name: DEMO_GREETING
+  ##    value: "Hello from the environment"
+  ##
+  ## e.g. secret environment variable:
+  ## - name: USERNAME
+  ##   valueFrom:
+  ##     secretKeyRef:
+  ##       name: mysecret
+  ##       key: username
+  env: []
+
+  extraFlags:
+    - web.enable-lifecycle
+    ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
+    ## deleting time series. This is disabled by default.
+    # - web.enable-admin-api
+    ##
+    ## storage.tsdb.no-lockfile flag controls DB locking
+    # - storage.tsdb.no-lockfile
+    ##
+    ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
+    # - storage.tsdb.wal-compression
+
+  ## Path to a configuration file on prometheus server container FS
+  configPath: /etc/config/prometheus.yml
+
+  global:
+    ## How frequently to scrape targets by default
+    ##
+    scrape_interval: 1m
+    ## How long until a scrape request times out
+    ##
+    scrape_timeout: 10s
+    ## How frequently to evaluate rules
+    ##
+    evaluation_interval: 1m
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
+  ##
+  remoteWrite: {}
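+  ## e.g. a single receiver, assuming a hypothetical endpoint (shape follows the
+  ## Prometheus remote_write docs linked above):
+  # remoteWrite:
+  #   - url: "https://remote-storage.example.com/api/v1/write"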
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
+  ##
+  remoteRead: {}
+
+  ## Additional Prometheus server container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ## Additional Prometheus server Volume mounts
+  ##
+  extraVolumeMounts: []
+
+  ## Additional Prometheus server Volumes
+  ##
+  extraVolumes: []
+
+  ## Additional Prometheus server hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: certs-dir
+    #   mountPath: /etc/kubernetes/certs
+    #   subPath: ""
+    #   hostPath: /etc/kubernetes/certs
+    #   readOnly: true
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   subPath: ""
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Additional Prometheus server Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: prom-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/server-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ingress:
+    ## If true, Prometheus server Ingress will be created
+    ##
+    enabled: false
+
+    ## Prometheus server Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## Prometheus server Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## Prometheus server Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - prometheus.domain.com
+    #   - domain.com/prometheus
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## Prometheus server Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-server-tls
+    #     hosts:
+    #       - prometheus.domain.com
+
+  ## Server Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for Prometheus server pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, Prometheus server will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: true
+
+    ## Prometheus server data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## Prometheus server data Persistent Volume annotations
+    ##
+    annotations: {}
+
+    ## Prometheus server data Persistent Volume existing claim name
+    ## Requires server.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## Prometheus server data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## Prometheus server data Persistent Volume size
+    ##
+    size: 8Gi
+
+    ## Prometheus server data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
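+    ## e.g. to use a specific provisioner (hypothetical StorageClass name):
+    # storageClass: "fast-ssd"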
+
+    ## Prometheus server data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of Prometheus server data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  emptyDir:
+    sizeLimit: ""
+
+  ## Annotations to be added to Prometheus server pods
+  ##
+  podAnnotations: {}
+    # iam.amazonaws.com/role: prometheus
+
+  ## Labels to be added to Prometheus server pods
+  ##
+  podLabels: {}
+
+  ## Prometheus AlertManager configuration
+  ##
+  alertmanagers: []
+
+  ## Specify if a Pod Security Policy for the Prometheus server must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows scaling replicas to more than 1 pod
+    ##
+    enabled: false
+
+    annotations: {}
+    labels: {}
+    podManagementPolicy: OrderedReady
+
+    ## Prometheus server headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+      servicePort: 80
+
+  ## Prometheus server readiness and liveness probe initial delay and timeout
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+  ##
+  readinessProbeInitialDelay: 30
+  readinessProbeTimeout: 30
+  readinessProbeFailureThreshold: 3
+  readinessProbeSuccessThreshold: 1
+  livenessProbeInitialDelay: 30
+  livenessProbeTimeout: 30
+  livenessProbeFailureThreshold: 3
+  livenessProbeSuccessThreshold: 1
+
+  ## Prometheus server resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 500m
+    #   memory: 512Mi
+    # requests:
+    #   cpu: 500m
+    #   memory: 512Mi
+
+  ## Vertical Pod Autoscaler config
+  ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
+  verticalAutoscaler:
+    ## If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs)
+    enabled: false
+    # updateMode: "Auto"
+    # containerPolicies:
+    # - containerName: 'prometheus-server'
+
+  ## Security context to be added to server pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the Prometheus server service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    sessionAffinity: None
+    type: ClusterIP
+
+    ## Enable gRPC port on service to allow auto discovery with thanos-querier
+    gRPC:
+      enabled: false
+      servicePort: 10901
+      # nodePort: 10901
+
+    ## If using a statefulSet (statefulSet.enabled=true), configure the
+    ## service to connect to a specific replica to have a consistent view
+    ## of the data.
+    statefulsetReplica:
+      enabled: false
+      replica: 0
+
+  ## Prometheus server pod termination grace period
+  ##
+  terminationGracePeriodSeconds: 300
+
+  ## Prometheus data retention period (default if not specified is 15 days)
+  ##
+  retention: "15d"
+
+pushgateway:
+  ## If false, pushgateway will not be installed
+  ##
+  enabled: true
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  ## pushgateway container name
+  ##
+  name: pushgateway
+
+  ## pushgateway container image
+  ##
+  image:
+{% endraw %}
+    repository: {{ dockerio_image_repository }}/prom/pushgateway
+    tag: {{ prom_push_gateway_version }}
+{% raw %}
+    pullPolicy: IfNotPresent
+
+  ## pushgateway priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional pushgateway container arguments
+  ##
+  ## for example: persistence.file: /data/pushgateway.data
+  extraArgs: {}
+
+  ingress:
+    ## If true, pushgateway Ingress will be created
+    ##
+    enabled: false
+
+    ## pushgateway Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## pushgateway Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - pushgateway.domain.com
+    #   - domain.com/pushgateway
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## pushgateway Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - pushgateway.domain.com
+
+  ## Node tolerations for pushgateway scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for pushgateway pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to pushgateway pods
+  ##
+  podAnnotations: {}
+
+  ## Specify if a Pod Security Policy for pushgateway must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  replicaCount: 1
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## pushgateway resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to push-gateway pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+
+  service:
+    annotations:
+      prometheus.io/probe: pushgateway
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the pushgateway service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9091
+    type: ClusterIP
+
+  ## pushgateway Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  persistentVolume:
+    ## If true, pushgateway will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: false
+
+    ## pushgateway data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## pushgateway data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## pushgateway data Persistent Volume existing claim name
+    ## Requires pushgateway.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## pushgateway data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## pushgateway data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## pushgateway data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## pushgateway data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of pushgateway data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+
+## alertmanager ConfigMap entries
+##
+alertmanagerFiles:
+  alertmanager.yml:
+    global: {}
+      # slack_api_url: ''
+
+    receivers:
+      - name: default-receiver
+        # slack_configs:
+        #  - channel: '@you'
+        #    send_resolved: true
+
+    route:
+      group_wait: 10s
+      group_interval: 5m
+      receiver: default-receiver
+      repeat_interval: 3h
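+      ## e.g. route critical alerts to a dedicated receiver (hypothetical receiver
+      ## name; it must also be defined under receivers above):
+      # routes:
+      #   - match:
+      #       severity: critical
+      #     receiver: critical-receiver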
+
+## Prometheus server ConfigMap entries
+##
+serverFiles:
+
+  ## Alerts configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
+  alerting_rules.yml: {}
+  # groups:
+  #   - name: Instances
+  #     rules:
+  #       - alert: InstanceDown
+  #         expr: up == 0
+  #         for: 5m
+  #         labels:
+  #           severity: page
+  #         annotations:
+  #           description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
+  #           summary: 'Instance {{ $labels.instance }} down'
+  ## DEPRECATED DEFAULT VALUE: unless explicitly naming your files, please use alerting_rules.yml
+  alerts: {}
+
+  ## Records configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
+  recording_rules.yml: {}
+  ## DEPRECATED DEFAULT VALUE: unless explicitly naming your files, please use recording_rules.yml
+  rules: {}
+
+  prometheus.yml:
+    rule_files:
+      - /etc/config/recording_rules.yml
+      - /etc/config/alerting_rules.yml
+    ## The two files below are DEPRECATED and will be removed from this default values file
+      - /etc/config/rules
+      - /etc/config/alerts
+
+    scrape_configs:
+      - job_name: prometheus
+        static_configs:
+          - targets:
+            - localhost:9090
+
+      # A scrape configuration for running Prometheus on a Kubernetes cluster.
+      # This uses separate scrape configs for cluster components (i.e. API server, node)
+      # and services to allow each to use different authentication configs.
+      #
+      # Kubernetes labels will be added as Prometheus labels on metrics via the
+      # `labelmap` relabeling action.
+
+      # Scrape config for API servers.
+      #
+      # Kubernetes exposes API servers as endpoints to the default/kubernetes
+      # service so this uses `endpoints` role and uses relabelling to only keep
+      # the endpoints associated with the default/kubernetes service using the
+      # default named port `https`. This works for single API server deployments as
+      # well as HA API server deployments.
+      - job_name: 'kubernetes-apiservers'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        # Keep only the default/kubernetes service endpoints for the https port. This
+        # will add targets for each API server for which Kubernetes adds an endpoint
+        # to the default/kubernetes service.
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+            action: keep
+            regex: default;kubernetes;https
+
+      - job_name: 'kubernetes-nodes'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics
+
+
+      - job_name: 'kubernetes-nodes-cadvisor'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        # This configuration will work only on kubelet 1.7.3+,
+        # as the scrape endpoints for cAdvisor have changed.
+        # If you are using an older version, you need to change the replacement to
+        # replacement: /api/v1/nodes/$1:4194/proxy/metrics
+        # More info: https://github.com/coreos/prometheus-operator/issues/633
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+
+      # Scrape config for service endpoints.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
+      # Scrape config for slow service endpoints; same as above, but with a larger
+      # timeout and a larger interval
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
+      - job_name: 'prometheus-pushgateway'
+        honor_labels: true
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: pushgateway
+
+      # Example scrape config for probing services via the Blackbox Exporter.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/probe`: Only probe services that have a value of `true`
+      - job_name: 'kubernetes-services'
+
+        metrics_path: /probe
+        params:
+          module: [http_2xx]
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: true
+          - source_labels: [__address__]
+            target_label: __param_target
+          - target_label: __address__
+            replacement: blackbox
+          - source_labels: [__param_target]
+            target_label: instance
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            target_label: kubernetes_name
+
+      # Example scrape config for pods
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods'
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
+      # Example scrape config for pods which should be scraped slower. A useful example
+      # would be stackdriver-exporter, which queries an API on every scrape of the pod
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
+# Adds additional scrape configs to prometheus.yml.
+# The value must be a string, so you have to add a | after extraScrapeConfigs:
+# The example below adds a prometheus-blackbox-exporter scrape config.
+extraScrapeConfigs:
+  # - job_name: 'prometheus-blackbox-exporter'
+  #   metrics_path: /probe
+  #   params:
+  #     module: [http_2xx]
+  #   static_configs:
+  #     - targets:
+  #       - https://example.com
+  #   relabel_configs:
+  #     - source_labels: [__address__]
+  #       target_label: __param_target
+  #     - source_labels: [__param_target]
+  #       target_label: instance
+  #     - target_label: __address__
+  #       replacement: prometheus-blackbox-exporter:9115
+
+# Adds the option to set alert_relabel_configs to avoid duplicate alerts in alertmanager,
+# useful in H/A Prometheus setups with different external labels but the same alerts
+alertRelabelConfigs:
+  # alert_relabel_configs:
+  # - source_labels: [dc]
+  #   regex: (.+)\d+
+  #   target_label: dc
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources.
+  ##
+  enabled: false
+{% endraw %}
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml
index bd7361f..775be24 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/main.yaml
@@ -17,14 +17,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+prometheus_service: "prometheus"
+prometheus_namespace: "prometheus"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
similarity index 77%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
index bd7361f..84ead46 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
@@ -17,14 +17,10 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+dockerio_image_repository: "{{ server_fqdn }}"
+quayio_image_repository: "{{ server_fqdn }}"
+helm_charts_git_url: "{{ engine_workspace }}/offline/git/charts"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "http://{{ server_fqdn }}/charts/stable"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml
similarity index 77%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml
index bd7361f..9e97b0e 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/prometheus/kubespray/playbooks/roles/install/vars/online-deployment.yaml
@@ -17,14 +17,10 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+dockerio_image_repository: "docker.io"
+quayio_image_repository: "quay.io"
+helm_charts_git_url: "https://github.com/helm/charts.git"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/spinnaker/kubespray/playbooks/install.yml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/spinnaker/kubespray/playbooks/install.yml
index bd7361f..97d22ae 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/install.yml
@@ -17,14 +17,11 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
+- hosts: jumphost
+  gather_facts: true
+  become: false
 
-# service names
-docker_service_name: docker
+  roles:
+    - role: install
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh
new file mode 100755
index 0000000..c176ac1
--- /dev/null
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/files/log-spinnaker-status.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
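+# Prints a summary of the Spinnaker deployment to stdout: the halyard install
+# log, pods grouped by state, per-state pod counts, and the container images
+# pulled for Spinnaker. Used by the install role to log deployment status.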
+
+cat << EOF
+---------------------------------------------------
+Halyard Spinnaker Deployment Log
+---------------------------------------------------
+$(kubectl -n spinnaker logs $(kubectl -n spinnaker get pod --no-headers -o custom-columns=':metadata.name' | grep spinnaker-install-using))
+
+
+---------------------------------------------------
+Spinnaker pods in Create or ImagePullBackOff state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep -i 'creating\|ImagePullBackOff')
+
+
+---------------------------------------------------
+Spinnaker pods in Init state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep Init | grep -v Error)
+
+
+---------------------------------------------------
+Spinnaker pods in Error or CrashLoopBackOff state
+---------------------------------------------------
+$(kubectl get pod -n spinnaker | grep 'Crash\|Error')
+
+
+---------------------------------------------------
+Spinnaker POD Summary
+---------------------------------------------------
+Creating/ImagePullBackOff     : $(kubectl get pod -n spinnaker | grep -i 'creating\|ImagePullBackOff' | wc -l) pods
+Init                          : $(kubectl get pod -n spinnaker | grep Init | grep -v Error | wc -l) pods
+Error/CrashLoopBackOff        : $(kubectl get pod -n spinnaker | grep 'Error\|Crash' | wc -l) pods
+Terminating                   : $(kubectl get pod -n spinnaker | grep -i terminating | wc -l) pods
+Running/Completed             : $(kubectl get pod -n spinnaker | grep -i 'running\|completed' | wc -l) pods
+Total                         : $(kubectl get pod -n spinnaker | grep -v RESTART | wc -l) pods
+---------------------------------------------------
+
+
+---------------------------------------------------
+Summary of Container Images pulled for Spinnaker
+---------------------------------------------------
+Number of Spinnaker containers     : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | grep '^spin.*' | wc -l)
+Number of non-Spinnaker containers : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | grep -v 'spin' | wc -l)
+Total number of containers         : $(kubectl get pods -n spinnaker -o jsonpath="{..image}" | tr -s '[[:space:]]' '\n' | sort | uniq | wc -l)
+---------------------------------------------------
+EOF
diff --git a/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml
new file mode 100644
index 0000000..a722222
--- /dev/null
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/tasks/main.yml
@@ -0,0 +1,166 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+#
+- name: Load execution mode variables
+  include_vars: "{{ execution_mode }}.yaml"
+
+- block:
+  - name: Create directories for helm repositories
+    file:
+      path: "{{ item.path }}"
+      state: "{{ item.state }}"
+    loop:
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/stable", state: directory}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: absent}
+      - {path: "{{ engine_workspace }}/offline/charts/local", state: directory}
+
+  - name: Place index.yaml to webserver stable charts repository
+    template:
+      src: "index.yaml.j2"
+      dest: "{{ engine_workspace }}/offline/charts/stable/index.yaml"
+      force: true
+  when: execution_mode == "offline-deployment"
+
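+# helm init --client-only configures the helm client without installing Tiller;
+# the local/stable repo URLs come from the execution-mode vars loaded above, so
+# offline deployments point at the local webserver mirror.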
+- name: Initialize Helm
+  command: helm init --client-only --local-repo-url {{ local_repo_url }} --stable-repo-url {{ stable_repo_url }}
+  register: helm_init_result
+  changed_when: true
+
+- name: Clone Helm Charts repository
+  git:
+    repo: "{{ helm_charts_git_url }}"
+    dest: "{{ engine_cache }}/repos/charts"
+    version: "{{ charts_version }}"
+    force: true
+    recursive: true
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- name: Generate values.yaml
+  template:
+    src: "values.yaml.j2"
+    dest: "{{ engine_cache }}/repos/charts/stable/spinnaker/values.yaml"
+    force: true
+
+- name: Remove previous installations of Spinnaker
+  command: >
+    helm delete --purge "{{ spinnaker_service }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Remove Spinnaker namespace
+  command: >
+    kubectl delete ns "{{ spinnaker_namespace }}"
+  changed_when: true
+  ignore_errors: true
+  tags: reset
+
+- name: Create Spinnaker namespace
+  k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ spinnaker_namespace }}"
+
+- name: Verify Spinnaker Helm charts are available to be deployed
+  command: helm search spinnaker -l
+  register: helm_search
+  changed_when: false
+
+- name: Log Helm chart list to console
+  debug:
+    msg: "{{ helm_search.stdout_lines }}"
+
+- name: Inform user about Spinnaker deployment
+  debug:
+    msg: >
+      Spinnaker deployment is about to start!
+      This takes a while and nothing will be logged to console until the process is completed.
+
+- name: Fetch all helm dependencies for Spinnaker
+  command: >
+    helm dependency update
+      {{ engine_cache }}/repos/charts/stable/spinnaker
+  changed_when: true
+
+- name: Install Spinnaker using helm
+  command: >
+    helm install
+      --name "{{ spinnaker_service }}"
+      --namespace "{{ spinnaker_namespace }}"
+      --timeout 900
+      {{ engine_cache }}/repos/charts/stable/spinnaker
+  register: spinnaker_helm_log
+  changed_when: true
+
+- name: Log Spinnaker helm output to console
+  debug:
+    msg: "{{ spinnaker_helm_log.stdout_lines }}"
+
+# wait 10 minutes for all containers to be started
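+# (60 retries x 10 second delay)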
+- name: Wait for all containers to be started
+  shell: |
+    set -o pipefail
+    kubectl get po -n spinnaker | grep ContainerCreating | wc -l
+  register: kube
+  changed_when: kube.stdout == '0'
+  until: kube.stdout == '0'
+  retries: 60
+  delay: 10
+
+# wait 20 minutes for all containers to be initialized
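+# (120 retries x 10 second delay)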
+- block:
+    - name: Wait for all containers to be initialized
+      shell: |
+        set -o pipefail
+        kubectl get po -n spinnaker | grep Init | grep -v Error | wc -l
+      register: kube
+      changed_when: kube.stdout == '0'
+      until: kube.stdout == '0'
+      retries: 120
+      delay: 10
+  always:
+    - name: Get POD status
+      command: kubectl get po -n spinnaker
+      changed_when: false
+      register: kube
+
+    - name: Log POD status to console
+      debug:
+        msg: "{{ kube.stdout_lines }}"
+
+    - name: Get summary of Spinnaker deployment
+      script: log-spinnaker-status.sh
+      register: spinnaker_status
+
+    - name: Log Spinnaker status to console
+      debug:
+        msg: "{{ spinnaker_status.stdout_lines }}"
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2 b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2
new file mode 100644
index 0000000..963516c
--- /dev/null
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/index.yaml.j2
@@ -0,0 +1,22 @@
+apiVersion: v1
+entries:
+  spinnaker:
+  - apiVersion: v1
+    description: Open source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence.
+    name: spinnaker
+    version: {{ spinnaker_version }}
+    appVersion: {{ spinnaker_app_version }}
+    home: http://spinnaker.io/
+    sources:
+    - https://github.com/spinnaker
+    - https://github.com/viglesiasce/images
+    icon: https://pbs.twimg.com/profile_images/669205226994319362/O7OjwPrh_400x400.png
+    maintainers:
+    - name: viglesiasce
+      email: viglesias@google.com
+    - name: ezimanyi
+      email: ezimanyi@google.com
+    - name: dwardu89
+      email: hello@dwardu.com
+    - name: paulczar
+      email: username.taken@gmail.com
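
The template above renders a Helm repository index, i.e. the index.yaml that helm repo add and helm fetch consult. As a rough sketch of how such an index is typically published and consumed: the web-root path and repository name below are illustrative, and stable_repo_url is defined in the vars files later in this change.

    - name: Publish the rendered index in the served web root (illustrative path)
      template:
        src: index.yaml.j2
        dest: /var/www/html/charts/stable/index.yaml

    - name: Register the locally served chart repository with helm (illustrative name)
      command: helm repo add offline-stable "{{ stable_repo_url }}"
      changed_when: true
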
diff --git a/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2 b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2
new file mode 100644
index 0000000..8d88583
--- /dev/null
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/templates/values.yaml.j2
@@ -0,0 +1,308 @@
+halyard:
+  spinnakerVersion: {{ spinnaker_app_version }}
+  image:
+    repository: {{ gcrio_image_repository }}/spinnaker-marketplace/halyard
+    tag: {{ spinnaker_version }}
+    pullSecrets: []
+  # Set to false to disable persistence data volume for halyard
+  persistence:
+    enabled: false
+  # Provide a config map with Hal commands that will be run after the core config (storage) is set
+  # The config map should contain a script in the config.sh key
+  additionalScripts:
+    enabled: false
+    configMapName: my-halyard-config
+    configMapKey: config.sh
+    # If you'd rather use an inline script, set create to true and put the content in the data dict, as you would for a ConfigMap
+    # The content will be passed through `tpl`, so value interpolation is supported.
+    create: false
+    data: {}
+  additionalSecrets:
+    create: false
+    data: {}
+    ## Uncomment if you want to use a pre-created secret rather than feeding data in via helm.
+    # name:
+  additionalConfigMaps:
+    create: false
+    data: {}
+    ## Uncomment if you want to use a pre-created ConfigMap rather than feeding data in via helm.
+    # name:
+  ## Define custom profiles for Spinnaker services. Read more for details:
+  ## https://www.spinnaker.io/reference/halyard/custom/#custom-profiles
+  ## The contents of the files will be passed through `tpl`, so value interpolation is supported.
+  additionalProfileConfigMaps:
+    data: {}
+      ## if you're running spinnaker behind a reverse proxy such as a GCE ingress
+      ## you may need the following profile settings for the gate profile.
+      ## see https://github.com/spinnaker/spinnaker/issues/1630
+      ## otherwise it's harmless and, according to the linked GitHub issue,
+      ## will likely become default behavior in the future.
+      # gate-local.yml:
+      #   server:
+      #     tomcat:
+      #       protocolHeader: X-Forwarded-Proto
+      #       remoteIpHeader: X-Forwarded-For
+      #       internalProxies: .*
+      #       httpsServerPort: X-Forwarded-Port
+
+  ## Define custom settings for Spinnaker services. Read more for details:
+  ## https://www.spinnaker.io/reference/halyard/custom/#custom-service-settings
+  ## You can use it to add annotations for pods, override the image, etc.
+  additionalServiceSettings: {}
+    # deck.yml:
+    #   artifactId: gcr.io/spinnaker-marketplace/deck:2.9.0-20190412012808
+    #   kubernetes:
+    #     podAnnotations:
+    #       iam.amazonaws.com/role: <role_arn>
+    # clouddriver.yml:
+    #   kubernetes:
+    #     podAnnotations:
+    #       iam.amazonaws.com/role: <role_arn>
+
+  ## Populate to provide a custom local BOM for Halyard to use for deployment. Read more for details:
+  ## https://www.spinnaker.io/guides/operator/custom-boms/#boms-and-configuration-on-your-filesystem
+  bom: ~
+  #   artifactSources:
+  #     debianRepository: https://dl.bintray.com/spinnaker-releases/debians
+  #     dockerRegistry: gcr.io/spinnaker-marketplace
+  #     gitPrefix: https://github.com/spinnaker
+  #     googleImageProject: marketplace-spinnaker-release
+  #   services:
+  #     clouddriver:
+  #       commit: 031bcec52d6c3eb447095df4251b9d7516ed74f5
+  #       version: 6.3.0-20190904130744
+  #     deck:
+  #       commit: b0aac478e13a7f9642d4d39479f649dd2ef52a5a
+  #       version: 2.12.0-20190916141821
+  #     ...
+  #   timestamp: '2019-09-16 18:18:44'
+  #   version: 1.16.1
+
+  ## Define local configuration for Spinnaker services.
+  ## The contents of these files would be copies of the configuration normally retrieved from
+  ## `gs://halconfig/<service-name>`, but instead need to be available locally on the halyard pod to facilitate
+  ## offline installation. This would typically be used along with a custom `bom:` with the `local:` prefix on a
+  ## service version.
+  ## Read more for details:
+  ## https://www.spinnaker.io/guides/operator/custom-boms/#boms-and-configuration-on-your-filesystem
+  ## The key for each entry must be the name of the service and a file name separated by the '_' character.
+  serviceConfigs: {}
+  # clouddriver_clouddriver-ro.yml: |-
+  #   ...
+  # clouddriver_clouddriver-rw.yml: |-
+  #   ...
+  # clouddriver_clouddriver.yml: |-
+  #   ...
+  # deck_settings.json: |-
+  #   ...
+  # echo_echo.yml: |-
+  #   ...
+
+  ## Uncomment if you want to add extra commands to the init script
+  ## run by the init container before halyard is started.
+  ## The content will be passed through `tpl`, so value interpolation is supported.
+  # additionalInitScript: |-
+
+  ## Uncomment if you want to add annotations on halyard and install-using-hal pods
+  # annotations:
+  #   iam.amazonaws.com/role: <role_arn>
+
+  ## Uncomment the following resource definitions to control the cpu and memory
+  ## allocated for the halyard pod
+  resources: {}
+    # requests:
+    #   memory: "1Gi"
+    #   cpu: "100m"
+    # limits:
+    #   memory: "2Gi"
+    #   cpu: "200m"
+
+  ## Uncomment if you want to set environment variables on the Halyard pod.
+  # env:
+  #   - name: JAVA_OPTS
+  #     value: -Dhttp.proxyHost=proxy.example.com
+  customCerts:
+    ## Enable to override the default cacerts with your own
+    enabled: false
+    secretName: custom-cacerts
+
+# Define which registries and repositories you want available in your
+# Spinnaker pipeline definitions
+# For more info visit:
+#   https://www.spinnaker.io/setup/providers/docker-registry/
+
+# Configure your Docker registries here
+dockerRegistries:
+- name: dockerhub
+  address: index.docker.io
+  repositories:
+    - library/alpine
+    - library/ubuntu
+    - library/centos
+    - library/nginx
+# - name: gcr
+#   address: https://gcr.io
+#   username: _json_key
+#   password: '<INSERT YOUR SERVICE ACCOUNT JSON HERE>'
+#   email: 1234@5678.com
+
+# If you don't want to put your passwords into a values file,
+# you can use a pre-created secret instead (specify its name in
+# `dockerRegistryAccountSecret` below), with one entry per account
+# above and data in the format:
+# <name>: <password>
+
+# dockerRegistryAccountSecret: myregistry-secrets
+
+kubeConfig:
+  # Use this when you want to register arbitrary clusters with Spinnaker
+  # Upload your ~/.kube/config to a secret
+  enabled: false
+  secretName: my-kubeconfig
+  secretKey: config
+  # Use this when you want to configure halyard to reference a kubeconfig from s3
+  # This allows you to keep your kubeconfig in an encrypted s3 bucket
+  # For more info visit:
+  #   https://www.spinnaker.io/reference/halyard/secrets/s3-secrets/#secrets-in-s3
+  # encryptedKubeconfig: encrypted:s3!r:us-west-2!b:mybucket!f:mykubeconfig
+  # List of contexts from the kubeconfig to make available to Spinnaker
+  contexts:
+  - default
+  deploymentContext: default
+  omittedNameSpaces:
+  - kube-system
+  - kube-public
+  onlySpinnakerManaged:
+    enabled: false
+
+  # When false, clouddriver will skip the permission checks for all kubernetes kinds at startup.
+  # This can save a great deal of time during clouddriver startup when you have many kubernetes
+  # accounts configured. This disables the log messages at startup about missing permissions.
+  checkPermissionsOnStartup: true
+
+  # A list of resource kinds this Spinnaker account can deploy to and will cache.
+  # When no kinds are configured, this defaults to 'all kinds'.
+  # kinds:
+  # -
+
+  # A list of resource kinds this Spinnaker account cannot deploy to or cache.
+  # This can only be set when kinds is empty or not set.
+  # omittedKinds:
+  # -
+
+# Change this if you'd like to expose Spinnaker outside the cluster
+ingress:
+  enabled: false
+  # host: spinnaker.example.org
+  # annotations:
+    # ingress.kubernetes.io/ssl-redirect: 'true'
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  # tls:
+  #  - secretName: -tls
+  #    hosts:
+  #      - domain.com
+
+ingressGate:
+  enabled: false
+  # host: gate.spinnaker.example.org
+  # annotations:
+    # ingress.kubernetes.io/ssl-redirect: 'true'
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  # tls:
+  #  - secretName: -tls
+  #    hosts:
+  #      - domain.com
+
+# spinnakerFeatureFlags is a list of Spinnaker feature flags to enable
+# Ref: https://www.spinnaker.io/reference/halyard/commands/#hal-config-features-edit
+# spinnakerFeatureFlags:
+#   - artifacts
+#   - pipeline-templates
+spinnakerFeatureFlags:
+  - artifacts
+  - jobs
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+# nodeSelector to provide to each of the Spinnaker components
+nodeSelector: {}
+
+# Redis password to use for the in-cluster redis service
+# Set enabled to true to deploy the in-cluster redis service
+redis:
+  enabled: true
+  # External Redis option will be enabled if in-cluster redis is disabled
+  external:
+    host: "<EXTERNAL-REDIS-HOST-NAME>"
+    port: 6379
+    # password: ""
+  password: password
+  nodeSelector: {}
+  cluster:
+    enabled: false
+  # Persistence is disabled here so that no PVC is created for redis
+  master:
+    persistence:
+      enabled: false
+
+# Minio access/secret keys for the in-cluster S3 usage
+# Minio is not exposed publicly
+minio:
+  enabled: true
+  imageTag: RELEASE.2019-02-13T19-48-27Z
+  serviceType: ClusterIP
+  accessKey: spinnakeradmin
+  secretKey: spinnakeradmin
+  bucket: "spinnaker"
+  nodeSelector: {}
+  # Persistence is disabled here so that no PVC is created for minio
+  persistence:
+    enabled: false
+
+# Google Cloud Storage
+gcs:
+  enabled: false
+  project: my-project-name
+  bucket: "<GCS-BUCKET-NAME>"
+  ## if jsonKey is set, will create a secret containing it
+  jsonKey: '<INSERT CLOUD STORAGE JSON HERE>'
+  ## Override the name of the secret to use for jsonKey. If `jsonKey` is
+  ## empty, no secret is created, assuming you are creating one external
+  ## to the chart. The key for that secret should be `key.json`.
+  secretName:
+
+# AWS Simple Storage Service
+s3:
+  enabled: false
+  bucket: "<S3-BUCKET-NAME>"
+  # rootFolder: "front50"
+  # region: "us-east-1"
+  # endpoint: ""
+  # accessKey: ""
+  # secretKey: ""
+  # assumeRole: "<role to assume>"
+
+# Azure Storage Account
+azs:
+  enabled: false
+#   storageAccountName: ""
+#   accessKey: ""
+#   containerName: "spinnaker"
+
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccounts to use.
+  # If left blank it is auto-generated from the fullname of the release
+  halyardName:
+  spinnakerName:
+securityContext:
+  # Specifies the user and group that the pods run as and write files as
+  runAsUser: 1000
+  fsGroup: 1000
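
The values template above parameterizes the chart for online and offline installs (image repository, Docker registries, storage backends). As a hedged sketch only, since the tasks consuming this template are not fully visible in this hunk, rendering it and wiring it into the helm invocation could look like the following; the /tmp destination is illustrative:

    - name: Render Spinnaker chart values from the template
      template:
        src: values.yaml.j2
        dest: /tmp/spinnaker-values.yaml
        force: true

    - name: Install Spinnaker using the rendered values
      command: >
        helm install
          --name "{{ spinnaker_service }}"
          --namespace "{{ spinnaker_namespace }}"
          --values /tmp/spinnaker-values.yaml
          --timeout 900
          {{ engine_cache }}/repos/charts/stable/spinnaker
      changed_when: true
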
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml
index bd7361f..4995e3d 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/main.yml
@@ -16,15 +16,9 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
+helm_charts_git_url: https://github.com/helm/charts.git
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+spinnaker_service: "spinnaker"
+spinnaker_namespace: "spinnaker"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
index bd7361f..2d8de91 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/offline-deployment.yaml
@@ -17,14 +17,9 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+gcrio_image_repository: "{{ server_fqdn }}"
+helm_charts_git_url: "{{ engine_workspace }}/offline/git/charts"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "http://{{ server_fqdn }}/charts/stable"
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml
similarity index 80%
copy from playbooks/roles/package/vars/Debian.yaml
copy to playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml
index bd7361f..51511e6 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/apps/spinnaker/kubespray/playbooks/roles/install/vars/online-deployment.yaml
@@ -17,14 +17,9 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+gcrio_image_repository: "gcr.io"
+helm_charts_git_url: "https://github.com/helm/charts.git"
+local_repo_url: "http://{{ server_fqdn }}/charts/local"
+stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
 
 # vim: set ts=2 sw=2 expandtab:
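
The offline and online vars files above differ only in where images and charts are fetched from. The deployment mode travels in execution_mode elsewhere in this repository (see the execution_mode == "offline-deployment" check in the deleted pre-deployment.yml below), so a role can pick the right file with a single include; a minimal sketch:

    - name: Load deployment-mode specific variables
      include_vars: "{{ execution_mode }}.yaml"
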
diff --git a/playbooks/configure-installer.yml b/playbooks/configure-installer.yml
index fc01f40..a6751c1 100644
--- a/playbooks/configure-installer.yml
+++ b/playbooks/configure-installer.yml
@@ -18,7 +18,6 @@
 # ============LICENSE_END=========================================================
 
 - hosts: localhost
-  connection: local
   gather_facts: true
 
   roles:
diff --git a/playbooks/post-deployment.yml b/playbooks/post-deployment.yml
deleted file mode 100644
index a23944d..0000000
--- a/playbooks/post-deployment.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-# check if any post-deployment task defined for the scenario
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  become: false
-
-  tasks:
-    - name: Check if any post-deployment task defined for '{{ deploy_scenario }}' scenario
-      stat:
-        path: "{{ engine_cache }}/repos/swconfig/{{ scenario[deploy_scenario].installers[installer_type].role }}/tasks/post-deployment.yml"
-      register: post_deployment_yml
-
-# run the scenario post-deployment tasks before running the common post-deployment tasks
-- hosts: all
-  gather_facts: true
-  become: true
-
-  tasks:
-    - name: Execute post-deployment tasks of '{{ deploy_scenario }}' scenario
-      include_role:
-        name: "{{ engine_cache }}/repos/swconfig/{{ scenario[deploy_scenario].installers[installer_type].role }}"
-        tasks_from: post-deployment
-      when: hostvars['localhost'].post_deployment_yml.stat.exists
-
-# run common post-deployment tasks
-# NOTE: The common post-deployment tasks is currently applicable only to
-# simple k8s and openstack scenarios.
-# in future, when statement could be moved to tasks in role if the current
-# tasks become relevant or new tasks are added.
-- hosts: all
-  gather_facts: true
-  become: true
-
-  roles:
-    - role: post-deployment
-      when: "'onap-' not in deploy_scenario"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/dib.yaml b/playbooks/postinstall.yaml
similarity index 72%
rename from playbooks/roles/package/tasks/dib.yaml
rename to playbooks/postinstall.yaml
index 887977b..c9f23f4 100644
--- a/playbooks/roles/package/tasks/dib.yaml
+++ b/playbooks/postinstall.yaml
@@ -17,19 +17,16 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-- name: Create directory to store images used for provisioning
-  file:
-    path: "{{ dib_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
+# run common postinstall tasks
+# NOTE: The common postinstall tasks are currently applicable only to
+# simple k8s and openstack scenarios.
+# In the future, the when statement could be moved into the role's tasks if
+# the current tasks become relevant or new tasks are added.
+- hosts: all
+  gather_facts: true
+  become: true
 
-- name: Download distro images used for provisioning nodes
-  get_url:
-    url: "{{ item }}"
-    dest: "{{ dib_folder }}"
-    force: true
-  loop: "{{ dib_images }}"
+  roles:
+    - role: postinstall
 
 # vim: set ts=2 sw=2 expandtab:
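
The deleted post-deployment.yml above applied the equivalent role with a scenario guard. If ONAP scenarios still need to skip the common tasks, the same guard fits this playbook; a sketch based on the removed play:

    - hosts: all
      gather_facts: true
      become: true

      roles:
        - role: postinstall
          when: "'onap-' not in deploy_scenario"
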
diff --git a/playbooks/pre-deployment.yml b/playbooks/pre-deployment.yml
deleted file mode 100644
index 44e4b01..0000000
--- a/playbooks/pre-deployment.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-# check if any pre-deployment task defined for the scenario
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  become: false
-
-  tasks:
-    - name: Check if any pre-deployment task defined for '{{ deploy_scenario }}' scenario
-      stat:
-        path: "{{ engine_cache }}/repos/swconfig/{{ scenario[deploy_scenario].installers[installer_type].role }}/tasks/pre-deployment.yml"
-      register: pre_deployment_yml
-
-# copy binaries to webserver and load/tag/push images to local registry
-- hosts: jumphost
-  gather_facts: true
-  become: true
-
-  tasks:
-    - name: Prepare kubespray artifacts
-      include_role:
-        name: prepare-kubespray-artifacts
-      when: execution_mode == "offline-deployment"
-
-# run the scenario pre-deployment tasks before running the deployment itself
-- hosts: all
-  gather_facts: true
-  become: true
-
-  tasks:
-    - name: Execute pre-deployment tasks of '{{ deploy_scenario }}' scenario
-      include_role:
-        name: "{{ engine_cache }}/repos/swconfig/{{ scenario[deploy_scenario].installers[installer_type].role }}"
-        tasks_from: pre-deployment
-      when: hostvars['localhost'].pre_deployment_yml.stat.exists
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/vars/Debian.yaml b/playbooks/preinstall.yaml
similarity index 80%
rename from playbooks/roles/package/vars/Debian.yaml
rename to playbooks/preinstall.yaml
index bd7361f..cf10f79 100644
--- a/playbooks/roles/package/vars/Debian.yaml
+++ b/playbooks/preinstall.yaml
@@ -17,14 +17,6 @@
 # SPDX-License-Identifier: Apache-2.0
 # ============LICENSE_END=========================================================
 
-# package names
-packages:
-  - dpkg-dev
-  - docker-ce={{ docker_ce_version }}
-  - docker-ce-cli={{ docker_ce_cli_version }}
-  - containerd.io={{ containerd_io_version }}
-
-# service names
-docker_service_name: docker
+# NOTE (fdegir): this playbook is intentionally left as a placeholder
 
 # vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/defaults/main.yaml b/playbooks/roles/package/defaults/main.yaml
deleted file mode 100644
index d97f2be..0000000
--- a/playbooks/roles/package/defaults/main.yaml
+++ /dev/null
@@ -1,222 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-# locations of the packaged dependencies
-pkg_folder: "{{ offline_pkg_folder }}/pkg"
-dib_folder: "{{ offline_pkg_folder }}/dib"
-git_folder: "{{ offline_pkg_folder }}/git"
-binaries_folder: "{{ offline_pkg_folder }}/binaries"
-containers_folder: "{{ offline_pkg_folder }}/containers"
-pip_folder: "{{ offline_pkg_folder }}/pip"
-
-helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz"
-
-# some images require architecture
-image_arch: amd64
-
-dib_images:
-  - https://artifactory.nordix.org/artifactory/cloud-infra/dib/deployment_image/ubuntu1804/deployment_image.qcow2
-  - https://artifactory.nordix.org/artifactory/cloud-infra/dib/ipa_image/coreos/ipa.initramfs
-  - https://artifactory.nordix.org/artifactory/cloud-infra/dib/ipa_image/coreos/ipa.kernel
-
-k8s_binaries:
-  - calicoctl
-  - cni
-  - kubeadm
-  - kubectl
-  - kubelet
-
-k8s_misc_images:
-  - addon_resizer
-  - calico_cni
-  - calico_node
-  - calico_policy
-  - coredns
-  - dashboard
-  - dnsautoscaler
-  - etcd
-  - flannel
-  - flannel_cni
-  - helm
-  - metrics_server
-  - multus
-  - nginx
-  - nodelocaldns
-  - pod_infra
-  - registry_proxy
-  - tiller
-  - weave_kube
-  - weave_npc
-
-other_images:
-  # ceph
-  ceph:
-    repo: ceph/ceph
-    tag: "{{ ceph_version }}"
-  cephcsi:
-    repo: quay.io/cephcsi/cephcsi
-    tag: "{{ cephcsi_version }}"
-  csi-attacher:
-    repo: quay.io/k8scsi/csi-attacher
-    tag: "{{ csi_attacher_version }}"
-  csi-node-driver-registrar:
-    repo: quay.io/k8scsi/csi-node-driver-registrar
-    tag: "{{ csi_node_driver_registrar_version }}"
-  csi-provisioner:
-    repo: quay.io/k8scsi/csi-provisioner
-    tag: "{{ csi_provisioner_version }}"
-  csi-snapshotter:
-    repo: quay.io/k8scsi/csi-snapshotter
-    tag: "{{ csi_snapshotter_version }}"
-  rook:
-    repo: rook/ceph
-    tag: "{{ rook_version }}"
-  # prometheus
-  prom-alertmanager:
-    repo: docker.io/prom/alertmanager
-    tag: "{{ prom_alertmanager_version }}"
-  prom-node-exporter:
-    repo: docker.io/prom/node-exporter
-    tag: "{{ prom_node_exporter_version }}"
-  prom-prometheus:
-    repo: docker.io/prom/prometheus
-    tag: "{{ prom_prometheus_version }}"
-  prom-pushgateway:
-    repo: docker.io/prom/pushgateway
-    tag: "{{ prom_push_gateway_version }}"
-  # docker
-  docker-registry:
-    repo: docker.io/registry
-    tag: "{{ docker_registry_version }}"
-  # other - we don't know where this comes from
-  configmap-reload:
-    repo: docker.io/jimmidyson/configmap-reload
-    tag: "{{ configmap_reload_version }}"
-  kube-state-metrics:
-    repo: quay.io/coreos/kube-state-metrics
-    tag: "{{ kube_state_metrics_version }}"
-
-repositories:
-  # NOTE (fdegir): OpenDev Git Repositories - Using Nordix Mirrors
-  bifrost:
-    repo: "https://gerrit.nordix.org/opendev/openstack/bifrost"
-    dest: "bifrost"
-    version: "{{ bifrost_version }}"
-  diskimage-builder:
-    repo: "https://gerrit.nordix.org/opendev/openstack/diskimage-builder"
-    dest: "diskimage-builder"
-    version: "{{ diskimage_builder_version }}"
-  ironic:
-    repo: "https://gerrit.nordix.org/opendev/openstack/ironic"
-    dest: "ironic"
-    version: "{{ ironic_version }}"
-  ironic-inspector:
-    repo: "https://gerrit.nordix.org/opendev/openstack/ironic-inspector"
-    dest: "ironic-inspector"
-    version: "{{ ironic_inspector_version }}"
-  ironic-python-agent:
-    repo: "https://gerrit.nordix.org/opendev/openstack/ironic-python-agent"
-    dest: "ironic-python-agent"
-    version: "{{ ironic_python_agent_version }}"
-  ironic-python-agent-builder:
-    repo: "https://gerrit.nordix.org/opendev/openstack/ironic-python-agent-builder"
-    dest: "ironic-python-agent-builder"
-    version: "{{ ironic_python_agent_builder_version }}"
-  ironic-staging-drivers:
-    repo: "https://gerrit.nordix.org/opendev/x/ironic-staging-drivers"
-    dest: "ironic-staging-drivers"
-    version: "{{ ironic_staging_drivers_version }}"
-  keystone:
-    repo: "https://gerrit.nordix.org/opendev/openstack/keystone"
-    dest: "keystone"
-    version: "{{ keystone_version }}"
-  openstacksdk:
-    repo: "https://gerrit.nordix.org/opendev/openstack/openstacksdk"
-    dest: "openstacksdk"
-    version: "{{ openstacksdk_version }}"
-  python-ironicclient:
-    repo: "https://gerrit.nordix.org/opendev/openstack/python-ironicclient"
-    dest: "python-ironicclient"
-    version: "{{ python_ironicclient_version }}"
-  python-ironic-inspector-client:
-    repo: "https://gerrit.nordix.org/opendev/openstack/python-ironic-inspector-client"
-    dest: "python-ironic-inspector-client"
-    version: "{{ python_ironic_inspector_client_version }}"
-  requirements:
-    repo: "https://gerrit.nordix.org/opendev/openstack/requirements"
-    dest: "requirements"
-    version: "{{ requirements_version }}"
-  shade:
-    repo: "https://gerrit.nordix.org/opendev/openstack/shade"
-    dest: "shade"
-    version: "{{ shade_version }}"
-  sushy:
-    repo: "https://gerrit.nordix.org/opendev/openstack/sushy"
-    dest: "sushy"
-    version: "{{ sushy_version }}"
-  # NOTE (fdegir): Kubespray and Helm Git Repositories
-  charts:
-    repo: "https://github.com/helm/charts.git"
-    dest: "charts"
-    version: "{{ charts_version }}"
-  kubespray:
-    repo: "https://github.com/kubernetes-sigs/kubespray.git"
-    dest: "kubespray"
-    version: "{{ kubespray_version }}"
-  # NOTE (fdegir): Nordix Git Repositories
-  engine:
-    repo: "https://gerrit.nordix.org/infra/engine.git"
-    dest: "engine"
-    version: "{{ lookup('env', 'NORDIX_ENGINE_VERSION') | default('master', true) }}"
-    refspec: "{{ lookup('env', 'NORDIX_ENGINE_REFSPEC') | default(omit) }}"
-  hwconfig:
-    repo: "https://gerrit.nordix.org/infra/hwconfig.git"
-    dest: "hwconfig"
-    version: "{{ lookup('env', 'NORDIX_HWCONFIG_VERSION') | default('master', true) }}"
-    refspec: "{{ lookup('env', 'NORDIX_HWCONFIG_REFSPEC') | default(omit) }}"
-  swconfig:
-    repo: "https://gerrit.nordix.org/infra/swconfig.git"
-    dest: "swconfig"
-    version: "{{ lookup('env', 'NORDIX_SWCONFIG_VERSION') | default('master', true) }}"
-    refspec: "{{ lookup('env', 'NORDIX_SWCONFIG_REFSPEC') | default(omit) }}"
-  test:
-    repo: "https://gerrit.nordix.org/infra/test.git"
-    dest: "test"
-    version: "{{ lookup('env', 'NORDIX_TEST_VERSION') | default('master', true) }}"
-    refspec: "{{ lookup('env', 'NORDIX_TEST_REFSPEC') | default(omit) }}"
-  # NOTE (fdegir): Engine provisioner and installer repos will be prepended with engine
-  # in order to prevent name clashes during packaging
-  engine-bifrost:
-    repo: "https://gerrit.nordix.org/infra/provisioner/bifrost.git"
-    dest: "engine-bifrost"
-    version: "{{ lookup('env', 'NORDIX_BIFROST_VERSION') | default(provisioner['bifrost'].version, true) }}"
-    refspec: "{{ provisioner['bifrost'].refspec | default(omit) }}"
-  engine-heat:
-    repo: "https://gerrit.nordix.org/infra/provisioner/heat.git"
-    dest: "engine-heat"
-    version: "{{ lookup('env', 'NORDIX_HEAT_VERSION') | default(provisioner['heat'].version, true) }}"
-    refspec: "{{ provisioner['heat'].refspec | default(omit) }}"
-  engine-kubespray:
-    repo: "https://gerrit.nordix.org/infra/installer/kubespray.git"
-    dest: "engine-kubespray"
-    version: "{{ lookup('env', 'NORDIX_KUBESPRAY_VERSION') | default(installer['kubespray'].version, true) }}"
-    refspec: "{{ installer['kubespray'].refspec | default(omit) }}"
-
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/files/build.sh b/playbooks/roles/package/files/build.sh
deleted file mode 100755
index 728bd4a..0000000
--- a/playbooks/roles/package/files/build.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-export OFFLINE_PKG_FOLDER="${OFFLINE_PKG_FOLDER:-/tmp/offline-package}"
-export OFFLINE_PKG_FILE="${OFFLINE_PKG_FILE:-/tmp/offline-package.tgz}"
-
-# NOTE (fdegir): In order to package and test the change for offline deployment,
-# we need to include the change/patch within the package since that is what should
-# be used during the deployment phase.
-# check if we are running as part of CI verify job
-GERRIT_PROJECT="${GERRIT_PROJECT:-}"
-if [[ "$GERRIT_PROJECT" == "infra/engine" ]]; then
-  REPO_GIT_URL="https://gerrit.nordix.org/infra/engine.git"
-  echo "Info  : Running in CI - infra/engine patch will be packaged for testing."
-  echo "        Checking out the change/patch $GERRIT_REFSPEC for $REPO_GIT_URL"
-  # navigate to the folder and checkout the patch
-  cd "$OFFLINE_PKG_FOLDER/git/engine"
-  git fetch "$REPO_GIT_URL" "$GERRIT_REFSPEC" && git checkout FETCH_HEAD
-fi
-
-# compress & archive offline dependencies
-tar -C "$OFFLINE_PKG_FOLDER" -czf "$OFFLINE_PKG_FILE" .
-
-#removing intermediate files as when ONAP included size becomes HUGE
-rm -fr "$OFFLINE_PKG_FOLDER"
-
-# create self extracting installer
-cat /tmp/decompress.sh "$OFFLINE_PKG_FILE" > "$OFFLINE_INSTALLER_FILE"
-chmod +x "$OFFLINE_INSTALLER_FILE"
-
-#removing intermediate files as when ONAP included size becomes HUGE
-rm "$OFFLINE_PKG_FILE"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/files/decompress.sh b/playbooks/roles/package/files/decompress.sh
deleted file mode 100755
index 25c7570..0000000
--- a/playbooks/roles/package/files/decompress.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-cat <<EOF
-#---------------------------------------------------#
-#             Self Extracting Installer             #
-#---------------------------------------------------#
-User            : $USER
-Hostname        : $HOSTNAME
-Host OS         : $(source /etc/os-release &> /dev/null || source /usr/lib/os-release &> /dev/null; echo "${PRETTY_NAME}")
-IP              : $(hostname -I | cut -d' ' -f1)
-#---------------------------------------------------#
-Info  : Please wait while extracting dependencies.
-        This might take a while.
-#---------------------------------------------------#
-EOF
-
-ENGINE_WORKSPACE=/opt/engine
-DESTINATION_FOLDER="$ENGINE_WORKSPACE/offline"
-export ENGINE_WORKSPACE DESTINATION_FOLDER
-
-# NOTE (fdegir): we need to clean things up in order to prevent side effects from leftovers
-sudo rm -rf "$ENGINE_WORKSPACE"
-sudo mkdir -p "$DESTINATION_FOLDER"
-sudo chown -R "$USER":"$USER" "$ENGINE_WORKSPACE"
-
-ARCHIVE=$(awk '/^__ARCHIVE_BELOW__/ {print NR + 1; exit 0; }' "$0")
-
-tail -n+"$ARCHIVE" "$0" | tar -xz -C "$DESTINATION_FOLDER"
-
-cd "$DESTINATION_FOLDER"
-./install.sh
-
-exit 0
-__ARCHIVE_BELOW__
diff --git a/playbooks/roles/package/files/install.sh b/playbooks/roles/package/files/install.sh
deleted file mode 100755
index a5c1ccb..0000000
--- a/playbooks/roles/package/files/install.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# TODO (fdegir): This script could be enhanced to provide full installation functionality
-# by parsing arguments and executing actual engine deploy.sh with the arguments but left for later
-echo "Info  : Dependencies are extracted to $DESTINATION_FOLDER"
-echo "Info  : Please navigate to $DESTINATION_FOLDER/git/engine/engine folder and issue deployment command"
-echo "        You can get help about the engine usage by issuing command ./deploy.sh -h"
-echo "        Do not forget to specify PDF and IDF file locations using -p and -i arguments!"
-echo "Info  : Done!"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/binaries.yaml b/playbooks/roles/package/tasks/binaries.yaml
deleted file mode 100644
index f90ac01..0000000
--- a/playbooks/roles/package/tasks/binaries.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Create directory to store k8s binaries
-  file:
-    path: "{{ binaries_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Set versions of k8s components in Kubespray downloads role
-  lineinfile:
-    path: "{{ git_folder }}/kubespray/roles/download/defaults/main.yml"
-    regexp: "{{ item.regexp }}"
-    line: "{{ item.line }}"
-  with_items:
-    - {regexp: "^kube_version:.*", line: "kube_version: {{ kubernetes_version }}"}
-    - {regexp: "^helm_version:.*", line: "helm_version: {{ helm_version }}"}
-
-# NOTE (fdegir): order of vars files is significant
-- name: Include kubespray vars files
-  include_vars: "{{ item }}"
-  with_items:
-    - "{{ git_folder }}/kubespray/roles/kubespray-defaults/defaults/main.yaml"
-    - "{{ git_folder }}/kubespray/roles/download/defaults/main.yml"
-
-- name: Download k8s binaries
-  get_url:
-    url: "{{ downloads[item].url }}"
-    dest: "{{ binaries_folder }}/{{ item }}"
-    mode: 0755
-    force: true
-  loop: "{{ k8s_binaries }}"
-
-- name: Download helm binary
-  get_url:
-    url: "{{ helm_download_url }}"
-    dest: "{{ binaries_folder }}"
-    mode: 0755
-    force: true
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/containers.yaml b/playbooks/roles/package/tasks/containers.yaml
deleted file mode 100644
index 47e5efd..0000000
--- a/playbooks/roles/package/tasks/containers.yaml
+++ /dev/null
@@ -1,151 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Create directory to store container images
-  file:
-    path: "{{ containers_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Set versions of k8s components in Kubespray downloads role
-  lineinfile:
-    path: "{{ git_folder }}/kubespray/roles/download/defaults/main.yml"
-    regexp: "{{ item.regexp }}"
-    line: "{{ item.line }}"
-  with_items:
-    - {regexp: "^kube_version:.*", line: "kube_version: {{ kubernetes_version }}"}
-    - {regexp: "^helm_version:.*", line: "helm_version: {{ helm_version }}"}
-
-# NOTE (fdegir): order of vars files is significant
-- name: Include kubespray vars files
-  include_vars: "{{ item }}"
-  with_items:
-    - "{{ git_folder }}/kubespray/roles/kubespray-defaults/defaults/main.yaml"
-    - "{{ git_folder }}/kubespray/roles/download/defaults/main.yml"
-
-- name: Remove outdated kubeadm-images.yaml file
-  file:
-    path: "/tmp/kubeadm-images.yaml"
-    state: absent
-
-- name: Generate kubeadm-images.yaml to fetch container images
-  template:
-    src: "kubeadm-images.yaml.j2"
-    dest: "/tmp/kubeadm-images.yaml"
-    force: true
-
-# NOTE (fdegir): the tasks below are taken from kubespray/roles/download/tasks/prep_kubeadm_images.yml
-- name: Get list of kubeadm images
-  shell: |
-    set -o pipefail
-    {{ binaries_folder }}/kubeadm config images list --config=/tmp/kubeadm-images.yaml | grep -v coredns
-  args:
-    executable: /bin/bash
-  register: kubeadm_images_raw
-  run_once: true
-  changed_when: false
-
-- name: Parse list of kubeadm images
-  vars:
-    kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
-  set_fact:
-    kubeadm_image:
-      key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}"
-      value:
-        enabled: true
-        container: true
-        repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
-        tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
-  loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
-  register: kubeadm_images_cooked
-  run_once: true
-
-- name: Convert list of kubeadm images to dict
-  set_fact:
-    kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
-  run_once: true
-
-# NOTE (fdegir): docker_image module doesn't seem to respect become so falling back to command module
-- name: Pull kubeadm container images
-  command: "docker pull {{ kubeadm_images[item.key].repo }}:{{ kubeadm_images[item.key].tag }}"
-  with_dict: "{{ kubeadm_images }}"
-  become: true
-  changed_when: false
-
-- name: Pull misc container images
-  command: "docker pull {{ downloads[item].repo }}:{{ downloads[item].tag }}"
-  loop: "{{ k8s_misc_images }}"
-  become: true
-  changed_when: false
-
-- name: combine ONAP containers if any
-  set_fact:
-    other_images: "{{ other_images | combine({item.key: item.value}) }}"
-  with_dict: "{{ onap_images | default ({}) }}"
-
-- name: Pull other container images
-  command: "docker pull {{ other_images[item.key].repo }}:{{ other_images[item.key].tag }}"
-  with_dict: "{{ other_images }}"
-  become: true
-  changed_when: false
-
-
-# save container images
-- name: Save kubeadm container images
-  command: |-
-    docker save {{ kubeadm_images[item.key].repo }}:{{ kubeadm_images[item.key].tag }}
-    -o {{ kubeadm_images[item.key].repo | replace('/', '_') }}_{{ kubeadm_images[item.key].tag }}.tar
-  with_dict: "{{ kubeadm_images }}"
-  args:
-    chdir: "{{ containers_folder }}"
-  become: true
-  changed_when: false
-
-- name: Save misc container images
-  command: |-
-    docker save {{ downloads[item].repo }}:{{ downloads[item].tag }}
-    -o {{ downloads[item].repo }} -o {{ downloads[item].repo | replace('/', '_') }}_{{ downloads[item].tag }}.tar
-  loop: "{{ k8s_misc_images }}"
-  args:
-    chdir: "{{ containers_folder }}"
-  become: true
-  changed_when: false
-
-- name: Save other container images
-  command: |-
-    docker save {{ other_images[item.key].repo }}:{{ other_images[item.key].tag }}
-    -o {{ other_images[item.key].repo | replace('/', '_') }}_{{ other_images[item.key].tag }}.tar
-  with_dict: "{{ other_images }}"
-  args:
-    chdir: "{{ containers_folder }}"
-  become: true
-  changed_when: false
-
-# NOTE (fdegir): archive fails due to wrong permissions so we fix them
-- name: Fix container image permissions
-  file:
-    path: "{{ containers_folder }}"
-    state: directory
-    recurse: true
-    mode: 0755
-  become: true
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/fetch-component-vars.yaml b/playbooks/roles/package/tasks/fetch-component-vars.yaml
deleted file mode 100644
index b1a4fc7..0000000
--- a/playbooks/roles/package/tasks/fetch-component-vars.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Clone '{{ component.key }}' repository and checkout version '{{ component.value.version }}'
-  git:
-    repo: "{{ component.value.src }}"
-    dest: "{{ tempdir.path }}/{{ component.key }}"
-    version: "{{ component.value.version }}"
-    refspec: "{{ component.value.refspec | default(omit) }}"
-    force: true
-  environment:
-    http_proxy: "{{ lookup('env','http_proxy') }}"
-    https_proxy: "{{ lookup('env','https_proxy') }}"
-    no_proxy: "{{ lookup('env','no_proxy') }}"
-
-- name: Ensure outdated '{{ component.key }}' vars file is removed
-  file:
-    path: "{{ inventory_path }}/group_vars/all/{{ component.key }}.yaml"
-  ignore_errors: true
-
-- name: Copy '{{ component.key }}' vars file to engine group_vars
-  copy:
-    src: "{{ tempdir.path }}/{{ component.key }}/vars/{{ component.key }}.yaml"
-    dest: "{{ inventory_path }}/group_vars/all/{{ component.key }}.yaml"
-    force: true
-
-- name: Include '{{ component.key }}' vars
-  include_vars: "{{ inventory_path }}/group_vars/all/{{ component.key }}.yaml"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/git.yaml b/playbooks/roles/package/tasks/git.yaml
deleted file mode 100644
index 72d1b87..0000000
--- a/playbooks/roles/package/tasks/git.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Create directory to store git repositories
-  file:
-    path: "{{ git_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: combine ONAP repos if any
-  set_fact:
-    repositories: "{{ repositories | combine({item.key: item.value}) }}"
-  with_dict: "{{ onap_repositories | default ({}) }}"
-
-- name: Clone repositories
-  git:
-    repo: "{{ repositories[item.key].repo }}"
-    dest: "{{ git_folder }}/{{ repositories[item.key].dest }}"
-    version: "{{ repositories[item.key].version }}"
-    refspec: "{{ repositories[item.key].refspec | default(omit) }}"
-    force: true
-  with_dict: "{{ repositories | default({}) }}"
-  environment:
-    http_proxy: "{{ lookup('env','http_proxy') }}"
-    https_proxy: "{{ lookup('env','https_proxy') }}"
-    no_proxy: "{{ lookup('env','no_proxy') }}"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/install-packages-Debian.yml b/playbooks/roles/package/tasks/install-packages-Debian.yml
deleted file mode 100644
index b69f4bd..0000000
--- a/playbooks/roles/package/tasks/install-packages-Debian.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Load distribution variables
-  include_vars: '{{ ansible_os_family }}.yaml'
-
-- name: Add docker apt key
-  apt_key:
-    url: https://download.docker.com/linux/ubuntu/gpg
-    state: present
-  become: true
-
-# NOTE(fdegir): ansible apt_repository gives segmentation fault so failling back to command
-- name: Add docker apt repository
-  command: |-
-    add-apt-repository \
-    "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
-  changed_when: false
-  become: true
-
-- name: Run apt update
-  apt:
-    update_cache: true
-  become: true
-
-- name: Install packages
-  apt:
-    name: "{{ packages }}"
-    state: "{{ item }}"
-    force: true
-    install_recommends: true
-    autoremove: true
-    update_cache: true
-  with_items:
-    - absent
-    - present
-  become: true
-
-- name: Restart docker service
-  service:
-    name: "{{ docker_service_name }}"
-    state: restarted
-  become: true
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/main.yaml b/playbooks/roles/package/tasks/main.yaml
deleted file mode 100644
index c7ee75e..0000000
--- a/playbooks/roles/package/tasks/main.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Create temporary folder to clone provisioner and installer repositories
-  tempfile:
-    state: directory
-  register: tempdir
-
-- name: Fetch vars files of provisioners and installers
-  include_tasks: fetch-component-vars.yaml
-  with_dict:
-    - "{{ provisioner }}"
-    - "{{ installer }}"
-  loop_control:
-    loop_var: component
-
-- name: Delete temporary folder
-  file:
-    path: "{{ tempdir.path }}"
-    state: absent
-
-- name: Create folder to store dependencies for offline deployment
-  file:
-    path: "{{ offline_pkg_folder }}"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-- name: Load ONAP specific variables
-  include_vars: "onap-offlinepackagevars-{{ lookup('env', 'NORDIX_ONAP_VERSION') | default('6.0.0', true) }}.yaml"
-
-- name: Install packages on {{ ansible_os_family }}
-  include_tasks: "install-packages-{{ ansible_os_family }}.yml"
-
-# TODO (fdegir): we need to switch to build with dib here
-- name: Fetch operating system images for provisioning
-  include_tasks: dib.yaml
-
-# collect apt packages
-- name: Fetch operating system packages
-  include_tasks: "pkg-{{ ansible_os_family }}.yaml"
-
-# clone git repositories
-- name: Fetch git repositories
-  include_tasks: git.yaml
-
-# download binaries
-- name: Fetch binaries
-  include_tasks: binaries.yaml
-
-# download pip packages
-- name: Fetch pip python packages
-  include_tasks: pip.yaml
-
-# fetch k8s container images
-- name: Fetch container images
-  include_tasks: containers.yaml
-
-# fetch  artifactory items
-- name: Fetch artifactory items
-  include_tasks: artifactory.yaml
-
-# ensure we don't have leftovers
-- name: Delete outdated files
-  file:
-    path: "{{ item }}"
-    state: absent
-  with_items:
-    - "{{ offline_pkg_file }}"
-    - "{{ offline_pkg_folder }}/install.sh"
-    - "{{ offline_installer_file }}"
-    - "/tmp/decompress.sh"
-
-- name: Copy decompress and install scripts
-  copy:
-    src: "{{ item.src }}"
-    dest: "{{ item.dest }}"
-    mode: 0755
-  with_items:
-    - {src: "install.sh", dest: "{{ offline_pkg_folder }}/install.sh"}
-    - {src: "decompress.sh", dest: "/tmp/decompress.sh"}
-
-# create tarball
-- name: Create engine installer file
-  script: build.sh
-  register: build_script
-
-- name: Log build script output to console
-  debug:
-    msg: "{{ build_script.stdout_lines }}"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/tasks/pkg-Debian.yaml b/playbooks/roles/package/tasks/pkg-Debian.yaml
deleted file mode 100644
index f844bf9..0000000
--- a/playbooks/roles/package/tasks/pkg-Debian.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Create directory to store apt packages
-  file:
-    path: "{{ pkg_folder }}/amd64"
-    state: "{{ item }}"
-  with_items:
-    - absent
-    - directory
-
-# NOTE (fdegir): docker version is taken and updated from engine versions file
-- name: Generate ubuntu.list file from template
-  template:
-    src: ubuntu.list.j2
-    dest: /tmp/ubuntu.list
-    force: true
-
-- name: Download apt packages using ubuntu.list file
-  shell: |
-    set -o pipefail
-    apt download $(grep -vE "^\s*#" /tmp/ubuntu.list | tr "\n" " ")
-  changed_when: false
-  args:
-    executable: /bin/bash
-    chdir: "{{ pkg_folder }}/amd64"
-
-- name: Generate Packages.gz file for apt packages
-  shell: |
-    set -o pipefail
-    dpkg-scanpackages amd64 | gzip -9c > amd64/Packages.gz
-  args:
-    executable: /bin/bash
-    creates: "{{ pkg_folder }}/amd64/Packages.gz"
-    chdir: "{{ pkg_folder }}"
-
-# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/package/templates/kubeadm-images.yaml.j2 b/playbooks/roles/package/templates/kubeadm-images.yaml.j2
deleted file mode 100644
index cc4f212..0000000
--- a/playbooks/roles/package/templates/kubeadm-images.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: kubeadm.k8s.io/v1beta1
-kind: InitConfiguration
-nodeRegistration:
-  criSocket: {{ cri_socket }}
----
-apiVersion: kubeadm.k8s.io/v1beta1
-kind: ClusterConfiguration
-imageRepository: {{ kube_image_repo }}
-kubernetesVersion: {{ kube_version }}
-dns:
-  type: CoreDNS
-  imageRepository: {{ coredns_image_repo | regex_replace('/coredns$','') }}
-  imageTag: {{ coredns_image_tag }}
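
The template deleted here rendered the InitConfiguration/ClusterConfiguration
pair that kubeadm uses to resolve the exact control-plane image set. A usage
sketch, assuming the rendered file is written to /tmp/kubeadm-images.yaml:

    # resolve, then pre-pull, the images the rendered config points at
    kubeadm config images list --config /tmp/kubeadm-images.yaml
    kubeadm config images pull --config /tmp/kubeadm-images.yaml
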
diff --git a/playbooks/roles/package/templates/pip.conf.j2 b/playbooks/roles/package/templates/pip.conf.j2
deleted file mode 100644
index 9ab40ef..0000000
--- a/playbooks/roles/package/templates/pip.conf.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-[global]
-timeout=10
-find-links={{ engine_workspace }}/offline/pip
-no-index=yes
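
The deleted pip.conf forced fully offline installs by disabling the index and
resolving against the mirrored wheels only. The same behaviour is available
per invocation with flags; the package name below is just an example:

    pip install --no-index --find-links "${ENGINE_WORKSPACE}/offline/pip" ansible
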
diff --git a/playbooks/roles/package/templates/ubuntu.list.j2 b/playbooks/roles/package/templates/ubuntu.list.j2
deleted file mode 100644
index 7003921..0000000
--- a/playbooks/roles/package/templates/ubuntu.list.j2
+++ /dev/null
@@ -1,859 +0,0 @@
-accountsservice
-acl
-acpid
-adduser
-amd64-microcode
-apparmor
-apport
-apport-symptoms
-apt
-apt-cacher-ng
-apt-transport-https
-apt-utils
-at
-aufs-tools
-augeas-lenses
-base-files
-base-passwd
-bash
-bash-completion
-bc
-bcache-tools
-bind9-host
-binutils
-binutils-common:amd64
-binutils-x86-64-linux-gnu
-bridge-utils
-bsdmainutils
-bsdutils
-btrfs-progs
-btrfs-tools
-build-essential
-busybox
-busybox-initramfs
-busybox-static
-byobu
-bzip2
-ca-certificates
-chrony
-cloud-guest-utils
-cloud-image-utils
-cloud-init
-cloud-initramfs-copymods
-cloud-initramfs-dyn-netconf
-cloud-utils
-command-not-found
-command-not-found-data
-conntrack
-console-setup
-console-setup-linux
-coreutils
-cpio
-cpp
-cpp-7
-cpu-checker
-crda
-cron
-cryptsetup
-cryptsetup-bin
-curl
-dash
-dbus
-dconf-gsettings-backend:amd64
-dconf-service
-debconf
-debconf-i18n
-debianutils
-debootstrap
-dh-python
-diffutils
-dirmngr
-distro-info-data
-dmeventd
-dmidecode
-dmsetup
-dnsmasq
-dnsmasq-base
-dns-root-data
-dnsutils
-dosfstools
-dpkg
-dpkg-dev
-e2fsprogs
-eatmydata
-ebtables
-ed
-efibootmgr
-eject
-ethtool
-fakeroot
-fdisk
-file
-findutils
-fontconfig
-fontconfig-config
-fonts-dejavu-core
-fonts-ubuntu-console
-freeipmi-common
-friendly-recovery
-ftp
-fuse
-g++
-g++-7
-galera-3
-gawk
-gcc
-gcc-7
-gcc-7-base:amd64
-gcc-8-base:amd64
-gdisk
-genisoimage
-geoip-database
-gettext-base
-gir1.2-glib-2.0:amd64
-gir1.2-harfbuzz-0.0:amd64
-git
-git-man
-glib-networking:amd64
-glib-networking-common
-glib-networking-services
-gnupg
-gnupg-agent
-gnupg-l10n
-gnupg-utils
-gpg
-gpg-agent
-gpgconf
-gpgsm
-gpgv
-gpg-wks-client
-gpg-wks-server
-grep
-groff-base
-grub2-common
-grub-common
-grub-efi-amd64
-grub-efi-amd64-bin
-grub-efi-amd64-signed
-grub-gfxpayload-lists
-grub-ipxe
-grub-legacy-ec2
-grub-pc
-grub-pc-bin
-gsettings-desktop-schemas
-gstreamer1.0-plugins-base:amd64
-gstreamer1.0-plugins-good:amd64
-gstreamer1.0-x:amd64
-gzip
-hdparm
-hostname
-htop
-ibverbs-providers:amd64
-icu-devtools
-ifupdown
-info
-init
-initramfs-tools
-initramfs-tools-bin
-initramfs-tools-core
-init-system-helpers
-install-info
-intel-microcode
-ipmitool
-iproute2
-ipset
-iptables
-iputils-ping
-iputils-tracepath
-ipvsadm
-ipxe
-ipxe-qemu
-ipxe-qemu-256k-compat-efi-roms
-irqbalance
-isc-dhcp-client
-isc-dhcp-common
-iso-codes
-iucode-tool
-iw
-javascript-common
-kbd
-keyboard-configuration
-klibc-utils
-kmod
-kpartx
-krb5-locales
-landscape-common
-language-pack-en
-language-pack-en-base
-language-selector-common
-less
-libaa1:amd64
-libaccountsservice0:amd64
-libacl1:amd64
-libaio1:amd64
-libalgorithm-diff-perl
-libalgorithm-diff-xs-perl
-libalgorithm-merge-perl
-libapparmor1:amd64
-libapt-inst2.0:amd64
-libapt-pkg5.0:amd64
-libargon2-0:amd64
-libasan4:amd64
-libasn1-8-heimdal:amd64
-libasound2:amd64
-libasound2-data
-libassuan0:amd64
-libasyncns0:amd64
-libatm1:amd64
-libatomic1:amd64
-libattr1:amd64
-libaudit1:amd64
-libaudit-common
-libaugeas0:amd64
-libavahi-client3:amd64
-libavahi-common3:amd64
-libavahi-common-data:amd64
-libavc1394-0:amd64
-libbind9-160:amd64
-libbinutils:amd64
-libblkid1:amd64
-libbluetooth3:amd64
-libbrlapi0.6:amd64
-libbsd0:amd64
-libbz2-1.0:amd64
-libc6:amd64
-libc6-dev:amd64
-libcaca0:amd64
-libcacard0:amd64
-libcairo2:amd64
-libcairo-gobject2:amd64
-libcap2:amd64
-libcap2-bin
-libcap-ng0:amd64
-libc-bin
-libcc1-0:amd64
-libc-dev-bin
-libcdparanoia0:amd64
-libcgi-fast-perl
-libcgi-pm-perl
-libcilkrts5:amd64
-libcom-err2:amd64
-libconfig-inifiles-perl
-libcryptsetup12:amd64
-libcurl3-gnutls:amd64
-libcurl4:amd64
-libdatrie1:amd64
-libdb5.3:amd64
-libdbd-mysql-perl
-libdbi-perl
-libdbus-1-3:amd64
-libdconf1:amd64
-libdebconfclient0:amd64
-libdevmapper1.02.1:amd64
-libdevmapper-event1.02.1:amd64
-libdns1100:amd64
-libdns-export1100
-libdpkg-perl
-libdrm2:amd64
-libdrm-common
-libdumbnet1:amd64
-libdv4:amd64
-libeatmydata1:amd64
-libedit2:amd64
-libefiboot1:amd64
-libefivar1:amd64
-libelf1:amd64
-libencode-locale-perl
-liberror-perl
-libestr0:amd64
-libevent-2.1-6:amd64
-libexpat1:amd64
-libexpat1-dev:amd64
-libext2fs2:amd64
-libfakeroot:amd64
-libfastjson4:amd64
-libfcgi-perl
-libfdisk1:amd64
-libfdt1:amd64
-libffi6:amd64
-libffi-dev:amd64
-libfile-copy-recursive-perl
-libfile-fcntllock-perl
-libflac8:amd64
-libfontconfig1:amd64
-libfreeipmi16
-libfreetype6:amd64
-libfribidi0:amd64
-libfuse2:amd64
-libgcc1:amd64
-libgcc-7-dev:amd64
-libgcrypt20:amd64
-libgd3:amd64
-libgdbm5:amd64
-libgdbm-compat4:amd64
-libgdk-pixbuf2.0-0:amd64
-libgdk-pixbuf2.0-bin
-libgdk-pixbuf2.0-common
-libgeoip1:amd64
-libgirepository-1.0-1:amd64
-libglib2.0-0:amd64
-libglib2.0-bin
-libglib2.0-data
-libglib2.0-dev:amd64
-libglib2.0-dev-bin
-libgmp10:amd64
-libgnutls30:amd64
-libgomp1:amd64
-libgpg-error0:amd64
-libgpm2:amd64
-libgraphite2-3:amd64
-libgraphite2-dev:amd64
-libgssapi3-heimdal:amd64
-libgssapi-krb5-2:amd64
-libgstreamer1.0-0:amd64
-libgstreamer-plugins-base1.0-0:amd64
-libgstreamer-plugins-good1.0-0:amd64
-libgudev-1.0-0:amd64
-libharfbuzz0b:amd64
-libharfbuzz-dev:amd64
-libharfbuzz-gobject0:amd64
-libharfbuzz-icu0:amd64
-libhcrypto4-heimdal:amd64
-libheimbase1-heimdal:amd64
-libheimntlm0-heimdal:amd64
-libhogweed4:amd64
-libhtml-parser-perl
-libhtml-tagset-perl
-libhtml-template-perl
-libhttp-date-perl
-libhttp-message-perl
-libhx509-5-heimdal:amd64
-libibverbs1:amd64
-libicu60:amd64
-libicu-dev
-libicu-le-hb0:amd64
-libicu-le-hb-dev:amd64
-libiculx60:amd64
-libidn11:amd64
-libidn2-0:amd64
-libiec61883-0:amd64
-libio-html-perl
-libip4tc0:amd64
-libip6tc0:amd64
-libipset3:amd64
-libiptc0:amd64
-libirs160:amd64
-libisc169:amd64
-libisccc160:amd64
-libisccfg160:amd64
-libisc-export169:amd64
-libiscsi7:amd64
-libisl19:amd64
-libisns0:amd64
-libitm1:amd64
-libjack-jackd2-0:amd64
-libjbig0:amd64
-libjemalloc1
-libjpeg8:amd64
-libjpeg-turbo8:amd64
-libjs-jquery
-libjson-c3:amd64
-libjs-sphinxdoc
-libjs-underscore
-libk5crypto3:amd64
-libkeyutils1:amd64
-libklibc
-libkmod2:amd64
-libkrb5-26-heimdal:amd64
-libkrb5-3:amd64
-libkrb5support0:amd64
-libksba8:amd64
-libldap-2.4-2:amd64
-libldap-common
-liblocale-gettext-perl
-liblsan0:amd64
-libltdl7
-liblvm2app2.2:amd64
-liblvm2cmd2.02:amd64
-liblwp-mediatypes-perl
-liblwres160:amd64
-liblxc1
-liblxc-common
-liblz4-1:amd64
-liblzma5:amd64
-liblzo2-2:amd64
-libmagic1:amd64
-libmagic-mgc
-libmnl0:amd64
-libmount1:amd64
-libmp3lame0:amd64
-libmpc3:amd64
-libmpdec2:amd64
-libmpfr6:amd64
-libmpg123-0:amd64
-libmpx2:amd64
-libmspack0:amd64
-libmysqlclient20:amd64
-libncurses5:amd64
-libncursesw5:amd64
-libnetcf1:amd64
-libnetfilter-conntrack3:amd64
-libnettle6:amd64
-libnewt0.52:amd64
-libnfnetlink0:amd64
-libnghttp2-14:amd64
-libnginx-mod-http-geoip
-libnginx-mod-http-image-filter
-libnginx-mod-http-xslt-filter
-libnginx-mod-mail
-libnginx-mod-stream
-libnih1:amd64
-libnl-3-200:amd64
-libnl-genl-3-200:amd64
-libnl-route-3-200:amd64
-libnorm1:amd64
-libnpth0:amd64
-libnspr4:amd64
-libnss3:amd64
-libnss-systemd:amd64
-libntfs-3g88
-libnuma1:amd64
-libogg0:amd64
-libopenipmi0
-libopus0:amd64
-liborc-0.4-0:amd64
-libp11-kit0:amd64
-libpam0g:amd64
-libpam-cap:amd64
-libpam-modules:amd64
-libpam-modules-bin
-libpam-runtime
-libpam-systemd:amd64
-libpango-1.0-0:amd64
-libpangocairo-1.0-0:amd64
-libpangoft2-1.0-0:amd64
-libparted2:amd64
-libpcap0.8:amd64
-libpci3:amd64
-libpciaccess0:amd64
-libpcre16-3:amd64
-libpcre32-3:amd64
-libpcre3:amd64
-libpcre3-dev:amd64
-libpcrecpp0v5:amd64
-libperl5.26:amd64
-libpgm-5.2-0:amd64
-libpipeline1:amd64
-libpixman-1-0:amd64
-libplymouth4:amd64
-libpng16-16:amd64
-libpolkit-agent-1-0:amd64
-libpolkit-backend-1-0:amd64
-libpolkit-gobject-1-0:amd64
-libpopt0:amd64
-libprocps6:amd64
-libproxy1v5:amd64
-libpsl5:amd64
-libpulse0:amd64
-libpython2.7:amd64
-libpython2.7-dev:amd64
-libpython2.7-minimal:amd64
-libpython2.7-stdlib:amd64
-libpython3.6:amd64
-libpython3.6-dev:amd64
-libpython3.6-minimal:amd64
-libpython3.6-stdlib:amd64
-libpython3-dev:amd64
-libpython3-stdlib:amd64
-libpython-all-dev:amd64
-libpython-dev:amd64
-libpython-stdlib:amd64
-libquadmath0:amd64
-librados2
-libraw1394-11:amd64
-librbd1
-librdmacm1:amd64
-libreadline5:amd64
-libreadline7:amd64
-libroken18-heimdal:amd64
-librtmp1:amd64
-libsamplerate0:amd64
-libsasl2-2:amd64
-libsasl2-modules:amd64
-libsasl2-modules-db:amd64
-libsdl1.2debian:amd64
-libseccomp2:amd64
-libselinux1:amd64
-libsemanage1:amd64
-libsemanage-common
-libsensors4:amd64
-libsepol1:amd64
-libshout3:amd64
-libsigsegv2:amd64
-libslang2:amd64
-libsmartcols1:amd64
-libsndfile1:amd64
-libsnmp30:amd64
-libsnmp-base
-libsodium23:amd64
-libsoup2.4-1:amd64
-libspeex1:amd64
-libspice-server1:amd64
-libsqlite3-0:amd64
-libss2:amd64
-libssl1.0.0:amd64
-libssl1.1:amd64
-libssl-dev:amd64
-libstdc++6:amd64
-libstdc++-7-dev:amd64
-libsystemd0:amd64
-libtag1v5:amd64
-libtag1v5-vanilla:amd64
-libtasn1-6:amd64
-libterm-readkey-perl
-libtext-charwidth-perl
-libtext-iconv-perl
-libtext-wrapi18n-perl
-libthai0:amd64
-libthai-data
-libtheora0:amd64
-libtiff5:amd64
-libtimedate-perl
-libtinfo5:amd64
-libtsan0:amd64
-libtwolame0:amd64
-libubsan0:amd64
-libudev1:amd64
-libunistring2:amd64
-libunwind8:amd64
-liburi-perl
-libusb-1.0-0:amd64
-libusbredirparser1:amd64
-libutempter0:amd64
-libuuid1:amd64
-libv4l-0:amd64
-libv4lconvert0:amd64
-libvirt0:amd64
-libvirt-bin
-libvirt-clients
-libvirt-daemon
-libvirt-daemon-driver-storage-rbd
-libvirt-daemon-system
-libvirt-dev:amd64
-libvisual-0.4-0:amd64
-libvorbis0a:amd64
-libvorbisenc2:amd64
-libvpx5:amd64
-libwavpack1:amd64
-libwebp6:amd64
-libwind0-heimdal:amd64
-libwrap0:amd64
-libwsman1:amd64
-libwsman-client4:amd64
-libwsman-curl-client-transport1:amd64
-libx11-6:amd64
-libx11-data
-libxau6:amd64
-libxcb1:amd64
-libxcb-render0:amd64
-libxcb-shm0:amd64
-libxdamage1:amd64
-libxdmcp6:amd64
-libxen-4.9:amd64
-libxen-dev:amd64
-libxenstore3.0:amd64
-libxext6:amd64
-libxfixes3:amd64
-libxml2:amd64
-libxml2-dev:amd64
-libxml2-utils
-libxmlsec1:amd64
-libxmlsec1-openssl:amd64
-libxmuu1:amd64
-libxpm4:amd64
-libxrender1:amd64
-libxslt1.1:amd64
-libxslt1-dev:amd64
-libxtables12:amd64
-libxv1:amd64
-libyajl2:amd64
-libyaml-0-2:amd64
-libzmq5:amd64
-libzstd1:amd64
-linux-base
-linux-firmware
-linux-headers-4.15.0-20
-linux-headers-4.15.0-20-generic
-linux-headers-generic
-linux-headers-virtual
-linux-image-4.15.0-20-generic
-linux-image-4.15.0-88-generic
-linux-image-generic
-linux-image-virtual
-linux-libc-dev:amd64
-linux-modules-4.15.0-20-generic
-linux-modules-4.15.0-88-generic
-linux-modules-extra-4.15.0-88-generic
-linux-virtual
-locales
-login
-logrotate
-lsb-base
-lsb-release
-lshw
-lsof
-ltrace
-lvm2
-lxcfs
-lxd
-lxd-client
-make
-man-db
-manpages
-manpages-dev
-mariadb-client-10.1
-mariadb-client-core-10.1
-mariadb-common
-mariadb-server
-mariadb-server-10.1
-mariadb-server-core-10.1
-mawk
-mdadm
-mime-support
-mlocate
-mokutil
-mount
-msr-tools
-mtr-tiny
-multiarch-support
-mysql-common
-nano
-ncurses-base
-ncurses-bin
-ncurses-term
-netbase
-netcat-openbsd
-netplan.io
-net-tools
-networkd-dispatcher
-nginx
-nginx-common
-nginx-core
-nplan
-ntfs-3g
-openipmi
-open-iscsi
-openssh-client
-openssh-server
-openssh-sftp-server
-openssl
-open-vm-tools
-os-prober
-overlayroot
-parted
-passwd
-pastebinit
-patch
-pciutils
-perl
-perl-base
-perl-modules-5.26
-pinentry-curses
-pkg-config
-plymouth
-plymouth-theme-ubuntu-text
-policykit-1
-pollinate
-popularity-contest
-powermgmt-base
-procps
-psmisc
-publicsuffix
-python
-python2.7
-python2.7-dev
-python2.7-minimal
-python3
-python3.6
-python3.6-dev
-python3.6-minimal
-python3-apport
-python3-apt
-python3-asn1crypto
-python3-attr
-python3-automat
-python3-blinker
-python3-certifi
-python3-cffi-backend
-python3-chardet
-python3-click
-python3-colorama
-python3-commandnotfound
-python3-configobj
-python3-constantly
-python3-crypto
-python3-cryptography
-python3-dbus
-python3-debconf
-python3-debian
-python3-dev
-python3-distro-info
-python3-distupgrade
-python3-distutils
-python3-gdbm:amd64
-python3-gi
-python3-httplib2
-python3-hyperlink
-python3-idna
-python3-incremental
-python3-jinja2
-python3-jsonpatch
-python3-json-pointer
-python3-jsonschema
-python3-jwt
-python3-keyring
-python3-keyrings.alt
-python3-lib2to3
-python3-markupsafe
-python3-minimal
-python3-mysqldb
-python3-newt:amd64
-python3-oauthlib
-python3-openssl
-python3-pam
-python3-pip
-python3-pkg-resources
-python3-problem-report
-python3-pyasn1
-python3-pyasn1-modules
-python3-pymysql
-python3-requests
-python3-requests-unixsocket
-python3-secretstorage
-python3-serial
-python3-service-identity
-python3-setuptools
-python3-six
-python3-software-properties
-python3-systemd
-python3-twisted
-python3-twisted-bin:amd64
-python3-update-manager
-python3-urllib3
-python3-virtualenv
-python3-wheel
-python3-xdg
-python3-yaml
-python3-zmq
-python3-zope.interface
-python-all
-python-all-dev
-python-apt
-python-apt-common
-python-asn1crypto
-python-cffi-backend
-python-configparser
-python-crypto
-python-cryptography
-python-dbus
-python-dev
-python-enum34
-python-gi
-python-idna
-python-ipaddress
-python-keyring
-python-keyrings.alt
-python-minimal
-python-mysqldb
-python-openwsman
-python-pip
-python-pip-whl
-python-pkg-resources
-python-pymysql
-python-secretstorage
-python-setuptools
-python-six
-python-virtualenv
-python-wheel
-python-xdg
-qemu-block-extra:amd64
-qemu-kvm
-qemu-system-common
-qemu-system-x86
-qemu-utils
-readline-common
-rsync
-rsyslog
-run-one
-sbsigntool
-screen
-seabios
-secureboot-db
-sed
-sensible-utils
-sgabios
-shared-mime-info
-sharutils
-shim
-shim-signed
-snapd
-socat
-software-properties-common
-sosreport
-squashfs-tools
-ssh-import-id
-strace
-sudo
-systemd
-systemd-sysv
-sysvinit-utils
-tar
-tcpdump
-telnet
-tftpd-hpa
-tftp-hpa
-time
-tmux
-tzdata
-ubuntu-advantage-tools
-ubuntu-keyring
-ubuntu-minimal
-ubuntu-release-upgrader-core
-ubuntu-server
-ubuntu-standard
-ucf
-udev
-ufw
-uidmap
-unattended-upgrades
-unzip
-update-inetd
-update-manager-core
-update-notifier-common
-ureadahead
-usbutils
-util-linux
-uuid-runtime
-vim
-vim-common
-vim-runtime
-vim-tiny
-virtualenv
-wget
-whiptail
-wireless-regdb
-xauth
-xdelta3
-xdg-user-dirs
-xfsprogs
-xinetd
-xkb-data
-xxd
-xz-utils
-zerofree
-zlib1g:amd64
-zlib1g-dev:amd64
-# NOTE (fdegir): pinned docker versions
-docker-ce={{ docker_ce_version }}
-docker-ce-cli={{ docker_ce_cli_version }}
-containerd.io={{ containerd_io_version }}
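
Only the pinned docker entries at the tail of the deleted list are templated;
everything above them is a literal package name. A hypothetical rendering of
that tail (the version strings below are placeholders, not the pinned values):

    $ grep -E '^(docker|containerd)' /tmp/ubuntu.list
    docker-ce=5:18.09.7~3-0~ubuntu-bionic
    docker-ce-cli=5:18.09.7~3-0~ubuntu-bionic
    containerd.io=1.2.6-3
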
diff --git a/playbooks/roles/post-deployment/tasks/configure-jumphost.yml b/playbooks/roles/postinstall/tasks/configure-jumphost.yml
similarity index 92%
rename from playbooks/roles/post-deployment/tasks/configure-jumphost.yml
rename to playbooks/roles/postinstall/tasks/configure-jumphost.yml
index ec16056..3e0cfb5 100644
--- a/playbooks/roles/post-deployment/tasks/configure-jumphost.yml
+++ b/playbooks/roles/postinstall/tasks/configure-jumphost.yml
@@ -20,13 +20,6 @@
 - name: Load execution mode variables
   include_vars: "{{ execution_mode }}.yaml"
 
-- name: Install pip
-  action: |
-    {{ ansible_pkg_mgr }} name={{ item }} state=present update_cache=true
-  with_items:
-    - "python-pip"
-    - "python-setuptools"
-
 - name: Install openshift
   pip:
     name: openshift
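
With the pip bootstrap removed, this role now only installs the openshift
client, which is the library Ansible's k8s modules import at runtime. A quick
hedged check that the dependency chain is in place on the jumphost:

    python -c 'import openshift, kubernetes' && echo 'k8s module deps OK'
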
diff --git a/playbooks/roles/post-deployment/tasks/configure-localhost.yml b/playbooks/roles/postinstall/tasks/configure-localhost.yml
similarity index 100%
rename from playbooks/roles/post-deployment/tasks/configure-localhost.yml
rename to playbooks/roles/postinstall/tasks/configure-localhost.yml
diff --git a/playbooks/roles/post-deployment/tasks/main.yml b/playbooks/roles/postinstall/tasks/main.yml
similarity index 100%
rename from playbooks/roles/post-deployment/tasks/main.yml
rename to playbooks/roles/postinstall/tasks/main.yml
diff --git a/playbooks/roles/post-deployment/vars/main.yaml b/playbooks/roles/postinstall/vars/main.yaml
similarity index 100%
rename from playbooks/roles/post-deployment/vars/main.yaml
rename to playbooks/roles/postinstall/vars/main.yaml
diff --git a/playbooks/roles/post-deployment/vars/offline-deployment.yaml b/playbooks/roles/postinstall/vars/offline-deployment.yaml
similarity index 100%
rename from playbooks/roles/post-deployment/vars/offline-deployment.yaml
rename to playbooks/roles/postinstall/vars/offline-deployment.yaml
diff --git a/playbooks/roles/post-deployment/vars/online-deployment.yaml b/playbooks/roles/postinstall/vars/online-deployment.yaml
similarity index 100%
rename from playbooks/roles/post-deployment/vars/online-deployment.yaml
rename to playbooks/roles/postinstall/vars/online-deployment.yaml
diff --git a/playbooks/roles/prepare-kubespray-artifacts/tasks/main.yaml b/playbooks/roles/prepare-kubespray-artifacts/tasks/main.yaml
deleted file mode 100644
index a575c5b..0000000
--- a/playbooks/roles/prepare-kubespray-artifacts/tasks/main.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-# ============LICENSE_START=======================================================
-#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# ============LICENSE_END=========================================================
-
-- name: Get list of k8s container image tarfiles
-  find:
-    path: "{{ engine_workspace }}/offline/containers"
-    patterns: '*.tar'
-  register: container_image
-
-# NOTE (fdegir): the user may not be a member of the docker group so we need root
-# TODO (fdegir): we can perhaps skip loading already existing images here
-- name: Load k8s container images from tarfiles
-  shell: docker load < {{ item.path }}
-  loop: "{{ container_image.files }}"
-  changed_when: false
-  become: true
-
-# NOTE (fdegir): escaping curly brackets in ansible is unfortunately quite messy
-# we also shouldn't attempt to tag and push container images that are already on the local registry
-# NOTE (fdegir): we skip any image that is already on engine.local as well as any image without a tag
-- name: Get list of loaded k8s container images to push
-  shell: |
-    set -o pipefail
-    docker images --format '{{ '{{' }}.Repository{{ '}}' }}':'{{ '{{' }}.Tag{{ '}}' }}' | grep -v '{{ server_fqdn }}\|<none>' | sort
-  args:
-    executable: /bin/bash
-  changed_when: false
-  become: true
-  register: container_images
-
-# (eeiafr) workaround for nexus3.onap.org:10001 since a second ':' is not allowed when pushing
-
-- name: Create dict of k8s container images to tag and push
-  set_fact:
-    container_images_dict: "{{ ( container_images_dict | default({}) ) |\
-     combine({item: item | regex_replace('.*?.io/', '') | regex_replace('nexus3.onap.org:10001/', '') }) }}"
-  loop: "{{ container_images.stdout_lines }}"
-
-# TODO (fdegir): it is messy to use ansible module for tagging and pushing but we can still look into it
-# TODO (fdegir): we can perhaps skip tagging & pushing already existing images here
-- name: Tag and push k8s container images to local registry
-  shell: |
-    docker tag {{ item.key }} {{ server_fqdn }}/{{ item.value }}
-    docker push {{ server_fqdn }}/{{ item.value }}
-  with_dict: "{{ container_images_dict }}"
-  changed_when: false
-  become: true
-
-# vim: set ts=2 sw=2 expandtab:
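
Because the curly-bracket escaping in the deleted task is hard to read, this is
what it renders to on the shell, with engine.local standing in for server_fqdn
and a made-up image name for illustration:

    docker images --format '{{.Repository}}:{{.Tag}}' | grep -v 'engine.local\|<none>' | sort
    # then, for each discovered image:
    docker tag k8s.gcr.io/kube-apiserver:v1.15.3 engine.local/kube-apiserver:v1.15.3
    docker push engine.local/kube-apiserver:v1.15.3
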
diff --git a/playbooks/scenarios/k8-calico-istio.yaml b/playbooks/scenarios/k8-calico-istio.yaml
new file mode 100644
index 0000000..37ecc4a
--- /dev/null
+++ b/playbooks/scenarios/k8-calico-istio.yaml
@@ -0,0 +1,54 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to Calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Istio
+  import_playbook: "../apps/istio/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
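
Every scenario added in this change follows the same shape: a preinstall play
that patches the kubespray group vars in place, followed by imported
postinstall playbooks. A hypothetical invocation that runs only the preinstall
half and then verifies the lineinfile edit (entry point and inventory handling
are assumptions, not taken from this change):

    ansible-playbook playbooks/scenarios/k8-calico-istio.yaml --tags preinstall
    grep '^kube_network_plugin' \
      "${ENGINE_CACHE}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
    # -> kube_network_plugin: calico
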
diff --git a/playbooks/scenarios/k8-calico-nofeature.yaml b/playbooks/scenarios/k8-calico-nofeature.yaml
new file mode 100644
index 0000000..ac0afc5
--- /dev/null
+++ b/playbooks/scenarios/k8-calico-nofeature.yaml
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to Calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-calico-spinnaker.yaml b/playbooks/scenarios/k8-calico-spinnaker.yaml
new file mode 100644
index 0000000..a8c858a
--- /dev/null
+++ b/playbooks/scenarios/k8-calico-spinnaker.yaml
@@ -0,0 +1,54 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to calico
+    - name: Set network plugin to Calico
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Spinnaker
+  import_playbook: "../apps/spinnaker/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-canal-nofeature.yaml b/playbooks/scenarios/k8-canal-nofeature.yaml
new file mode 100644
index 0000000..caac800
--- /dev/null
+++ b/playbooks/scenarios/k8-canal-nofeature.yaml
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to canal
+    - name: Set network plugin to Canal
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: canal"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-cilium-nofeature.yaml b/playbooks/scenarios/k8-cilium-nofeature.yaml
new file mode 100644
index 0000000..27e098c
--- /dev/null
+++ b/playbooks/scenarios/k8-cilium-nofeature.yaml
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to cilium
+    - name: Set network plugin to Cilium
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: cilium"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-flannel-nofeature.yaml b/playbooks/scenarios/k8-flannel-nofeature.yaml
new file mode 100644
index 0000000..5d87f00
--- /dev/null
+++ b/playbooks/scenarios/k8-flannel-nofeature.yaml
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to flannel
+    - name: Set network plugin to Flannel
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: flannel"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-multus-nofeature.yaml b/playbooks/scenarios/k8-multus-nofeature.yaml
new file mode 100644
index 0000000..7c54072
--- /dev/null
+++ b/playbooks/scenarios/k8-multus-nofeature.yaml
@@ -0,0 +1,57 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set calico as the master plugin that multus uses as the primary network
+    - name: Configure Multus to use Calico as the primary network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+    # enable multus as a meta plugin on top of the primary network plugin
+    - name: Enable Multus network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin_multus:.*"
+        line: "kube_network_plugin_multus: true"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/scenarios/k8-multus-plugins.yaml b/playbooks/scenarios/k8-multus-plugins.yaml
new file mode 100644
index 0000000..ad56075
--- /dev/null
+++ b/playbooks/scenarios/k8-multus-plugins.yaml
@@ -0,0 +1,72 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set calico as the master plugin that multus uses as the primary network
+    - name: Configure Multus to use Calico as the primary network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: calico"
+
+    # enable multus as a meta plugin on top of the primary network plugin
+    - name: Enable Multus network plugin
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin_multus:.*"
+        line: "kube_network_plugin_multus: true"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- hosts: k8s-cluster
+  gather_facts: false
+  become: false
+  tags:
+    - postinstall
+
+  tasks:
+    - name: Include Kubespray vars
+      include_vars: "{{ item }}"
+      with_items:
+        - "{{ engine_cache }}/repos/kubespray/roles/kubespray-defaults/defaults/main.yaml"
+        - "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+
+    - include_tasks: "{{ engine_cache }}/repos/kubespray/roles/network_plugin/cni/tasks/main.yml"
+
+# vim: set ts=2 sw=2 expandtab:
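
Unlike k8-multus-nofeature, this scenario also runs kubespray's cni role tasks
on the cluster nodes so the additional plugin binaries get installed. A hedged
post-check on any k8s-cluster node, relying only on the conventional CNI
binary directory:

    # the extra plugin set should now sit alongside the primary plugin
    ls /opt/cni/bin    # e.g. bridge, host-local, ipvlan, macvlan, ...
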
diff --git a/playbooks/scenarios/k8-weave-nofeature.yaml b/playbooks/scenarios/k8-weave-nofeature.yaml
new file mode 100644
index 0000000..fb50f05
--- /dev/null
+++ b/playbooks/scenarios/k8-weave-nofeature.yaml
@@ -0,0 +1,50 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# NOTE (fdegir): scenario specific preinstall tasks
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - preinstall
+
+  tasks:
+    # set networking plugin to weave
+    - name: Set network plugin to Weave
+      lineinfile:
+        path: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+        regexp: "^kube_network_plugin:.*"
+        line: "kube_network_plugin: weave"
+
+# NOTE (fdegir): common postinstall tasks
+- name: Execute common postinstall tasks
+  import_playbook: "../postinstall.yaml"
+  tags: postinstall
+
+# NOTE (fdegir): scenario specific postinstall tasks
+- name: Install CEPH
+  import_playbook: "../apps/ceph/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+- name: Install Prometheus
+  import_playbook: "../apps/prometheus/kubespray/playbooks/install.yml"
+  tags: postinstall
+
+# vim: set ts=2 sw=2 expandtab: