Configure Kubespray installer to install Kubernetes

This change configures everything to install Kubernetes using
Kubespray.

As noted in the corresponding files, there are things that could
be pushed into the scenario and it will be done once the basic
installation works from a single place.

Apart from that, there is a workaround in place to avoid the
Kubespray bug.

https://github.com/kubernetes-sigs/kubespray/issues/4300

Change-Id: I5e3a6831bd371f49b57c41f106e3b854f835f911
diff --git a/playbooks/roles/configure-installer/files/docker.yml b/playbooks/roles/configure-installer/files/docker.yml
new file mode 100644
index 0000000..8347c43
--- /dev/null
+++ b/playbooks/roles/configure-installer/files/docker.yml
@@ -0,0 +1,67 @@
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
+docker_container_storage_setup: false
+
+## A disk path must be defined for docker_container_storage_setup_devs.
+## Otherwise docker-storage-setup will be executed incorrectly.
+#docker_container_storage_setup_devs: /dev/vdb
+
+## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+docker_dns_servers_strict: false
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## Used to set docker daemon iptables options to true
+docker_iptables_enabled: "false"
+
+# Docker log options
+# Rotate container stderr/stdout logs at 50m and keep last 5
+docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
+
+# define docker bin_dir
+docker_bin_dir: "/usr/bin"
+
+# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
+# kubespray deletes the docker package on each run, so caching the package makes sense
+docker_rpm_keepcache: 0
+
+## An obvious use case is allowing insecure-registry access to self hosted registries.
+## Can be ipaddress and domain_name.
+## example define 172.19.16.11 or mirror.registry.io
+#docker_insecure_registries:
+#   - mirror.registry.io
+#   - 172.19.16.11
+
+## Add other registries, for example a China registry mirror.
+#docker_registry_mirrors:
+#   - https://registry.docker-cn.com
+#   - https://mirror.aliyuncs.com
+
+## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which control whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for system default
+#docker_mount_flags:
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+docker_options: >-
+  {%- if docker_insecure_registries is defined %}
+  {{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
+  {%- endif %}
+  {% if docker_registry_mirrors is defined %}
+  {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
+  {%- endif %}
+  {%- if docker_version is defined %}
+  --data-root={{ docker_daemon_graph }} {{ docker_log_opts }}
+  {%- endif %}
+  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+  {%- endif -%}
diff --git a/playbooks/roles/configure-installer/files/k8s-cluster.yml b/playbooks/roles/configure-installer/files/k8s-cluster.yml
new file mode 100644
index 0000000..f9d1727
--- /dev/null
+++ b/playbooks/roles/configure-installer/files/k8s-cluster.yml
@@ -0,0 +1,193 @@
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.12.5
+
+# kubernetes image repo define
+kube_image_repo: "gcr.io/google-containers"
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Directory where credentials will be stored
+credentials_dir: "{{ inventory_dir }}/credentials"
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
+kube_users:
+  kube:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+    groups:
+      - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+#kube_basic_auth: false
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
+# kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
+# kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:
+
+
+# Choose network plugin (cilium, calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network.  With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+#kube_apiserver_insecure_port: 8080 # (http)
+# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
+kube_apiserver_insecure_port: 0 # (disabled)
+
+# Kube-proxy proxyMode configuration.
+# Can be ipvs, iptables
+kube_proxy_mode: ipvs
+
+# Kube-proxy nodeport address.
+# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest
+kube_proxy_nodeport_addresses: false
+# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
+
+## Encrypting Secret Data at Rest (experimental)
+kube_encrypt_secret_data: false
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
+dns_mode: coredns
+# Set manual server if using a custom cluster DNS server
+#manual_dns_server: 10.x.x.x
+
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+## Container runtime
+## docker for docker and crio for cri-o.
+container_manager: docker
+
+## Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# audit log for kubernetes
+kubernetes_audit: false
+
+# dynamic kubelet configuration
+dynamic_kubelet_configuration: false
+
+# define kubelet config dir for dynamic kubelet
+#kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
+# kubectl_localhost: false
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+#  - /resolvethiszone.with/10.0.4.250
+#  - 8.8.8.8
+
+#  Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful for example to setup a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
+
+## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
+## See https://github.com/kubernetes-sigs/kubespray/issues/2141
+## Set this variable to true to get rid of this issue
+volume_cross_zone_attachment: false
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+## Container Engine Acceleration
+## Enable container acceleration feature, for example use gpu acceleration in containers
+# nvidia_accelerator_enabled: true
+## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes, leave empty or comment out if you don't want to install drivers.
+## Labels and taints won't be set to nodes if they are not in the array.
+# nvidia_gpu_nodes:
+#   - kube-gpu-001
+# nvidia_driver_version: "384.111"
+## flavor can be tesla or gtx
+# nvidia_gpu_flavor: gtx
diff --git a/playbooks/roles/configure-installer/files/main.yaml b/playbooks/roles/configure-installer/files/main.yaml
new file mode 100644
index 0000000..98d7504
--- /dev/null
+++ b/playbooks/roles/configure-installer/files/main.yaml
@@ -0,0 +1,463 @@
+---
+# Use proxycommand if bastion host is in group all
+# This change obsoletes editing the ansible.cfg file depending on bastion existence
+ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
+
+kube_api_anonymous_auth: false
+
+# Default value, but will be set to true automatically if detected
+is_atomic: false
+
+# optional disable the swap
+disable_swap: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.12.5
+
+## Kube Proxy mode One of ['iptables','ipvs']
+kube_proxy_mode: ipvs
+
+# Kube-proxy nodeport address.
+# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest
+kube_proxy_nodeport_addresses: false
+# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
+
+# Set to true to allow pre-checks to fail and continue deployment
+ignore_assert_errors: false
+
+# nginx-proxy configure
+nginx_config_dir: "/etc/nginx"
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+docker_bin_dir: /usr/bin
+etcd_data_dir: /var/lib/etcd
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns, manual or none
+dns_mode: coredns
+
+# Should be set to a cluster IP if using a custom cluster DNS
+# manual_dns_server: 10.x.x.x
+
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+kube_dns_servers:
+  kubedns: ["{{skydns_server}}"]
+  coredns: ["{{skydns_server}}"]
+  coredns_dual: "{{[skydns_server] + [ skydns_server_secondary ]}}"
+  manual: ["{{manual_dns_server}}"]
+  dnsmasq_kubedns: ["{{dnsmasq_dns_server}}"]
+
+dns_servers: "{{kube_dns_servers[dns_mode]}}"
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+kube_api_pwd: "changeme"
+kube_users:
+  kube:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+
+# Choose network plugin (cilium, calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+kube_network_plugin_multus: false
+
+# Determines if calico-rr group exists
+peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
+
+# Set to false to disable calico-upgrade
+calico_upgrade_enabled: true
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network.  With these defaults you should have
+# room for 64 nodes with 254 pods per node.
+# Example: Up to 256 nodes, 100 pods per node (/16 network):
+#  - kube_service_addresses: 10.233.0.0/17
+#  - kube_pods_subnet: 10.233.128.0/17
+#  - kube_network_node_prefix: 25
+# Example: Up to 4096 nodes, 100 pods per node (/12 network):
+#  - kube_service_addresses: 10.192.0.0/13
+#  - kube_pods_subnet: 10.200.0.0/13
+#  - kube_network_node_prefix: 25
+kube_network_node_prefix: 24
+
+# The virtual cluster IP, real host IPs and ports the API Server will be
+# listening on.
+# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
+# access IP value (automatically evaluated below)
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_bind_address: 0.0.0.0
+# https
+kube_apiserver_port: 6443
+# http
+kube_apiserver_insecure_bind_address: 127.0.0.1
+kube_apiserver_insecure_port: 0
+
+# dynamic kubelet configuration
+dynamic_kubelet_configuration: false
+
+# define kubelet config dir for dynamic kubelet
+# kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
+
+# Aggregator
+kube_api_aggregator_routing: false
+
+# Profiling
+kube_profiling: false
+
+# Container for runtime
+container_manager: docker
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+# docker_storage_options: -s overlay2
+
+## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
+docker_container_storage_setup: false
+
+## A disk path must be defined for docker_container_storage_setup_devs.
+## Otherwise docker-storage-setup will be executed incorrectly.
+# docker_container_storage_setup_devs: /dev/vdb
+
+## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+docker_dns_servers_strict: false
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## Used to set docker daemon iptables options to true
+docker_iptables_enabled: "false"
+
+# Docker log options
+# Rotate container stderr/stdout logs at 50m and keep last 5
+docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
+
+## An obvious use case is allowing insecure-registry access to self hosted registries.
+## Can be ipaddress and domain_name.
+## example define 172.19.16.11 or mirror.registry.io
+# docker_insecure_registries:
+#   - mirror.registry.io
+#   - 172.19.16.11
+
+## Add other registries, for example a China registry mirror.
+# docker_registry_mirrors:
+#   - https://registry.docker-cn.com
+#   - https://mirror.aliyuncs.com
+
+## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which control whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for system default
+# docker_mount_flags:
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+docker_options: >-
+  {%- if docker_insecure_registries is defined %}
+  {{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
+  {%- endif %}
+  {% if docker_registry_mirrors is defined %}
+  {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
+  {%- endif %}
+  {%- if docker_version is defined %}
+  --data-root={{ docker_daemon_graph }} {{ docker_log_opts }}
+  {%- endif %}
+  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+  {%- endif -%}
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: docker
+cert_management: script
+
+helm_deployment_type: host
+
+# Enable kubeadm deployment
+kubeadm_enabled: true
+
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
+kubeconfig_localhost: false
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
+kubectl_localhost: false
+
+# Define credentials_dir here so it can be overridden
+credentials_dir: "{{ inventory_dir }}/credentials"
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Addons which can be enabled
+helm_enabled: false
+registry_enabled: false
+metrics_server_enabled: false
+enable_network_policy: true
+local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
+persistent_volumes_enabled: false
+cephfs_provisioner_enabled: false
+ingress_nginx_enabled: false
+cert_manager_enabled: false
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+# openstack_blockstorage_version: "v1/v2/auto (default)"
+openstack_blockstorage_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+openstack_lbaas_enabled: false
+# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+## To enable automatic floating ip provisioning, specify a subnet.
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+## Override default LBaaS behavior
+# openstack_lbaas_use_octavia: False
+# openstack_lbaas_method: "ROUND_ROBIN"
+# openstack_lbaas_provider: "haproxy"
+openstack_lbaas_create_monitor: "yes"
+openstack_lbaas_monitor_delay: "1m"
+openstack_lbaas_monitor_timeout: "30s"
+openstack_lbaas_monitor_max_retries: "3"
+openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
+
+## List of authorization modes that must be configured for
+## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
+## 'RBAC' modes are tested. Order is important.
+authorization_modes: ['Node', 'RBAC']
+rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
+
+# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
+kubelet_authentication_token_webhook: true
+
+# When enabled, access to the kubelet API requires authorization by delegation to the API server
+kubelet_authorization_mode_webhook: false
+
+## v1.11 feature
+feature_gate_v1_11:
+  - "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
+  - "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
+  - "MountPropagation={{ local_volume_provisioner_enabled | string }}"
+
+## v1.12 feature
+feature_gate_v1_12: []
+
+## List of key=value pairs that describe feature gates for
+## the k8s cluster.
+kube_feature_gates: |-
+  {%- if kube_version is version('v1.12.0', '<') -%}
+  {{ feature_gate_v1_11 }}
+  {%- else -%}
+  {{ feature_gate_v1_12 }}
+  {%- endif %}
+
+# Local volume provisioner storage classes
+local_volume_provisioner_storage_classes:
+  - name: "{{ local_volume_provisioner_storage_class | default('local-storage') }}"
+    host_dir: "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}"
+    mount_dir: "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}"
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allow to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# This two variable are automatically changed by the weave's role in group_vars/k8s-cluster.yml.
+# Do not manually change these values
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Contiv L3 BGP Mode
+contiv_peer_with_uplink_leaf: false
+contiv_global_as: "65002"
+contiv_global_neighbor_as: "500"
+
+## Set no_proxy to all assigned cluster IPs and hostnames
+no_proxy: >-
+  {%- if http_proxy is defined or https_proxy is defined %}
+  {%- if loadbalancer_apiserver is defined -%}
+  {{ apiserver_loadbalancer_domain_name| default('') }},
+  {{ loadbalancer_apiserver.address | default('') }},
+  {%- endif -%}
+  {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
+  {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }},
+  {%-   if (item != hostvars[item]['ansible_hostname']) -%}
+  {{ hostvars[item]['ansible_hostname'] }},
+  {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
+  {%-   endif -%}
+  {{ item }},{{ item }}.{{ dns_domain }},
+  {%- endfor -%}
+  {%- if additional_no_proxy is defined -%}
+  {{ additional_no_proxy }},
+  {%- endif -%}
+  127.0.0.1,localhost
+  {%- endif %}
+
+proxy_env:
+  http_proxy: "{{ http_proxy| default ('') }}"
+  https_proxy: "{{ https_proxy| default ('') }}"
+  no_proxy: "{{ no_proxy| default ('') }}"
+
+ssl_ca_dirs: >-
+  [
+  {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
+  '/usr/share/ca-certificates',
+  {% elif ansible_os_family == 'RedHat' -%}
+  '/etc/pki/tls',
+  '/etc/pki/ca-trust',
+  {% elif ansible_os_family == 'Debian' -%}
+  '/usr/share/ca-certificates',
+  {% endif -%}
+  ]
+
+# Vars for pointing to kubernetes api endpoints
+is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
+# applied if only external loadbalancer_apiserver is defined, otherwise ignored
+apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
+kube_apiserver_endpoint: |-
+  {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+       https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+  {%- elif is_kube_master -%}
+       https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
+  {%- else -%}
+  {%-   if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
+       https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+  {%-   else -%}
+       https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+  {%-  endif -%}
+  {%- endif %}
+kube_apiserver_insecure_endpoint: >-
+  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
+kube_apiserver_client_cert: |-
+  {% if kubeadm_enabled -%}
+  {{ kube_cert_dir }}/ca.crt
+  {%- else -%}
+  {{ kube_cert_dir }}/apiserver.pem
+  {%- endif %}
+kube_apiserver_client_key: |-
+  {% if kubeadm_enabled -%}
+  {{ kube_cert_dir }}/ca.key
+  {%- else -%}
+  {{ kube_cert_dir }}/apiserver-key.pem
+  {%- endif %}
+
+# Set to true to deploy etcd-events cluster
+etcd_events_cluster_enabled: false
+
+# Vars for pointing to etcd endpoints
+is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
+etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
+etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+etcd_client_url: "https://{{ etcd_access_address }}:2379"
+etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
+etcd_events_client_url: "https://{{ etcd_events_access_address }}:2381"
+etcd_access_addresses: |-
+  {% for item in groups['etcd'] -%}
+    https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
+  {%- endfor %}
+etcd_events_access_addresses_list: |-
+  [
+  {% for item in groups['etcd'] -%}
+    'https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381'{% if not loop.last %},{% endif %}
+  {%- endfor %}
+  ]
+etcd_events_access_addresses: "{{etcd_events_access_addresses_list | join(',')}}"
+etcd_events_access_addresses_semicolon: "{{etcd_events_access_addresses_list | join(';')}}"
+# user should set etcd_member_name in inventory/mycluster/hosts.ini
+etcd_member_name: |-
+  {% for host in groups['etcd'] %}
+  {%   if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %}
+  {% endfor %}
+etcd_peer_addresses: |-
+  {% for item in groups['etcd'] -%}
+    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
+  {%- endfor %}
+etcd_events_peer_addresses: |-
+  {% for item in groups['etcd'] -%}
+    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
+  {%- endfor %}
+
+podsecuritypolicy_enabled: false
+etcd_heartbeat_interval: "250"
+etcd_election_timeout: "5000"
+etcd_snapshot_count: "10000"
+
+certificates_key_size: 2048
+certificates_duration: 36500
+
+pip_extra_args: |-
+  {%- set pip_extra_args_list = [] -%}
+  {%- if pyrepo_index is defined -%}
+  {%- set DO = pip_extra_args_list.append('--index-url %s' | format(pyrepo_index)) -%}
+  {%- if pyrepo_cert is defined -%}
+  {%- set DO = pip_extra_args_list.append('--cert %s' | format(pyrepo_cert)) -%}
+  {%- endif -%}
+  {%- endif -%}
+  {{ pip_extra_args_list|join(' ') }}
diff --git a/playbooks/roles/configure-installer/tasks/main.yml b/playbooks/roles/configure-installer/tasks/main.yml
new file mode 100644
index 0000000..b850457
--- /dev/null
+++ b/playbooks/roles/configure-installer/tasks/main.yml
@@ -0,0 +1,70 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+- name: Remove existing files and directories
+  file:
+    path: "{{ engine_cache }}/repos/kubespray"
+    state: absent
+
+- name: Clone kubespray repository and checkout '{{ kubespray_version }}'
+  git:
+    repo: "{{ kubespray_git_url }}"
+    dest: "{{ engine_cache }}/repos/kubespray"
+    version: "{{ kubespray_version }}"
+    force: yes
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+    no_proxy: "{{ lookup('env','no_proxy') }}"
+
+# TODO: some stuff below could and perhaps should be pushed into the scenario
+# it is put here to make basic installation work without complexities
+- name: Put initial structure in place
+  synchronize:
+    src: "{{ engine_cache }}/repos/kubespray/inventory/sample/"
+    dest: "{{ engine_cache }}/repos/kubespray/inventory/engine"
+    recursive: yes
+    delete: yes
+
+# TODO: reuse of bifrost dynamic inventory is only valid for the cases that use
+# bifrost for provisioning so this needs fixing and we could perhaps switch to
+# our own inventory, dynamic or static.
+- name: Place bifrost_inventory.py in Kubespray inventory
+  file:
+    src: "{{ engine_cache }}/repos/bifrost/playbooks/inventory/bifrost_inventory.py"
+    dest: "{{ engine_cache }}/repos/kubespray/inventory/engine/bifrost_inventory.py"
+    state: link
+
+- name: Place scenario k8s-cluster.yml
+  copy:
+    src: "k8s-cluster.yml"
+    dest: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/k8s-cluster/k8s-cluster.yml"
+
+# TODO: this task is put here in order to avoid hitting the kubespray bug:
+# https://github.com/kubernetes-sigs/kubespray/issues/4300
+
+- name: WORKAROUND - Place main.yml and docker.yml
+  copy:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+  with_items:
+    - { src: "main.yaml", dest: "{{ engine_cache }}/repos/kubespray/roles/kubespray-defaults/defaults/main.yaml" }
+    - { src: "docker.yml", dest: "{{ engine_cache }}/repos/kubespray/inventory/engine/group_vars/all/docker.yml" }
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/configure-installer/vars/main.yml b/playbooks/roles/configure-installer/vars/main.yml
new file mode 100644
index 0000000..4987cee
--- /dev/null
+++ b/playbooks/roles/configure-installer/vars/main.yml
@@ -0,0 +1,21 @@
+---
+# ============LICENSE_START=======================================================
+#  Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+kubespray_git_url: https://github.com/kubernetes-sigs/kubespray.git
+
+# vim: set ts=2 sw=2 expandtab: