# blob: 1077ee802fd5d3e1ddec50ae06f9b2d1a48939c3
---
# Ansible defaults for an RKE (Rancher Kubernetes Engine) deployment role.

# Filename of the RKE release binary this role works with.
rke_binary: rke_linux-amd64
# User account used for/by RKE on the nodes.
rke_username: rke
# Directory where the rke binary is installed.
rke_bin_dir: /usr/local/bin
# Directory holding the kubeconfig for the current user
# ({{ ansible_env.HOME }} expands at runtime on the managed host).
kube_config_dir: "{{ ansible_env.HOME }}/.kube"
# Standard Kubernetes configuration directory on the nodes.
kubernetes_config_dir: "/etc/kubernetes"
kubelet:
  # Filename of the static kubelet configuration shipped by this role.
  static_config: "kubelet-static-config.yml"
  # Kubelet --runtime-request-timeout value (Go duration string).
  runtime_request_timeout: "2m0s"
# Directory for cluster configuration; app_data_path is expected to be
# defined elsewhere (inventory/group vars) — not visible in this file.
cluster_config_dir: "{{ app_data_path }}/cluster"
# Whether dashboard is exposed.
rke_dashboard_exposed: true
# DNS configuration overrides for RKE; empty mapping means defaults apply.
rke_dns: {}
# etcd storage tuning for the RKE-managed etcd service.
rke_etcd:
  # By default rke creates bind mount:
  # /var/lib/etcd -> /var/lib/rancher/etcd
  # These parameters provide means of modifying it:
  # - custom bind mount
  # - option to use volatile storage
  # Custom bind mount
  #
  # I did not find a proper way (in the docs) how to override the
  # defaults so I just abuse the extra_* args for the rke etcd
  # service. It means that it will create another mount point in the
  # container and you should use different pathnames than default...
  #
  # The custom bind mount is by default disabled.
  enabled_custom_etcd_storage: false
  # Applied only if the custom mount is enabled.
  # Paths must be absolute (start with '/')
  #
  # Path on the kubernetes/etcd node
  storage_path: /var/lib/etcd-custom
  # Path inside the container where it is mounted.
  storage_mountpoint: /var/lib/rancher/etcd-custom
  # On top of it (with or without custom mount) you can use tmpfs
  # as a volatile storage.
  #
  # CAUTION: This will create a temporary filesystem (in memory),
  # so if an etcd node is powered off, all etcd data will be
  # lost!!!
  #
  # Do not enable this unless you can afford to lose the cluster state!
  #
  # This is intended as an attempt to make the deployment a little
  # bit faster. It is disabled by default.
  enabled_unsafe_volatile_storage: false
  # Size of the volatile storage - tmpfs (this will eat your RAM)
  tmpfs_size: 5G
# Container images used by RKE for the cluster system components.
# NOTE(review): these tags presumably have to match the pinned
# kubernetes (hyperkube v1.15.4-rancher1) release — verify against the
# RKE system-images list for that version before bumping any of them.
rke:
  # rke (rancher) images

  # etcd datastore.
  etcd: "rancher/coreos-etcd:v3.3.10-rancher1"
  # rke-tools image reused for several helper roles below.
  alpine: "rancher/rke-tools:v0.1.50"
  nginx_proxy: "rancher/rke-tools:v0.1.50"
  cert_downloader: "rancher/rke-tools:v0.1.50"
  kubernetes_services_sidecar: "rancher/rke-tools:v0.1.50"
  # kube-dns stack (legacy DNS provider).
  kubedns: "rancher/k8s-dns-kube-dns:1.15.0"
  dnsmasq: "rancher/k8s-dns-dnsmasq-nanny:1.15.0"
  kubedns_sidecar: "rancher/k8s-dns-sidecar:1.15.0"
  kubedns_autoscaler: "rancher/cluster-proportional-autoscaler:1.3.0"
  # CoreDNS stack.
  coredns: "rancher/coredns-coredns:1.3.1"
  coredns_autoscaler: "rancher/cluster-proportional-autoscaler:1.3.0"
  # Main Kubernetes components image.
  kubernetes: "rancher/hyperkube:v1.15.4-rancher1"
  # CNI network plugin images (flannel / calico / canal / weave).
  flannel: "rancher/coreos-flannel:v0.11.0-rancher1"
  flannel_cni: "rancher/flannel-cni:v0.3.0-rancher5"
  calico_node: "rancher/calico-node:v3.7.4"
  calico_cni: "rancher/calico-cni:v3.7.4"
  calico_controllers: "rancher/calico-kube-controllers:v3.7.4"
  calico_ctl: "rancher/calico-ctl:v2.0.0"
  canal_node: "rancher/calico-node:v3.7.4"
  canal_cni: "rancher/calico-cni:v3.7.4"
  canal_flannel: "rancher/coreos-flannel:v0.11.0"
  weave_node: "weaveworks/weave-kube:2.5.2"
  weave_cni: "weaveworks/weave-npc:2.5.2"
  # Pause container for pod infrastructure.
  pod_infra_container: "rancher/pause:3.1"
  # Ingress controller and its default backend.
  ingress: "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"
  ingress_backend: "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1"
  # Cluster metrics (metrics-server).
  metrics_server: "rancher/metrics-server:v0.3.3"