---
# DO NOT ADD SPACE AROUND ';'
- name: Start rancher server container
  docker_container:
    name: rancher-server
    image: "{{ rancher_server_image }}"
    command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
    ports: 8080:8080
    state: started
    restart_policy: unless-stopped
    volumes:
      - "{{ app_data_path }}/certs:/usr/local/share/ca-certificates/extra:ro"
- name: Wait for rancher server to be ready
  uri:
    url: "{{ rancher_server_url }}/v2-beta"
  register: response
  retries: 10
  delay: 30
  until: not response.failed
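# rancher_k8s_environment is presumably a custom module shipped with this
# role/repository; judging from the facts set further below, it returns the
# new environment id, an API key-pair and agent registration token data in
# env.data.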
- name: Create rancher kubernetes environment
  rancher_k8s_environment:
    name: "{{ app_name }}"
    descr: "Kubernetes environment for {{ app_name }}"
    server: "{{ rancher_server_url }}"
    delete_other_k8s: "{{ rancher_remove_other_env }}"
    force: "{{ rancher_redeploy_k8s_env }}"
    host_os: "{{ ansible_os_family }}"
  register: env
  retries: 10
  delay: 5
  until: env.data is defined
# The previous task is not idempotent, so a new API key-pair is created
# with each run.
#
# ToDo: fix idempotency of the rancher role
#
# As RKE will be the default k8s orchestrator in Dublin, this is considered
# a low-priority issue. The following tasks dealing with the API ignore the
# problem and simply use the newly created API key-pair, which is set as a
# fact here:
- name: Set apikey values
  set_fact:
    k8s_env_id: "{{ env.data.environment.id }}"
    key_public: "{{ env.data.apikey.public }}"
    key_private: "{{ env.data.apikey.private }}"
    rancher_agent_image: "{{ env.data.registration_tokens.image }}"
    rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
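# key_public/key_private serve as the account API key for the rancher1_api
# calls below; rancher_agent_image and rancher_agent_reg_url are presumably
# consumed by agent registration tasks elsewhere in the role.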
# Disabled by default - when enabled, this playbook cannot be run more than once.
- name: Setup rancher admin password and enable authentication
  rancher1_api:
    server: "{{ rancher_server_url }}"
    account_key: "{{ key_public }}:{{ key_private }}"
    mode: access_control
    data:
      account_id: 1a1 # default rancher admin account
      password: "{{ rancher.admin_password }}"
  when: "rancher.auth_enabled is defined and rancher.auth_enabled"
- name: Configure the size of the rancher cattle db and logs
  block:
    - name: Main tables
      rancher1_api:
        server: "{{ rancher_server_url }}"
        account_key: "{{ key_public }}:{{ key_private }}"
        mode: settings
        data:
          option: main_tables.purge.after.seconds
          value: "{{ rancher.main_tables_purge_after_seconds }}"
    - name: Events
      rancher1_api:
        server: "{{ rancher_server_url }}"
        account_key: "{{ key_public }}:{{ key_private }}"
        mode: settings
        data:
          option: events.purge.after.seconds
          value: "{{ rancher.events_purge_after_seconds }}"
    - name: Service log
      rancher1_api:
        server: "{{ rancher_server_url }}"
        account_key: "{{ key_public }}:{{ key_private }}"
        mode: settings
        data:
          option: service_log.purge.after.seconds
          value: "{{ rancher.service_log_purge_after_seconds }}"
    - name: Audit log
      rancher1_api:
        server: "{{ rancher_server_url }}"
        account_key: "{{ key_public }}:{{ key_private }}"
        mode: settings
        data:
          option: audit_log.purge.after.seconds
          value: "{{ rancher.audit_log_purge_after_seconds }}"
- name: Ensure .kube directory exists
  file:
    path: "{{ kube_directory }}"
    state: directory
- name: Create kube config
  template:
    src: kube_config.j2
    dest: "{{ kube_directory }}/config"