Initial implementation of offline engine deployment
A new option "-x" has been added to execute this use case.
It will use a predefined tar.gz package that contains all
the necessary files to perform the installation of
k8s-calico-nofeature scenario and has to be downloaded in
advance. The structure of this file and how to fetch the
dependencies on an online machine have been documented.
Apt dependencies are handled by apt-cacher-ng that sets up
a proxy in localhost. This is used by the newly created nodes.
Kubespray cache (binaries and docker images) is copied to the
nodes and the playbook is configured to use those files.
In post-deployment, the remaining Docker images are pre-
loaded in the target machines. A better solution would be
to establish a local docker repository, but it is left out
of this patchset.
Change-Id: I3155e3bca56435a97f165a266935e7fec49f1a4d
diff --git a/playbooks/post-deployment.yml b/playbooks/post-deployment.yml
index a23944d..efc1122 100644
--- a/playbooks/post-deployment.yml
+++ b/playbooks/post-deployment.yml
@@ -17,6 +17,34 @@
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================
+# TODO: move these tasks to a more suitable place
+# TODO: load only the necessary images in each node according to their node
+- hosts: k8s-cluster
+ gather_facts: false
+ tasks:
+ - name: Execute preparation for post-deployment in offline mode
+ block:
+ # Load docker images in cache for offline installation
+ # TODO: we should set-up a Docker local repository in kickstart
+ - name: Find cached docker images
+ find:
+ paths: "/tmp/kubespray_cache/images"
+ patterns: '^.*\.tar'
+ use_regex: true
+ file_type: file
+ register: docker_images
+
+ # TODO: use the docker_image Ansible module
+ - name: Load missing docker images into node's memory
+ shell: |
+ set -o pipefail
+ cat "{{ item.path }}" | docker load
+ changed_when: false
+ args:
+ executable: /bin/bash
+ with_items: "{{ docker_images.files }}"
+ when: offline_deployment|bool
+
# check if any post-deployment task defined for the scenario
- hosts: localhost
connection: local
diff --git a/playbooks/roles/configure-installer/tasks/main.yml b/playbooks/roles/configure-installer/tasks/main.yml
index d57dfa5..b754e24 100644
--- a/playbooks/roles/configure-installer/tasks/main.yml
+++ b/playbooks/roles/configure-installer/tasks/main.yml
@@ -21,6 +21,7 @@
file:
path: "{{ engine_cache }}/repos/kubespray"
state: absent
+ when: not offline_deployment|bool
- name: Clone kubespray repository and checkout '{{ kubespray_version }}'
git:
@@ -29,6 +30,7 @@
version: "{{ kubespray_version }}"
force: true
environment: "{{ idf.proxy_settings | default({}) }}"
+ when: not offline_deployment|bool
# TODO: some stuff below could and perhaps should be pushed into the scenario
# it is put here to make basic installation work without complexities
@@ -148,4 +150,24 @@
src: kubespray-extra-vars.yml.j2
dest: "{{ config_path }}/kubespray-extra-vars.yml"
+# Ensure proper configuration for offline deployment
+- include_tasks: prepare-offline.yml
+ when: offline_deployment|bool
+
+# Enable file download for offline "Fetch dependency mode"
+- name: Enable one time file download in master node
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^download_run_once:.*"
+ line: "download_run_once: true"
+ when: offline_dependencies|bool
+
+# Use a fake docker repository to overcome apt-cacher-ng limitation with https
+- name: Configure docker repo to use jumphost apt proxy
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/container-engine/docker/defaults/main.yml"
+ regexp: "^docker_ubuntu_repo_base_url:.*"
+ line: 'docker_ubuntu_repo_base_url: "http://nordix.download.docker.com/linux/ubuntu"'
+ when: offline_dependencies|bool or offline_deployment|bool
+
# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/configure-installer/tasks/prepare-offline.yml b/playbooks/roles/configure-installer/tasks/prepare-offline.yml
new file mode 100644
index 0000000..741a447
--- /dev/null
+++ b/playbooks/roles/configure-installer/tasks/prepare-offline.yml
@@ -0,0 +1,103 @@
+---
+# ============LICENSE_START=======================================================
+# Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# skip the default repo sync in helm init for offline deployments
+# Helm 2 has hardcoded links to google repositories
+- name: Skip default repository in helm
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/kubernetes-apps/helm/defaults/main.yml"
+ regexp: "^helm_skip_refresh:.*"
+ line: "helm_skip_refresh: true"
+
+- name: Disable file download
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^download_run_once:.*"
+ line: "download_run_once: false"
+
+# Kubernetes version is hardcoded in the downloads section. This points to the wrong versions
+# of kubeadm, kubectl and other packages stored in cache.
+- name: Set k8s version to '{{ kubernetes_version }}' in downloads file
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^kube_version:.*"
+ line: "kube_version: {{ kubernetes_version }}"
+
+# Downloads should take place in the master node since it has docker properly configured, i.e.
+# the current user is either in the docker group or can do passwordless sudo.
+- name: Ensure download happens in master node
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^download_localhost:.*"
+ line: "download_localhost: false"
+
+- name: Enable the use of local cache
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^download_force_cache:.*"
+ line: "download_force_cache: true"
+
+# This will overcome a bug in Kubespray that only loads Helm docker containers in
+# the worker nodes, while some tasks needing those containers are executed in the
+# master node, thus hitting an "image not found" error.
+- name: Configure helm docker container to be loaded in master node
+ replace:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: '(helm:(?:\n.*){1,8}-\s)kube.*(\n)'
+ replace: '\1k8s-cluster\2'
+
+# This will introduce the bug related to image names and docker default registry
+- name: Enable local loading of docker images
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: "^download_container:.*"
+ line: "download_container: true"
+
+# These two tasks will workaround a Kubespray bug that points to the wrong path
+# when using docker images from the default registry. This causes the error of
+# image not found and a timeout when trying to pull from the Internet.
+- name: Override wrong paths in cached docker images
+ block:
+ - name: Remove links to docker default registry
+ replace:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: '\{\{ docker_image_repo \}\}/'
+ replace: ''
+
+ - name: Fix path for nginx docker image
+ replace:
+ path: "{{ engine_cache }}/repos/kubespray/roles/download/defaults/main.yml"
+ regexp: 'library/nginx'
+ replace: 'nginx'
+
+# Docker engine apt key is usually added using url argument. The key is provided as a file, so
+# the ansible task has to be modified accordingly.
+- name: Modify apt_key task to use file argument instead of url
+ replace:
+ path: "{{ engine_cache }}/repos/kubespray/roles/container-engine/docker/tasks/main.yml"
+ regexp: '(\s)url:(\s.*key)'
+ replace: '\1file:\2'
+
+- name: Point docker repo to use the provided key file
+ lineinfile:
+ path: "{{ engine_cache }}/repos/kubespray/roles/container-engine/docker/defaults/main.yml"
+ regexp: "^docker_ubuntu_repo_gpgkey:.*"
+ line: 'docker_ubuntu_repo_gpgkey: "/tmp/docker.key"'
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/configure-targethosts/tasks/configure-offline.yml b/playbooks/roles/configure-targethosts/tasks/configure-offline.yml
new file mode 100644
index 0000000..bff0774
--- /dev/null
+++ b/playbooks/roles/configure-targethosts/tasks/configure-offline.yml
@@ -0,0 +1,40 @@
+---
+# ============LICENSE_START=======================================================
+# Copyright (C) 2019 The Nordix Foundation. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+# TODO: replace hardcoded IP by a reference to idf
+# TODO: replace echo command
+- name: Proxy apt to jumphost with apt-cacher
+ shell: echo 'Acquire::http { Proxy "http://10.2.0.1:3142"; };' > /etc/apt/apt.conf.d/02Proxy
+ register: echo
+ changed_when: '"Proxy" in echo.stdout'
+
+- name: Configure end nodes in offline mode
+ block:
+ - name: Copy kubespray cache to k8s nodes
+ synchronize:
+ src: "{{ engine_cache }}/offline/kubespray_cache"
+ dest: /tmp
+
+ - name: Copy docker gpg key
+ synchronize:
+ src: "{{ engine_cache }}/offline/docker.key"
+ dest: /tmp/docker.key
+ when: offline_deployment|bool
+
+# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/configure-targethosts/tasks/main.yml b/playbooks/roles/configure-targethosts/tasks/main.yml
index 523849a..0e7afe4 100644
--- a/playbooks/roles/configure-targethosts/tasks/main.yml
+++ b/playbooks/roles/configure-targethosts/tasks/main.yml
@@ -22,7 +22,12 @@
# it is tricky to make it work quickly so we skip it for those nodes
- include: configure-network.yml
when: provisioner_type == "bifrost"
+- include: configure-offline.yml
+ when: provisioner_type == "bifrost" and (offline_deployment|bool or offline_dependencies|bool)
- include: install-packages.yml
+# TODO: NTP is not working in offline mode. We should look either into a proper
+# configuration or bringing up a local ntp server in kickstart.
- include: sync-time.yml
+ when: not offline_deployment|bool
# vim: set ts=2 sw=2 expandtab:
diff --git a/playbooks/roles/post-deployment/tasks/configure-localhost.yml b/playbooks/roles/post-deployment/tasks/configure-localhost.yml
index 174e484..93306bf 100644
--- a/playbooks/roles/post-deployment/tasks/configure-localhost.yml
+++ b/playbooks/roles/post-deployment/tasks/configure-localhost.yml
@@ -45,7 +45,7 @@
- name: Download helm client
unarchive:
src: "{{ helm_client_download_url }}"
- remote_src: true
+ remote_src: "{{ not offline_deployment|bool }}"
dest: /tmp
- name: Place helm and tiller binaries to /usr/local/bin
diff --git a/playbooks/roles/post-deployment/tasks/main.yml b/playbooks/roles/post-deployment/tasks/main.yml
index c2eefc1..d014ef9 100644
--- a/playbooks/roles/post-deployment/tasks/main.yml
+++ b/playbooks/roles/post-deployment/tasks/main.yml
@@ -17,6 +17,13 @@
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================
+# TODO: implement this in a more Ansible-like manner
+- name: Override helm and kubectl locations to local file
+ set_fact:
+ kubectl_download_url: "file://{{ engine_cache }}/offline/kubespray_cache/kubectl-{{ kubectl_version }}-amd64"
+ helm_client_download_url: "{{ engine_cache }}/offline/helm/helm-{{ helm_version }}-linux-amd64.tar.gz"
+ when: offline_deployment|bool
+
# NOTE: Install OpenShift and configure kubectl & helm on localhost
# we operate against Kubernetes cluster from localhost
- include_tasks: configure-{{ jumphost }}.yml