| # -*- mode: ruby -*- |
| # -*- coding: utf-8 -*- |
| |
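# Vagrant environment for a minimal ONAP deployment: an "operator" VM drives an
# RKE-built Kubernetes cluster ("control" + "worker") on which ONAP is installed
# from the OOM Helm charts, while a separate "devstack" VM provides an OpenStack
# instance for ONAP to use.

# Common settings and synced-folder paths. "${HOME}" is intentionally left for
# the guest's shell to expand; it is not interpolated by Ruby.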
| host_ip = "192.168.121.1" |
| operator_key = "${HOME}/.ssh/onap-key" |
| vagrant_user = "vagrant" |
| vagrant_password = "vagrant" |
| synced_folder_main = "/vagrant" |
| synced_folder_config = "#{synced_folder_main}/config" |
| synced_folder_tools_config = "#{synced_folder_main}/tools/config" |
| os_config = "#{synced_folder_config}/local.conf" |
| os_env = "#{synced_folder_config}/dot_env" |
| cluster_yml = "cluster.yml" |
| apt_prefs_dir = "/etc/apt/apt.conf.d" |
| apt_prefs = "95silent-approval" |
| |
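# Resource sizing and the base box shared by all machines.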
| vm_memory = 1 * 1024 |
| vm_memory_os = 6 * 1024 |
| vm_memory_onap = 12 * 1024 |
| vm_cpu = 1 |
| vm_cpus = 2 |
| vm_box = "generic/ubuntu1804" |
| |
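# Per-machine parameters (name, address and resources).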
| operation = { name: 'operator', hostname: 'operator', ip: '172.17.4.254', cpus: vm_cpu, memory: vm_memory } |
| devstack = { name: 'devstack', hostname: 'devstack', ip: '172.17.4.200', cpus: vm_cpus, memory: vm_memory_os } |
| control = { name: 'control', hostname: 'control', ip: '172.17.4.100', cpus: vm_cpu, memory: vm_memory } |
| worker = { name: 'worker', hostname: 'worker', ip: '172.17.4.101', cpus: vm_cpus, memory: vm_memory_onap } |
| |
cluster = [control, worker]
all = cluster + [operation, devstack]
| |
| operation_post_msg = "Run: \"vagrant provision #{operation[:name]} --provision-with=rke_up,setup_kubectl,setup_helm_cluster,setup_helm_repo,deploy_onap\" to complete ONAP deployment" |
| |
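# Shell snippets used by the provisioners defined below.

# Point the guest's resolver at the host address passed as $1.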
| $replace_dns = <<-SCRIPT |
| HOST_IP="$1" |
| rm -f /etc/resolv.conf # drop its dynamic management by systemd-resolved |
| echo nameserver "$HOST_IP" | tee /etc/resolv.conf |
| SCRIPT |
| |
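# Re-enable IPv6 by switching the disable_ipv6 sysctl back to 0 and reloading sysctl.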
| $enable_ipv6 = <<-SCRIPT |
| sed -i'' 's/net.ipv6.conf.all.disable_ipv6.*$/net.ipv6.conf.all.disable_ipv6 = 0/' /etc/sysctl.conf |
| sysctl -p |
| SCRIPT |
| |
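# Clone DevStack, copy in the local.conf passed as $1 and run stack.sh.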
| $setup_devstack = <<-SCRIPT |
| CONFIG="$1" |
| git clone https://opendev.org/openstack/devstack |
| cd devstack |
| cp "$CONFIG" . |
| ./stack.sh |
| SCRIPT |
| |
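# Add the given user to the "docker" group so Docker can be used without sudo.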
| $add_to_docker_group = <<-SCRIPT |
| USER="$1" |
| echo "Adding ${USER} to 'docker' group" |
| usermod -aG docker "$USER" |
| SCRIPT |
| |
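# Switch debconf to the noninteractive frontend so package installs do not prompt.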
| $setup_debconf = <<-SCRIPT |
| echo "Setting debconf frontend to noninteractive" |
| sed -i'.orig' '/^Config:/a Frontend: noninteractive' /etc/debconf.conf |
| SCRIPT |
| |
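# sshpass is needed for the non-interactive ssh-copy-id below. apt-get runs
# without "-y"; unattended approval is expected to come from the apt preferences
# file (95silent-approval) linked during operator provisioning.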
| $install_sshpass = <<-SCRIPT |
| apt-get update |
| echo "Installing 'sshpass'" |
| apt-get install sshpass |
| SCRIPT |
| |
| $install_make = <<-SCRIPT |
| apt-get update |
| echo "Installing 'make'" |
| apt-get install make |
| SCRIPT |
| |
| $generate_key = <<-SCRIPT |
| KEY_FILE="$1" |
| echo "Generating SSH key (${KEY_FILE})" |
| ssh-keygen -q -b 4096 -t rsa -f "$KEY_FILE" -N "" |
| SCRIPT |
| |
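# Copy the generated public key to each node IP listed in $3, authenticating
# with the password passed in via the PASSWORD environment variable.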
| $deploy_key = <<-SCRIPT |
| KEY="$1" |
| USER="$2" |
| PASS="$PASSWORD" |
| IPS="$3" |
| echo "Deploying ${KEY} for ${USER}" |
| for ip in $IPS; do |
| echo "on ${ip}" |
| sshpass -p "$PASS" ssh-copy-id -o StrictHostKeyChecking=no -i "$KEY" "${USER}@${ip}" |
| done |
| SCRIPT |
| |
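# Symlink every dot_* file from the synced directory into $HOME as the matching dotfile.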
| $link_dotfiles = <<-SCRIPT |
| SYNC_DIR="$1" |
for rc in "${SYNC_DIR}"/dot_*; do
| src="$rc" |
| dst="${HOME}/.${rc##*dot_}" |
| echo "Symlinking ${src} to ${dst}" |
| ln -sf "$src" "$dst" |
| done |
| SCRIPT |
| |
| $link_file = <<-SCRIPT |
| SYNC_DIR="$1" |
| FILE="$2" |
| src="${SYNC_DIR}/${FILE}" |
| dst="$3" |
| echo "Symlinking ${src} to ${dst}" |
| ln -sf "$src" "$dst" |
| SCRIPT |
| |
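# RKE cluster lifecycle commands (cluster.yml is linked into $HOME below).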
| $rke_up = "rke up" |
| $rke_down = "rke remove --force" |
| |
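# Clone the OOM repository (branch and remote can be overridden via $1 and $2).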
| $get_oom = <<-SCRIPT |
| BRANCH="${1:-5.0.1-ONAP}" |
| REPO="${2:-https://git.onap.org/oom}" |
| git clone -b "$BRANCH" "$REPO" --recurse-submodules |
| SCRIPT |
| |
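# Copy the OOM Helm plugins (including "deploy", used further below) into the Helm home.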
| $get_helm_plugins = "mkdir -p ${HOME}/.helm && cp -R ${HOME}/oom/kubernetes/helm/plugins/ ${HOME}/.helm" |
| |
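# Helm v2 cluster setup: create the Tiller service account, bind it to
# cluster-admin, initialise Helm and wait for the Tiller deployment to roll out.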
| $setup_helm_cluster = <<-SCRIPT |
| export KUBECONFIG="${HOME}/.kube/config.onap" |
| kubectl config use-context onap |
| kubectl -n kube-system create serviceaccount tiller |
| kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller |
| helm init --service-account tiller |
| kubectl -n kube-system rollout status deploy/tiller-deploy |
| SCRIPT |
| |
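# Start "helm serve" as the local chart repository and build the OOM charts into it.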
| # FIXME: replace sleep command with helm repo readiness probe |
| $setup_helm_repo = <<-SCRIPT |
| helm serve & |
| sleep 3 |
| helm repo add local http://127.0.0.1:8879 |
| make -C ${HOME}/oom/kubernetes all |
| make -C ${HOME}/oom/kubernetes onap |
| SCRIPT |
| |
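# Deploy ONAP using the OOM "helm deploy" plugin: OpenStack credentials are
# sourced from the dot_env file, the OpenStack password is encrypted with SO's
# key, and the override file ($1, minimal-onap.yaml by default) selects the
# deployment profile.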
| $deploy_onap = <<-SCRIPT |
| OVERRIDE="${1:-${HOME}/oom/kubernetes/onap/resources/environments/minimal-onap.yaml}" |
| |
| ENV="${2:-#{os_env}}" |
export $(xargs < "$ENV")
| |
| encrypt () { |
| KEY="${HOME}/oom/kubernetes/so/resources/config/mso/encryption.key" |
| echo -n "$1" \ |
    | openssl aes-128-ecb -e -K "$(cat "$KEY")" -nosalt \
| | xxd -c 256 -p |
| } |
| |
export OPENSTACK_ENCRYPTED_PASSWORD="$(encrypt "$OPENSTACK_PASSWORD")"
| |
| export KUBECONFIG="${HOME}/.kube/config.onap" |
| |
| helm deploy minimal local/onap --namespace onap -f "$OVERRIDE" --verbose --timeout 900 |
| SCRIPT |
| |
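# Define and provision all machines. Provisioners marked run: "never" only run
# on demand via "vagrant provision <machine> --provision-with=<steps>".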
| Vagrant.configure('2') do |config| |
| all.each do |machine| |
| config.vm.define machine[:name] do |config| |
| config.vm.box = vm_box |
| config.vm.hostname = machine[:hostname] |
| |
| config.vm.provider :virtualbox do |v| |
| v.name = machine[:name] |
| v.memory = machine[:memory] |
| v.cpus = machine[:cpus] |
| end |
| |
| config.vm.provider :libvirt do |v| |
| v.memory = machine[:memory] |
| v.cpus = machine[:cpus] |
| end |
| |
| config.vm.network :private_network, ip: machine[:ip] |
| config.vm.provision "replace_dns", type: :shell, run: "always", inline: $replace_dns, args: host_ip |
| |
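      # DevStack VM: rsync the repo in (minus the Vagrantfile), enable IPv6 and
      # stand up OpenStack with the synced local.conf.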
| if machine[:name] == 'devstack' |
| config.vm.synced_folder ".", synced_folder_main, type: "rsync", rsync__exclude: "Vagrantfile" |
| |
| config.vm.provision "enable_ipv6", type: :shell, run: "always", inline: $enable_ipv6 |
| config.vm.provision "setup_devstack", type: :shell, privileged: false, inline: $setup_devstack, args: os_config |
| end |
| |
| if machine[:name] == 'control' |
| config.vm.provision "customize_control", type: :shell, path: "tools/imported/openstack-k8s-controlnode.sh" |
| config.vm.provision "fix_groups_control", type: :shell, inline: $add_to_docker_group, args: vagrant_user |
| end |
| |
| if machine[:name] == 'worker' |
| config.vm.provision "customize_worker", type: :shell, path: "tools/imported/openstack-k8s-workernode.sh" |
| config.vm.provision "fix_group_worker", type: :shell, inline: $add_to_docker_group, args: vagrant_user |
| end |
| |
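      # Operator VM: links config and dotfiles, generates and distributes the SSH
      # key, and installs the RKE/kubectl/Helm tooling. The steps marked
      # run: "never" (rke_up, setup_kubectl, setup_helm_cluster, setup_helm_repo,
      # deploy_onap) are triggered manually as described in the post-up message.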
| if machine[:name] == 'operator' |
| config.vm.synced_folder ".", synced_folder_main, type: "rsync", rsync__exclude: ["Vagrantfile", "operator"] |
| config.vm.synced_folder "~/.ssh", "/home/#{vagrant_user}/.ssh", type: "rsync", rsync__exclude: "authorized_keys" |
| config.vm.synced_folder "./operator", "/home/#{vagrant_user}", type: "sshfs", reverse: true, sshfs_opts_append: "-o nonempty" |
| |
| config.vm.provision "setup_debconf", type: :shell, inline: $setup_debconf |
| config.vm.provision "link_apt_prefs", type: :shell, run: "always" do |s| |
| s.inline = $link_file |
| s.args = [synced_folder_tools_config, apt_prefs, apt_prefs_dir] |
| end |
| config.vm.provision "link_dotfiles_root", type: :shell, run: "always" do |s| |
| s.inline = $link_dotfiles |
| s.args = synced_folder_tools_config |
| end |
| config.vm.provision "link_dotfiles_user", type: :shell, run: "always" do |s| |
| s.privileged = false |
| s.inline = $link_dotfiles |
| s.args = synced_folder_tools_config |
| end |
| |
| config.vm.provision "install_sshpass", type: :shell, inline: $install_sshpass |
| config.vm.provision "generate_key", type: :shell, privileged: false, inline: $generate_key, args: operator_key |
| |
| ips = "" |
| cluster.each { |node| ips << node[:ip] << " " } |
| config.vm.provision "deploy_key", type: :shell do |s| |
| s.privileged = false |
| s.inline = $deploy_key |
| s.args = [operator_key, vagrant_user, ips] |
| s.env = {'PASSWORD': vagrant_password} |
| end |
| |
| config.vm.provision "get_rke", type: :shell, path: "tools/get_rke.sh" |
| config.vm.provision "link_cluster_yml", type: :shell, run: "always" do |s| |
| s.privileged = false |
| s.inline = $link_file |
| s.args = [synced_folder_config, cluster_yml, "$HOME"] |
| end |
| |
| config.vm.post_up_message = operation_post_msg |
| config.vm.provision "rke_up", type: :shell, run: "never", privileged: false, inline: $rke_up |
| config.trigger.before :destroy do |trigger| |
| trigger.warn = "Removing cluster" |
| trigger.run_remote = {privileged: false, inline: $rke_down} |
| end |
| |
| config.vm.provision "get_kubectl", type: :shell, path: "tools/get_kubectl.sh" |
| config.vm.provision "setup_kubectl", type: :shell, run: "never" do |s| |
| s.privileged = false |
| s.path = "tools/setup_kubectl.sh" |
| end |
| config.vm.provision "get_helm", type: :shell, path: "tools/get_helm.sh" |
| config.vm.provision "get_oom", type: :shell do |s| |
| s.privileged = false |
| s.inline = $get_oom |
| end |
| config.vm.provision "get_helm_plugins", type: :shell, privileged: false, inline: $get_helm_plugins |
| config.vm.provision "install_make", type: :shell, inline: $install_make |
| config.vm.provision "setup_helm_cluster", type: :shell, run: "never", privileged: false, inline: $setup_helm_cluster |
| config.vm.provision "setup_helm_repo", type: :shell, run: "never", privileged: false, inline: $setup_helm_repo |
| config.vm.provision "deploy_onap", type: :shell, run: "never" do |s| |
| s.privileged = false |
| s.inline = $deploy_onap |
| end |
| end |
| end |
| end |
| end |