Install JJB and shell scripts for offline installation
Change-Id: I9cd16a1bbc1932244d7fa5bd216e3116703623cb
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ef477dc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+non-gerrit
+build/heat
diff --git a/build/jjb/1_build_stack_creation.yaml b/build/jjb/1_build_stack_creation.yaml
index f2f39d7..9272065 100644
--- a/build/jjb/1_build_stack_creation.yaml
+++ b/build/jjb/1_build_stack_creation.yaml
@@ -11,36 +11,64 @@
default: 'onap_offline_heat.tmpl'
description: 'Name of the heat template file'
- string:
+ name: 'openstack_rc'
+ default: 'openstack.rc'
+ description: 'Openstack RC file'
+ - string:
+ name: 'openstack_user'
+ description: 'Name of Openstack user'
+ - password:
+ name: 'openstack_pwd'
+ description: 'Openstack password'
+ - string:
name: 'openstack_build_stack_name'
default: 'onap_offline_auto_build'
description: 'Name of ONAP Offline build stack'
- string:
name: 'openstack_image'
- default: 'est-centos7-1901'
description: 'Openstack Image name for Offline Build VM'
- string:
- name: 'openstack_flavour'
- default: '24C-128GB-400GB'
- description: 'Openstack Flavour name for Offline Build VM'
+ name: 'openstack_flavor'
+ description: 'Openstack Flavor name for Offline Build VM'
+ - string:
+ name: 'openstack_auth_url'
+ description: 'Openstack authentication URL'
+ - string:
+ name: 'openstack_user_domain'
+ description: 'Openstack user domain name'
+ - string:
+ name: 'openstack_project_domain'
+ description: 'Openstack project domain'
+ - string:
+ name: 'openstack_region_name'
+ description: 'Openstack region name'
+ - string:
+ name: 'openstack_project_name'
+ description: 'Openstack project name'
+ - string:
+ name: 'openstack_tenant_name'
+ description: 'Openstack tenant name'
+ - string:
+ name: 'openstack_auth_version'
+ description: 'Openstack auth version'
+ - string:
+ name: 'openstack_identity_api_version'
+ default: '3'
+ description: 'Openstack identity API version'
- string:
name: 'openstack_network'
- default: 'onapnet-internal1'
description: 'Openstack Network name'
- string:
name: 'openstack_security_group'
- default: 'internal-sg-onap-offline'
description: 'Openstack Security Group'
- string:
name: 'openstack_ssh_key'
- default: 'offline_install_key'
description: 'Openstack SSH Key'
- string:
name: 'openstack_net_id'
- default: '11bc9b32-b581-48f7-a713-27deabcb6f1e'
description: 'Openstack Network ID'
- string:
name: 'openstack_net_subnet'
- default: 'onapnet-internal1-subnet-ipv4'
description: 'Openstack SubNetwork ID'
- string:
name: 'openstack_volume'
@@ -61,11 +89,11 @@
builders:
- shell: git clone "${offline_install_git_repo}" "${WORKSPACE}/."
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/1_build_stack_creation_1.sh
+ !include-raw: ${WORKSPACE}/build/scripts/1_build_stack_creation_1.sh
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/1_build_stack_creation_2.sh
+ !include-raw: ${WORKSPACE}/build/scripts/1_build_stack_creation_2.sh
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/1_build_stack_creation_3.sh
+ !include-raw: ${WORKSPACE}/build/scripts/1_build_stack_creation_3.sh
- build-name-setter:
template: '#${BUILD_NUMBER}'
macro: true
diff --git a/build/jjb/2_build_preparation.yaml b/build/jjb/2_build_preparation.yaml
index d3ca970..96a19ce 100644
--- a/build/jjb/2_build_preparation.yaml
+++ b/build/jjb/2_build_preparation.yaml
@@ -19,7 +19,7 @@
template: '#${BUILD_NUMBER}'
macro: true
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/2_build_preparation.sh
+ !include-raw: ${WORKSPACE}/build/scripts/2_build_preparation.sh
wrappers:
- timestamps
node: 'city-jumphost-onap-ubuntu1804'
diff --git a/build/jjb/3_build_download_artifacts.yaml b/build/jjb/3_build_download_artifacts.yaml
index e74ebe3..af4814c 100644
--- a/build/jjb/3_build_download_artifacts.yaml
+++ b/build/jjb/3_build_download_artifacts.yaml
@@ -25,6 +25,14 @@
default: '/tmp/resources'
description: 'Directory of resources'
- string:
+ name: 'source_registry'
+ default: 'registry.nordix.org/onap/online-install'
+ description: 'Source registry'
+ - string:
+ name: 'target_registry'
+ default: 'nexus3.onap.org:10001'
+ description: 'Target registry'
+ - string:
name: 'ssh_key'
default: ''
description: 'Key used to ssh onto the VM'
@@ -42,7 +50,7 @@
template: '#${BUILD_NUMBER}'
macro: true
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/3_build_download_artifacts.sh
+ !include-raw: ${WORKSPACE}/build/scripts/3_build_download_artifacts.sh
wrappers:
- timestamps
node: 'city-jumphost-onap-ubuntu1804'
diff --git a/build/jjb/4_build_nexus.yaml b/build/jjb/4_build_nexus.yaml
index bae4d05..469856b 100644
--- a/build/jjb/4_build_nexus.yaml
+++ b/build/jjb/4_build_nexus.yaml
@@ -29,7 +29,7 @@
template: '#${BUILD_NUMBER}'
macro: true
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/4_build_nexus.sh
+ !include-raw: ${WORKSPACE}/build/scripts/4_build_nexus.sh
wrappers:
- timestamps
node: 'city-jumphost-onap-ubuntu1804'
diff --git a/build/jjb/5_build_create_tarballs.yaml b/build/jjb/5_build_create_tarballs.yaml
index 3cf013a..c76bb85 100644
--- a/build/jjb/5_build_create_tarballs.yaml
+++ b/build/jjb/5_build_create_tarballs.yaml
@@ -25,10 +25,8 @@
template: '#${BUILD_NUMBER}'
macro: true
- shell:
- !include-raw: ${offline_install_git_repo}/build/scripts/5_build_create_package.sh
+ !include-raw: ${WORKSPACE}/build/scripts/5_build_create_package.sh
wrappers:
- timestamps
- publishers:
- - workspace-cleanup
node: 'city-jumphost-onap-ubuntu1804'
diff --git a/build/scripts/1_build_stack_creation_1.sh b/build/scripts/1_build_stack_creation_1.sh
old mode 100755
new mode 100644
index d8c3e02..d6a2f0c
--- a/build/scripts/1_build_stack_creation_1.sh
+++ b/build/scripts/1_build_stack_creation_1.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-# Create OpenStack heat environment file
+# Create OpenStack heat environment file
cat <<EOF > ${heat_environment}
parameters:
instance_name: ${openstack_build_stack_name}_instance
image_name: ${openstack_image}
- flavor_name: ${openstack_flavour}
+ flavor_name: ${openstack_flavor}
#network_name: ${openstack_network}
key: ${openstack_ssh_key}
public_net_id: ${openstack_net_id}
@@ -14,3 +14,17 @@
volume_name: ${openstack_volume}
volume_size: ${openstack_volume_size}
EOF
+
+# Create OpenStack RC file
+cat <<OPENSTACK > ${openstack_rc}
+export OS_USERNAME=${openstack_user}
+export OS_PASSWORD=${openstack_pwd}
+export OS_AUTH_URL=${openstack_auth_url}
+export OS_USER_DOMAIN_NAME=${openstack_user_domain}
+export OS_PROJECT_DOMAIN_NAME=${openstack_project_domain}
+export OS_REGION_NAME=${openstack_region_name}
+export OS_PROJECT_NAME=${openstack_project_name}
+export OS_TENANT_NAME=${openstack_tenant_name}
+export OS_AUTH_VERSION=${openstack_auth_version}
+export OS_IDENTITY_API_VERSION=${openstack_identity_api_version}
+OPENSTACK
diff --git a/build/scripts/1_build_stack_creation_2.sh b/build/scripts/1_build_stack_creation_2.sh
old mode 100755
new mode 100644
index c199779..1af8f4a
--- a/build/scripts/1_build_stack_creation_2.sh
+++ b/build/scripts/1_build_stack_creation_2.sh
@@ -63,11 +63,7 @@
networks:
- port: { get_resource: VMwithvolume_0_private_port }
user_data_format: RAW
- user_data:
- str_replace:
- params:
- __mount_dir__: "aa"
- template: |
+ user_data: |
#!/bin/bash
set -e
diff --git a/build/scripts/1_build_stack_creation_3.sh b/build/scripts/1_build_stack_creation_3.sh
old mode 100755
new mode 100644
index edac5b9..118fe0f
--- a/build/scripts/1_build_stack_creation_3.sh
+++ b/build/scripts/1_build_stack_creation_3.sh
@@ -26,7 +26,3 @@
echo
echo ${build_node_ip} > ${WORKSPACE}/build_node_ip.txt
cat "${WORKSPACE}/build_node_ip.txt"
-pwd
-
-deactivate
-
diff --git a/build/scripts/2_build_preparation.sh b/build/scripts/2_build_preparation.sh
old mode 100755
new mode 100644
index 87ac182..041c41d
--- a/build/scripts/2_build_preparation.sh
+++ b/build/scripts/2_build_preparation.sh
@@ -97,6 +97,3 @@
echo "Failed to install: $RESULT"
exit -1
fi
-
-deactivate
-
diff --git a/build/scripts/3_build_download_artifacts.sh b/build/scripts/3_build_download_artifacts.sh
old mode 100755
new mode 100644
index 1602477..f9c99a8
--- a/build/scripts/3_build_download_artifacts.sh
+++ b/build/scripts/3_build_download_artifacts.sh
@@ -5,6 +5,7 @@
### Setting up variables used in this script ###
build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+onap_docker_images_dir="/tmp/resources/offline_data/docker_images_for_nexus"
${ssh_cmd} "${clone_oom_cmd}"
@@ -188,6 +189,9 @@
\"
"
+###TODO There is a script created in the offline installer to check rpms and create repo, but at the moment it is not usable, thus it is done manually###
+#${ssh_cmd} "sudo /tmp/onap-offline/build/create_repo.sh -d $(pwd)"
+
### RPM's for CentOS were changed, thus we need to remake the rpm.list file ###
${ssh_cmd} "sudo bash -c \"cat << EOF > ${build_dir}/data_lists/onap_rpm.list
#Renewed list of rpms
@@ -219,15 +223,22 @@
###Since we cannot login as root, and scripts presume you are a root user,need to set up correctly to be able to use helm ###
${ssh_cmd} "sudo ${build_dir}/download/download.py --http ${data_list_dir}/infra_bin_utils.list ${resources_dir}/downloads"
-${ssh_cmd} "sudo tar -xf ${resources_dir}/downloads/storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz linux-amd64/helm"
+${ssh_cmd} "sudo tar -xf ${resources_dir}/downloads/storage.googleapis.com/kubernetes-helm/*.tar.gz linux-amd64/helm"
${ssh_cmd} "sudo mv linux-amd64/helm /usr/local/bin/helm"
${ssh_cmd} "sudo sed -i -e '/secure_path/ s[=.*[&:/usr/local/bin[' /etc/sudoers"
${ssh_cmd} "sudo sed -i ${sed_cmd} ${build_dir}/creating_data/docker-images-collector.sh"
${ssh_cmd} "sudo ${build_dir}/creating_data/docker-images-collector.sh /tmp/oom/kubernetes/onap &>/dev/null"
+###*******Commands to incorporate Online PART 1*******###
+#${ssh_cmd} "sudo curl https://artifactory.nordix.org/artifactory/list/onap/online-install/master/latest/onap_docker_images.list -o ${onap_offline_dir}/onap_docker_images_online.list"
+#scp -o StrictHostKeychecking=no -i ${ssh_key} ${WORKSPACE}/build/scripts/docker_sha_downloader.py ${openstack_user}@${build_node_ip}:${onap_offline_dir}/docker_sha_downloader.py
+#${ssh_cmd} "sudo ${onap_offline_dir}/docker_sha_downloader.py -l onap_docker_images_online.list -d ${onap_docker_images_dir}"
+#${ssh_cmd} "sudo sed -i '/${target_registry}\/onap/d' ${data_list_dir}/onap_docker_images.list"
+
###Download Scripts run step by step instead of using a wrapper script###
${ssh_cmd} "sudo ${build_dir}/download/download.py \
--docker ${data_list_dir}/infra_docker_images.list ${resources_dir}/offline_data/docker_images_infra \
+--docker ${data_list_dir}/k8s_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
--docker ${data_list_dir}/rke_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
--docker ${data_list_dir}/onap_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus"
@@ -237,6 +248,7 @@
${ssh_cmd} "sudo sed -i '/registry.hub.docker.com\/onap\/multicloud\/framework-artifactbroker:1.4.2/d' ${build_dir}/data_lists/onap_docker_images.list"
${ssh_cmd} "sudo ${build_dir}/download/download.py \
--docker ${data_list_dir}/infra_docker_images.list ${resources_dir}/offline_data/docker_images_infra \
+ --docker ${data_list_dir}/k8s_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
--docker ${data_list_dir}/rke_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
--docker ${data_list_dir}/onap_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus"
if [ $? -ne 0 ]; then
@@ -245,28 +257,25 @@
fi
fi
-${ssh_cmd} "sudo ${build_dir}/download/download.py --git ${data_list_dir}/onap_git_repos.list ${resources_dir}/git-repo"
-if [ $? -ne 0 ]; then
- echo "Error downloading git repos"
- exit -1
-fi
-
-${ssh_cmd} "sudo ${build_dir}/download/download.py --npm ${data_list_dir}/onap_npm.list ${resources_dir}/offline_data/npm_tar"
-if [ $? -ne 0 ]; then
- echo "Error downloading npm files"
- exit -1
-fi
-
${ssh_cmd} "sudo ${build_dir}/download/download.py --rpm ${data_list_dir}/onap_rpm.list ${resources_dir}/pkg/rhel"
if [ $? -ne 0 ]; then
echo "Error downloading rpm files"
exit -1
fi
-${ssh_cmd} "sudo ${build_dir}/download/download.py --pypi ${data_list_dir}/onap_pip_packages.list ${resources_dir}/offline_data/pypi"
+###Need to create repo, as that is used in the deployment stage###
+${ssh_cmd} "sudo createrepo /tmp/resources/pkg/rhel"
if [ $? -ne 0 ]; then
- echo "Error downloading pypi files"
- exit -1
+ echo "Error creating repo"
+ exit -1
fi
-deactivate
+###*******Commands to incorporate Online PART 2 ( TO BE LEFT COMMENTED OUT FOR NIGHTLY PURPOSES ) *******###
+#${ssh_cmd} "sudo bash -c \"cat ${onap_offline_dir}/onap_docker_images_online.list | sed 's/#sha256.*//' > ${onap_offline_dir}/onap_docker_images_work.list\""
+#${ssh_cmd} "sudo sed -i 's/registry.nordix.org\/onap\/online-install\//nexus3.onap.org:10001\//' ${onap_offline_dir}/onap_docker_images_work.list"
+#${ssh_cmd} "sudo bash -c \"cat ${onap_offline_dir}/onap_docker_images_work.list | sort >> ${data_list_dir}/onap_docker_images.list\""
+
+
+###SHOULD NOT BE NEEDED ANYMORE IF WE ARE DROPPING THE 4 DIGIT TAG, but will be kept just in case###
+#${ssh_cmd} "sudo sed -i 's/\.[0-9]*$//' ${onap_offline_dir}/onap_docker_images_work.list"
+#${ssh_cmd} "sudo sed -i 's/\.[0-9]*[[:blank:]]$//' ${onap_offline_dir}/onap_docker_images_work.list"
diff --git a/build/scripts/4_build_nexus.sh b/build/scripts/4_build_nexus.sh
old mode 100755
new mode 100644
index 23a9ee9..f8e162a
--- a/build/scripts/4_build_nexus.sh
+++ b/build/scripts/4_build_nexus.sh
@@ -36,6 +36,3 @@
deactivate
exit -1
fi
-
-deactivate
-
diff --git a/build/scripts/5_build_create_package.sh b/build/scripts/5_build_create_package.sh
old mode 100755
new mode 100644
index 7982d22..2724086
--- a/build/scripts/5_build_create_package.sh
+++ b/build/scripts/5_build_create_package.sh
@@ -53,6 +53,3 @@
echo "There is a custom package";
${ssh_cmd} "sudo bash -c \"mv /tmp/data/sw_packagecustom.tar /tmp/data/sw_package.tar\"";
fi
-
-deactivate
-
diff --git a/build/scripts/docker_sha_downloader.py b/build/scripts/docker_sha_downloader.py
new file mode 100644
index 0000000..dfe4ed7
--- /dev/null
+++ b/build/scripts/docker_sha_downloader.py
@@ -0,0 +1,124 @@
+#! /usr/bin/python2.7
+
+import argparse
+import os
+import re
+import subprocess
+
+# Parse command line arguments
+parser = argparse.ArgumentParser()
+parser.add_argument('-d', dest='directory', help='Directory location to store ONAP docker images')
+parser.add_argument('-l', dest='docker_images_list', help='List of ONAP docker images')
+parser.add_argument('-s', dest='source_registry', default=os.path.join("registry.nordix.org", "onap", "online-install"),
+ help='Source registry')
+parser.add_argument('-t', dest='target_registry', default="nexus3.onap.org:10001", help='Target registry')
+args = parser.parse_args()
+
+
+def get_image_list_variables(image_list):
+ variable_pattern = re.compile("^# @.*=(.*)$")
+ nordix_promotion_id = None
+ onap_integration_repo_sha = None
+ onap_oom_repo_sha = None
+ onap_offline_installer_repo_sha = None
+
+ with open(image_list) as f:
+ for line in f.readlines():
+ if line.startswith("# @NORDIX_PROMOTION_ID"):
+                matches = re.match(variable_pattern, line)
+                nordix_promotion_id = matches.group(1)
+            elif line.startswith("# @ONAP_INTEGRATION_REPO_SHA"):
+                matches = re.match(variable_pattern, line)
+                onap_integration_repo_sha = matches.group(1)
+            elif line.startswith("# @ONAP_OOM_REPO_SHA"):
+                matches = re.match(variable_pattern, line)
+                onap_oom_repo_sha = matches.group(1)
+            elif line.startswith("# @ONAP_OFFLINE_INSTALLER_REPO_SHA"):
+                matches = re.match(variable_pattern, line)
+                onap_offline_installer_repo_sha = matches.group(1)
+
+ print("NORDIX_PROMOTION_ID: {nordix_promo}\nONAP_INTEGRATION_REPO_SHA: {onap_int_repo}".format(
+ nordix_promo=nordix_promotion_id, onap_int_repo=onap_integration_repo_sha))
+ print("ONAP_OOM_REPO_SHA: {onap_oom_repo}\nONAP_OFFLINE_INSTALLER_REPO_SHA: {onap_offline_inst_repo}".format(
+ onap_oom_repo=onap_oom_repo_sha, onap_offline_inst_repo=onap_offline_installer_repo_sha))
+
+ return nordix_promotion_id, onap_integration_repo_sha, onap_oom_repo_sha, onap_offline_installer_repo_sha
+
+
+def get_image_tokens(image_list):
+ tokens = []
+ with open(image_list) as f:
+ for line in f.readlines():
+            matches = re.match("^(.*):([0-9.]*)\\s#(.*)$", line)
+            if not line.startswith("#") and matches:
+                # skip blank/malformed lines that do not match the pattern
+                tokens.append(matches.groups())
+
+ return tokens
+
+
+def docker_save_images(tokens, directory, source, target):
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ for token in tokens:
+ docker_pull(token)
+ docker_tag(token, source, target)
+ docker_save(token, directory, source, target)
+ docker_remove_image(token)
+
+
+def docker_save(token, directory, source_reg, target_reg):
+ image, version, sha = token
+
+ print("Saving docker image: {image} (Version: {version})".format(image=image, version=version))
+
+ image_target = "{dir}{sep}{image}_{tag}.tar".format(dir=directory, sep=os.path.sep, image=image.replace(
+ source_reg, target_reg).replace(os.path.sep, "_").replace(":", "_"), tag=version)
+
+ if not os.path.exists(image_target):
+ try:
+ subprocess.check_output(['docker', 'save', '{image}:{tag}'.format(image=image, tag=version), '-o',
+ image_target])
+ except subprocess.CalledProcessError as e:
+ print("Error saving docker file ({file}): {error}".format(file=image_target, error=e.message))
+
+
+def docker_tag(token, source_reg, target_reg):
+ image, version, sha = token
+
+ target_image = image.replace(source_reg, target_reg)
+
+    print("Tagging docker image...\n\tSource: {image} (SHA: {sha}, Version: {version})\n\tTarget: {target}".format(
+ image=image, sha=sha, version=version, target=target_image))
+ try:
+ subprocess.check_output(['docker', 'tag', '{image}@{tag}'.format(image=image, tag=sha),
+ '{image}:{tag}'.format(image=target_image, tag=version)])
+ except subprocess.CalledProcessError as e:
+ print("Error tagging docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+def docker_remove_image(token):
+ image, version, sha = token
+
+    print("Removing docker image: {image}:{version}".format(image=image, version=version))
+ try:
+ subprocess.check_output(['docker', 'rmi', '{image}:{tag}'.format(image=image, tag=version)])
+ except subprocess.CalledProcessError as e:
+        print("Error removing docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+def docker_pull(token):
+ image, version, sha = token
+
+ print("Pulling docker image: {image} (Version: {version})".format(image=image, version=version))
+ try:
+ subprocess.check_output(['docker', 'pull', '{image}:{tag}'.format(image=image, tag=version)])
+ except subprocess.CalledProcessError as e:
+ print("Error pulling docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+nordix_promotion_id, onap_integration_repo_sha, onap_oom_repo_sha, onap_offline_installer_repo_sha = \
+ get_image_list_variables(args.docker_images_list)
+image_tokens = get_image_tokens(args.docker_images_list)
+docker_save_images(image_tokens, args.directory, args.source_registry, args.target_registry)
diff --git a/install/jjb/1_install_stack_creation.yaml b/install/jjb/1_install_stack_creation.yaml
new file mode 100644
index 0000000..85809e3
--- /dev/null
+++ b/install/jjb/1_install_stack_creation.yaml
@@ -0,0 +1,60 @@
+- job:
+ name: '2.1_onap_offline_install_stack_creation'
+ description: 'This job creates a stack of 4 VMs and a volume for the infra node'
+ parameters:
+ - string:
+ name: 'heat_environment'
+ default: 'onap_offline_heat.env'
+ description: 'Name of the heat environment file'
+ - string:
+ name: 'heat_template'
+ default: 'onap_offline_heat.tmpl'
+ description: 'Name of the heat template file'
+ - string:
+ name: 'openstack_deploy_stack_name'
+ default: 'onap_offline_deploy_stack'
+ description: 'Name of ONAP Offline deploy stack'
+ - string:
+ name: 'instance_name'
+ default: 'onap_offline_deploy_instance'
+ description: 'Name of ONAP offline deploy instance'
+ - string:
+ name: 'openstack_image'
+ description: 'Openstack Image name - Tested on Centos7 (1901)'
+ - string:
+ name: 'openstack_flavor'
+ description: 'Openstack Flavor name'
+ - string:
+ name: 'openstack_network'
+ description: 'Openstack Network name'
+ - string:
+ name: 'openstack_ssh_key'
+ description: 'Openstack SSH Key'
+ - string:
+ name: 'openstack_net_id'
+ description: 'Openstack Network ID'
+ - string:
+ name: 'openstack_net_subnet'
+ description: 'Openstack SubNetwork ID'
+ - string:
+ name: 'offline_install_git_repo'
+ default: ''
+ description: 'Location of Offline Install script gerrit repository'
+ - string:
+ name: 'timeout'
+ default: 30
+ description: 'Default timeout'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell: git clone "${offline_install_git_repo}" "${WORKSPACE}/."
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/1_create_deploy_stack_1.sh
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/1_create_deploy_stack_2.sh
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/1_create_deploy_stack_3.sh
+ wrappers:
+ - timestamps
+ node: 'city-jumphost-onap-ubuntu1804'
diff --git a/install/jjb/2_install_preparation.yaml b/install/jjb/2_install_preparation.yaml
new file mode 100644
index 0000000..1b25f9f
--- /dev/null
+++ b/install/jjb/2_install_preparation.yaml
@@ -0,0 +1,27 @@
+- job:
+ name: '2.2_onap_offline_install_preparation'
+ description: 'This job deletes the existing offline installer directory (/tmp/package/onap-offline-installer)<br>
+ on the Build Server and re-extracts it to the same location.<br>
+ The reason for this is in case a new build has been created since the previous extraction.'
+ parameters:
+ - string:
+ name: 'instance_name'
+ default: 'onap_offline_deploy_instance'
+ description: 'Prefix name of the offline deploy instances'
+ - string:
+ name: 'openstack_user'
+ default: 'centos'
+ description: 'User of the VM, used when sshing'
+ - string:
+ name: 'ssh_key'
+ description: 'Key used to ssh onto the VM (Will need to specify once we know the details in Nordix)'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/2_preparation.sh
+ wrappers:
+ - timestamps
+ node: 'city-jumphost-onap-ubuntu1804'
+
diff --git a/install/jjb/3_install_configure_yaml.yaml b/install/jjb/3_install_configure_yaml.yaml
new file mode 100644
index 0000000..8c77fb5
--- /dev/null
+++ b/install/jjb/3_install_configure_yaml.yaml
@@ -0,0 +1,31 @@
+- job:
+ name: '2.3_onap_offline_install_configure_yaml'
+ description: 'This job concentrates on configuring the application_configuration.yml file and the hosts.yml file <br>
+ Firstly the app_config file and the hosts file are copied into the application directory <br>
+ The ONAP version and the resources directory gets updated inside the app_config.yml file <br>
+ The hosts file is then configured, this involves correcting the IP addresses and adding 3 kubernetes nodes to make up a 4 node cluster.'
+ parameters:
+ - string:
+ name: 'instance_name'
+ default: 'onap_offline_deploy_instance'
+ description: 'Name of the instance in the Cloud'
+ - string:
+ name: 'openstack_deploy_stack_name'
+ default: 'onap_offline_deploy_stack'
+ description: 'Name of the installation stack in the Cloud'
+ - string:
+ name: 'openstack_user'
+ default: 'centos'
+ description: 'User of the VM, used when SSHing'
+ - string:
+ name: 'ssh_key'
+ description: 'Key used to ssh onto the VM'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/3_configure_yaml.sh
+ wrappers:
+ - timestamps
+ node: 'city-jumphost-onap-ubuntu1804'
diff --git a/install/jjb/4_install_copy_ssh_keys.yaml b/install/jjb/4_install_copy_ssh_keys.yaml
new file mode 100644
index 0000000..5182353
--- /dev/null
+++ b/install/jjb/4_install_copy_ssh_keys.yaml
@@ -0,0 +1,31 @@
+- job:
+ name: '2.4_onap_offline_install_copy_ssh_keys'
+ description: 'Need to set up passwordless SSHing in order to run the ansible playbooks <br>
+ This job achieves that by creating a key in build VM and copying it over to the infra and worker VMs'
+ parameters:
+ - string:
+ name: 'instance_name'
+ default: 'onap_offline_deploy_instance'
+ description: 'This will need to be changed in nordix'
+ - string:
+ name: 'openstack_deploy_stack_name'
+ default: 'onap_offline_deploy_stack'
+ description: 'Name of the installation stack in the Cloud'
+ - string:
+ name: 'openstack_user'
+ default: 'centos'
+ description: 'User of the VM, used when SSHing'
+ - string:
+ name: 'ssh_key'
+ description: 'Key used to ssh onto the VM'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/4_copy_ssh_keys.sh
+ wrappers:
+ - timestamps
+ node: 'city-jumphost-onap-ubuntu1804'
+
+
diff --git a/install/jjb/5_install_deploy_onap.yaml b/install/jjb/5_install_deploy_onap.yaml
new file mode 100644
index 0000000..09f2c89
--- /dev/null
+++ b/install/jjb/5_install_deploy_onap.yaml
@@ -0,0 +1,23 @@
+- job:
+ name: '2.5_onap_offline_install_deploy'
+ description: 'This job deploys ONAP across the 4 node cluster <br>
+ The installation is done via the ansible playbooks.'
+ parameters:
+ - string:
+ name: 'openstack_user'
+ default: 'centos'
+ description: 'User of the VM, used when SSHing'
+ - string:
+ name: 'ssh_key'
+ description: 'Key used to ssh onto the VM (Will need to specify once we know the details in Nordix)'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/5_deploy.sh
+ wrappers:
+ - timestamps
+ node: 'city-jumphost-onap-ubuntu1804'
+
+
diff --git a/install/jjb/6_install_healthcheck_artifactory.yaml b/install/jjb/6_install_healthcheck_artifactory.yaml
new file mode 100644
index 0000000..24c3d3a
--- /dev/null
+++ b/install/jjb/6_install_healthcheck_artifactory.yaml
@@ -0,0 +1,42 @@
+- job:
+ name: '2.6_onap_offline_install_healthcheck_push_to_artifactory'
+ description: 'This job runs a healthcheck on the deployed onap after sleeping for a period of time, making sure onap is given enough time to be up.<br>
+ The healthcheck results are recorded and if they show a sufficiently healthy deployment we push the tar balls that we created in the build server'
+ parameters:
+ - string:
+ name: 'artifactory_push_dir'
+ default: 'https://artifactory.nordix.org/artifactory/onap/offline-install/El_Alto'
+ description: 'Then you need to add relating /package.tar in the results.sh script <br>
+ The directory should be changed, depending if you are pushing Dublin,El Alto or latest'
+ - string:
+ name: 'instance_name'
+ default: 'onap_offline_deploy_instance'
+ description: 'Instance name prefix'
+ - string:
+ name: 'openstack_deploy_stack_name'
+ default: 'onap_offline_deploy_stack'
+ description: 'Name of the installation stack in the Cloud'
+ - string:
+ name: 'openstack_user'
+ default: 'centos'
+ description: 'User of the VM, used when SSHing'
+ - string:
+ name: 'ssh_key'
+ description: 'Key used to ssh onto the VM (Will need to specify once we know the details in Nordix)'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw: ${WORKSPACE}/install/scripts/6_healthcheck_push_to_artifactory.sh
+ wrappers:
+ - timestamps
+ - credentials-binding:
+ - username-password-separated:
+ credential-id: nordixinfra-jjb-creds
+ username: NORDIX_ARM_USERNAME
+ password: NORDIX_ARM_TOKEN
+ publishers:
+ - workspace-cleanup
+ node: 'city-jumphost-onap-ubuntu1804'
+
diff --git a/install/scripts/1_create_deploy_stack_1.sh b/install/scripts/1_create_deploy_stack_1.sh
new file mode 100644
index 0000000..0b69a11
--- /dev/null
+++ b/install/scripts/1_create_deploy_stack_1.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Create OpenStack heat environment file
+
+cat <<EOF > ${heat_environment}
+parameters:
+ image_name: ${openstack_image}
+ flavor_name: ${openstack_flavor}
+ key: ${openstack_ssh_key}
+ network_id: ${openstack_net_id}
+EOF
diff --git a/install/scripts/1_create_deploy_stack_2.sh b/install/scripts/1_create_deploy_stack_2.sh
new file mode 100644
index 0000000..83c37c9
--- /dev/null
+++ b/install/scripts/1_create_deploy_stack_2.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+# Create OpenStack heat template file
+
+cat <<EOF > ${heat_template}
+heat_template_version: 2013-05-23
+description: >
+ Template to create multiple instances.
+
+parameters:
+ image_name:
+ type: string
+ description: Image used for servers
+ key:
+ type: string
+ description: Name of SSH key to connect to the servers
+ network_id:
+ type: string
+ description: ID of Network
+ flavor_name:
+ type: string
+ description: flavor used by the servers
+ constraints:
+ - custom_constraint: nova.flavor
+ volume_name:
+ type: string
+ default: onap_offline_infra_volume
+ volume_size:
+ type: number
+ default: 150
+
+resources:
+ instance_1:
+ type: OS::Nova::Server
+ properties:
+ flavor: { get_param: flavor_name }
+ name: "${instance_name}_infra_node"
+ image: { get_param: image_name }
+ key_name: { get_param: key }
+ networks:
+ - network: { get_param: network_id }
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -e
+
+          while [ ! -e /dev/vdb ]; do echo Waiting for volume /dev/vdb to attach; sleep 1; done
+
+          echo "Partitions not formatted, format it as ext4"
+ # yes /dev/disk/by-id/, partprobe and hdparm show it is there, but no it is is not ready
+ sleep 1
+ mkfs.ext4 /dev/vdb
+ file -sL /dev/disk/by-id/*
+
+ mkdir -pv /opt
+ # mount on reboot
+ echo "/dev/vdb /opt ext4 defaults,nofail 0 0" >> /etc/fstab
+ # mount now
+ mount /opt
+
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ name: { get_param: volume_name }
+ size: { get_param: volume_size }
+
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: cinder_volume }
+ instance_uuid: { get_resource: instance_1 }
+ mountpoint: /dev/vdb
+
+ instance_2:
+ type: OS::Nova::Server
+ properties:
+ name: "${instance_name}_worker_1"
+ flavor: { get_param: flavor_name }
+ image: { get_param: image_name }
+ key_name: { get_param: key }
+ networks:
+ - network: { get_param: network_id }
+
+ instance_3:
+ type: OS::Nova::Server
+ properties:
+ name: "${instance_name}_worker_2"
+ flavor: { get_param: flavor_name }
+ image: { get_param: image_name }
+ key_name: { get_param: key }
+ networks:
+ - network: { get_param: network_id }
+
+ instance_4:
+ type: OS::Nova::Server
+ properties:
+ name: "${instance_name}_worker_3"
+ flavor: { get_param: flavor_name }
+ image: { get_param: image_name }
+ key_name: { get_param: key }
+ networks:
+ - network: { get_param: network_id }
+
+EOF
diff --git a/install/scripts/1_create_deploy_stack_3.sh b/install/scripts/1_create_deploy_stack_3.sh
new file mode 100644
index 0000000..fefed5e
--- /dev/null
+++ b/install/scripts/1_create_deploy_stack_3.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -x
+
+### Deletes a stack if it exists already ###
+openstack stack show -c id "${openstack_deploy_stack_name}" && openstack stack delete -y --wait "${openstack_deploy_stack_name}"
+sleep "${timeout}"
+
+### Creates a stack with 4 VMs and a volume for infra VM ###
+openstack stack create -f yaml -e "${heat_environment}" -t "${heat_template}" --wait "${openstack_deploy_stack_name}"
+sleep "${timeout}"
+
+deploy_node_ips=$(openstack server list -c Name -c Networks -f value --name ${openstack_deploy_stack_name} | awk '{print $NF}')
+
+echo
+echo "========================================"
+echo "deploy node ips: ${deploy_node_ips}"
+echo "========================================"
+echo
+
diff --git a/install/scripts/2_preparation.sh b/install/scripts/2_preparation.sh
new file mode 100644
index 0000000..bb64bf7
--- /dev/null
+++ b/install/scripts/2_preparation.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -x
+
+### Setting up variables used in the script ###
+build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
+infra_node_ip=$(openstack server show ${instance_name}_infra_node -f value -c addresses | cut -d' ' -f2 )
+installer_dir="/root/onap-offline-installer"
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+
+### Pinging Infra server to see if its up ###
+for n in $(seq 1 40); do
+ timeout 1 ping -c 1 "${infra_node_ip}" > /dev/null 2>&1
+ if [[ $? -eq 0 ]] ; then
+ echo "Successful Ping"
+ n=40
+ break
+ else
+ echo "Failed to connect to "${infra_node_ip}" - Retrying in 10 seconds..."
+        if [[ $n -eq 40 ]]; then
+ exit -1
+ else
+ sleep 10
+ fi
+ fi
+done
+
+### Removing the offline installer directory in case there was something already there and creating the directory anew ###
+${ssh_cmd} "sudo rm -rf ${installer_dir}"
+if [[ $? -ne 0 ]]; then
+ echo "Error cleaning up ${installer_dir}"
+ exit -1
+fi
+sleep 5
+
+${ssh_cmd} "sudo mkdir ${installer_dir}"
+if [[ $? -ne 0 ]]; then
+ echo "Failed to create installer directory"
+ exit -1
+fi
+sleep 5
+
+### Untar the software package into the offline installer directory ###
+${ssh_cmd} "sudo tar -C ${installer_dir} -xf /tmp/data/sw_package.tar"
+if [[ $? -ne 0 ]]; then
+ echo "Possible custom package, retrying untarring a custom software package"
+ ${ssh_cmd} "sudo tar -C ${installer_dir} -xf /tmp/data/sw_packagecustom.tar"
+ if [[ $? -ne 0 ]]; then
+        echo "Failed to untar the software package"
+ exit -1
+ fi
+fi
diff --git a/install/scripts/3_configure_yaml.sh b/install/scripts/3_configure_yaml.sh
new file mode 100644
index 0000000..384c6e8
--- /dev/null
+++ b/install/scripts/3_configure_yaml.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+### Fetching ip adresses ###
+build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
+infra_node_ip=$(openstack server show ${instance_name}_infra_node -f value -c addresses | cut -d' ' -f2 )
+worker_1_ip=$(openstack server show ${instance_name}_worker_1 -f value -c addresses | cut -d' ' -f2 )
+worker_2_ip=$(openstack server show ${instance_name}_worker_2 -f value -c addresses | cut -d' ' -f2 )
+worker_3_ip=$(openstack server show ${instance_name}_worker_3 -f value -c addresses | cut -d' ' -f2 )
+
+### Setting variables used in this shell ###
+installer_dir="/root/onap-offline-installer"
+application_yaml="${installer_dir}/ansible/application/application_configuration.yml"
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+
+### Copying app config file ###
+${ssh_cmd} "if [[ -f ${application_yaml} ]];then sudo rm -f ${application_yaml}; fi; sudo cp /tmp/onap-offline/config/application_configuration.yml ${installer_dir}/ansible/application/"
+if [[ $? -ne 0 ]]; then
+ echo "Error moving app_yaml file: ${application_yaml}"
+ exit -1
+fi
+
+### Updating onap version in app config file ###
+${ssh_cmd} "sudo sed -i 's/offline.*resources\.tar/resources_package\.tar/' ${application_yaml}"
+if [[ $? -ne 0 ]]; then
+ echo "Error updating file: ${application_yaml}"
+ exit -1
+fi
+
+### Updating resouces dir in app config file ###
+${ssh_cmd} "sudo sed -i 's/\/data/\/tmp\/data\//g' ${application_yaml}"
+if [[ $? -ne 0 ]]; then
+ echo "Error updating file: ${application_yaml}"
+ exit -1
+fi
+
+### Need to add /usr/local/bin path to infra node for the installation in order to use helm and kubectl ###
+ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${infra_node_ip} "sudo sed -i -e '/secure_path/ s[=.*[&:/usr/local/bin[' /etc/sudoers"
+${ssh_cmd} "sudo rm -f ${installer_dir}/ansible/application/hosts.yml"
+${ssh_cmd} "sudo touch ${installer_dir}/ansible/application/hosts.yml"
+
+### Create new hosts file (if more worker nodes are to be added the jobs need to be changed according ot the amount of worker nodes) ###
+${ssh_cmd} "sudo bash -c \"echo '
+---
+# This group contains hosts with all resources (binaries, packages, etc.)
+# in tarball.
+all:
+ vars:
+ # this key is supposed to be generated during setup.yml playbook execution
+ # change it just when you have better one working for all nodes
+ ansible_ssh_private_key_file: /root/.ssh/offline_ssh_key
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+ children:
+ resources:
+ hosts:
+ resource-host:
+ ansible_host: \"${build_node_ip}\"
+
+ # This is group of hosts where nexus, nginx, dns and all other required
+ # services are running.
+ infrastructure:
+ hosts:
+ infrastructure-server:
+ ansible_host: \"${infra_node_ip}\"
+ #IP used for communication between infra and kubernetes nodes, must be specified.
+ cluster_ip: \"${infra_node_ip}\"
+
+ # This is group of hosts which are/will be part of Kubernetes cluster.
+ kubernetes:
+ children:
+ # This is a group of hosts containing kubernetes worker nodes.
+ kubernetes-node:
+ hosts:
+ kubernetes-node-1:
+ ansible_host: \"${worker_1_ip}\"
+ cluster_ip: \"${worker_1_ip}\"
+ kubernetes-node-2:
+ ansible_host: \"${worker_2_ip}\"
+ cluster_ip: \"${worker_2_ip}\"
+ kubernetes-node-3:
+ ansible_host: \"${worker_3_ip}\"
+ cluster_ip: \"${worker_3_ip}\"
+
+ # Group of hosts containing etcd cluster nodes.
+ # Defaults to infra.
+ kubernetes-etcd:
+ hosts:
+        infrastructure-server:
+
+ # This is a group of hosts that are to be used as kubernetes control plane nodes.
+ # This means they host kubernetes api server, controller manager and scheduler.
+ # This example uses infra for this purpose, however note that any
+ # other host could be used including kubernetes nodes.
+ # cluster_ip needs to be set for hosts used as control planes.
+ kubernetes-control-plane:
+ hosts:
+        infrastructure-server:
+
+ nfs-server:
+ hosts:
+        kubernetes-node-1:
+' > ${installer_dir}/ansible/application/hosts.yml\"
+"
diff --git a/install/scripts/4_copy_ssh_keys.sh b/install/scripts/4_copy_ssh_keys.sh
new file mode 100644
index 0000000..c354e79
--- /dev/null
+++ b/install/scripts/4_copy_ssh_keys.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+###Fetching build node ip###
+build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
+
+###Set Variables###
+ssh_cmd_build_node="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+ssh_cmd_node="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}"
+
+remote_key="/root/.ssh/offline_ssh_key"
+authorized_keys="/root/.ssh/authorized_keys"
+identity_key="/root/.ssh/identity"
+installer_dir="/root/onap-offline-installer/ansible"
+
+### Fetching ip adresses ###
+infra_node_ip=$(openstack server show ${instance_name}_infra_node -f value -c addresses | cut -d' ' -f2 )
+worker_1_ip=$(openstack server show ${instance_name}_worker_1 -f value -c addresses | cut -d' ' -f2 )
+worker_2_ip=$(openstack server show ${instance_name}_worker_2 -f value -c addresses | cut -d' ' -f2 )
+worker_3_ip=$(openstack server show ${instance_name}_worker_3 -f value -c addresses | cut -d' ' -f2 )
+
+### Generating ssh keys to create correct ./ssh paths and authorized_keys file ###
+for node in ${infra_node_ip} ${worker_1_ip} ${worker_2_ip} ${worker_3_ip}; do
+ ${ssh_cmd_node}@${node} "sudo test -f ${authorized_keys}" || ${ssh_cmd_node}@${node} "sudo bash -c \"ssh-keygen -N \\\"\\\" -f ${identity_key} \""
+done
+
+### Generate_ssh key on the build node ###
+${ssh_cmd_build_node} "sudo test -f ${remote_key}.pub" || ${ssh_cmd_build_node} "sudo bash -c \"ssh-keygen -N \\\"\\\" -f ${remote_key} \""
+
+### Get remote key ###
+key_value=$(${ssh_cmd_build_node} "sudo cat ${remote_key}.pub")
+
+### Add key to build authorized_keys file ###
+${ssh_cmd_build_node} "sudo bash -c \"echo '${key_value}' >> ${authorized_keys} \""
+
+### Add the key to Infra and Worker VM's authorized_keys file ###
+for node in ${infra_node_ip} ${worker_1_ip} ${worker_2_ip} ${worker_3_ip}; do
+ ${ssh_cmd_node}@${node} "sudo bash -c \"echo '${key_value}' >> ${authorized_keys} \""
+done
+
+### Run the ssh setup.yml ###
+${ssh_cmd_build_node} "sudo ${installer_dir}/run_playbook.sh -i application/hosts.yml setup.yml"
+if [[ $? -ne 0 ]]; then
+ echo "Error: Failed to configure SSH Keys"
+ exit -1
+fi
diff --git a/install/scripts/5_deploy.sh b/install/scripts/5_deploy.sh
new file mode 100644
index 0000000..51f96ce
--- /dev/null
+++ b/install/scripts/5_deploy.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -x
+
+build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
+source_dir="/root/onap-offline-installer/ansible"
+hosts="application/hosts.yml"
+application_yaml="application/application_configuration.yml"
+site="site.yml"
+
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+playbook_cmd="sudo ${source_dir}/run_playbook.sh -i ${hosts} -e @${application_yaml} ${site}"
+
+${ssh_cmd} "${playbook_cmd}"
+if [[ $? -ne 0 ]]; then
+ #Currently offline installer deletes all repos, yet the oom introduced a need for another repo, thus need to add it and run the script again
+ ${ssh_cmd} "sudo helm repo add google_api https://kubernetes-charts.storage.googleapis.com/"
+ ${ssh_cmd} "sudo cp -R /tmp/onap-offline/patches/onap-patch-role ${source_dir}/roles/"
+ ${ssh_cmd} "sudo cp /tmp/onap-offline/patches/onap.patch ${source_dir}/application/onap.patch"
+ ${ssh_cmd} "${playbook_cmd}"
+ if [ $? -ne 0 ]; then
+ echo "Failed to install ONAP"
+ exit -1
+ fi
+fi
diff --git a/install/scripts/6_healthcheck_push_to_artifactory.sh b/install/scripts/6_healthcheck_push_to_artifactory.sh
new file mode 100644
index 0000000..fa85bff
--- /dev/null
+++ b/install/scripts/6_healthcheck_push_to_artifactory.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+### Fetching ip adresses ###
+infra_node_ip=$(openstack server show ${instance_name}_infra_node -f value -c addresses | cut -d' ' -f2 )
+build_node_ip=$(cat "${WORKSPACE}/build_node_ip.txt")
+
+### Setting up variables used in this script ###
+build_ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${build_node_ip}"
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${openstack_user}@${infra_node_ip}"
+
+### Cat-ing a script to run healthcheck and push to artifactory ###
+cat <<EOF> results.sh
+#!/bin/bash
+### Script to run the healthcheck and artifactory ###
+number_of_failed_tests=\$(grep '| FAIL |' /opt/onap/results.txt | grep -v "Testsuites" | grep -c .)
+number_of_successful_tests=\$(grep '| PASS |' /opt/onap/results.txt | grep -v "Testsuites" | grep -c .)
+total_number_of_tests=\$((\$number_of_failed_tests + \$number_of_successful_tests))
+echo "Total Number of Tests \$total_number_of_tests"
+
+percentage_of_passing=\$(python -c "s=float(\$number_of_successful_tests); t=float(\$total_number_of_tests); print(s/t*100)")
+
+echo "***********************************************************"
+echo "Percentage of tests passing \$percentage_of_passing %"
+echo "************************************************************"
+
+echo "*******************************************************"
+echo "90 is currently the cutoff, this may change and other factors such as essential pods may also come to be a feature over time"
+echo "*******************************************************"
+is_percentage_over_cutoff=\$(python -c "print(int(\$percentage_of_passing<=90))")
+
+if [[ \$is_percentage_over_cutoff -eq 1 ]]; then
+ echo "*******************************************************"
+ echo "There is less than 90% health, exiting"
+ echo "*******************************************************"
+ exit 1
+else
+ echo "*******************************************************"
+ echo "ONAP healthy enough, will proceed pushing tar files to artifactory"
+ echo "*******************************************************"
+ exit 0
+fi
+EOF
+
+### Need to sleep to make sure that pods are up after helm deploy finishes running, some pods may still be in init phase ###
+### At least one DCAE component seems to take longer than 10 minutes to be up ###
+sleep 900
+
+${ssh_cmd} "sudo bash -c \"/opt/onap/helm_charts/robot/ete-k8s.sh onap health > /opt/onap/results.txt\"" || echo "Health check has failed"
+
+### Removing the script in case it existed before hand and then copying it over and running it ###
+${ssh_cmd} "sudo rm -f /opt/onap/results.sh"
+scp -o StrictHostKeychecking=no -i ${ssh_key} results.sh ${openstack_user}@${infra_node_ip}:
+${ssh_cmd} "sudo mv results.sh /opt/onap/results.sh"
+${ssh_cmd} "sudo chmod +x /opt/onap/results.sh"
+${ssh_cmd} "sudo /opt/onap/results.sh"
+
+if [[ $? -eq 0 ]]; then
+ echo "******Pushing the software tar file*******"
+ ${build_ssh_cmd} "for i in {1..3}; do curl -u ${NORDIX_ARM_USERNAME}:${NORDIX_ARM_TOKEN} -T /tmp/data/sw_package.tar \"${artifactory_push_dir}/sw_package.tar\" && break || sleep 15; done"
+ echo "******Pushing the resources tar file*******"
+ ${build_ssh_cmd} "for i in {1..3}; do curl -u ${NORDIX_ARM_USERNAME}:${NORDIX_ARM_TOKEN} -T /tmp/data/resources_package.tar \"${artifactory_push_dir}/resources_package.tar\" && break || sleep 15; done"
+else
+ echo "*****The healthcheck did not meet the requirements and tar files will not be pushed to artifactory*****"
+ exit -1
+fi
diff --git a/project.yaml b/project.yaml
new file mode 100644
index 0000000..dc241f5
--- /dev/null
+++ b/project.yaml
@@ -0,0 +1,71 @@
+- job:
+ name: ONAP Build Tarball and Installation Job
+ description: 'This is a multijob that kicks off all the jobs to create the tarballs install the tarballs healthcheck them and push them to artifactory sequentially'
+ project-type: multijob
+ node: ONAP
+ wrappers:
+ - timestamps
+ builders:
+ - multijob:
+ name: Building The Tarball
+ condition: SUCCESSFUL
+ execution-type: SEQUENTIALLY
+ projects:
+ - name: 1.1_onap_offline_build_stack_creation
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 1.2_onap_offline_build_preparation
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 1.3_onap_offline_build_download_artifacts
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 1.4_onap_offline_build_nexus
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 1.5_onap_offline_build_create_packages
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - multijob:
+ name: ONAP Installation and Push to Artifactory
+ condition: SUCCESSFUL
+ execution-type: SEQUENTIALLY
+ projects:
+ - name: 2.1_onap_offline_install_stack_creation
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 2.2_onap_offline_install_preparation
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 2.3_onap_offline_install_configure_yaml
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 2.4_onap_offline_install_copy_ssh_keys
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 2.5_onap_offline_install_deploy
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ current-parameters: true
+ node-parameters: true
+ - name: 2.6_onap_offline_install_healthcheck_push_to_artifactory
+ kill-phase-on: FAILURE
+ abort-all-job: true
\ No newline at end of file