--- /dev/null
+# JJB job 1.1: creates a heat stack (one VM plus an attached Cinder volume)
+# used as the build server for downloading artifacts and building the ONAP
+# offline-installer tarballs. The parameters below feed the heat env/template
+# files generated by the 1_build_stack_creation_*.sh scripts.
+- job:
+ name: '1.1_onap_offline_build_stack_creation'
+ description: 'This job creates a stack consisting of an instance and a volume attached to this instance to accommodate storage of the downloaded files and the created tar files'
+ node: onap-offline-ubuntu1804
+ parameters:
+ - string:
+ name: 'heat_environment'
+ default: 'onap_offline_heat.env'
+ description: 'Name of the heat environment file'
+ - string:
+ name: 'heat_template'
+ default: 'onap_offline_heat.tmpl'
+ description: 'Name of the heat template file'
+ - string:
+ name: 'openstack_rc'
+ # $OPENRC_FILE is injected by the credentials-binding wrapper below.
+ default: $OPENRC_FILE
+ description: 'Openstack RC file'
+ - string:
+ name: 'remote_user'
+ default: 'centos'
+ description: 'User on target nodes'
+ - string:
+ name: 'openstack_build_stack_name'
+ default: 'onap_offline_auto_build'
+ description: 'Name of ONAP Offline build stack'
+ - string:
+ name: 'openstack_image'
+ default: 'est-centos7-1901'
+ description: 'Openstack Image name for Offline Build VM'
+ - string:
+ name: 'openstack_flavor'
+ default: '16C-32GB-500GB'
+ description: 'Openstack Flavor name for Offline Build VM'
+ - string:
+ name: 'openstack_network'
+ default: 'network.onap-offline'
+ description: 'Openstack Network name'
+ - string:
+ name: 'openstack_security_group'
+ default: 'sg.onap-offline'
+ description: 'Openstack Security Group'
+ - string:
+ name: 'openstack_ssh_key'
+ default: 'nordix-onap-offline-install'
+ description: 'Openstack SSH Key'
+ - string:
+ name: 'openstack_net_id'
+ default: 'network.onap-offline'
+ description: 'Openstack Network ID'
+ - string:
+ name: 'openstack_net_subnet'
+ default: 'network.onap-offline-subnet-ipv4'
+ description: 'Openstack SubNetwork ID'
+ - string:
+ name: 'openstack_volume'
+ default: 'onap_offline_build_volume'
+ description: 'Openstack Volume name'
+ - string:
+ name: 'openstack_volume_size'
+ default: '400'
+ description: 'Openstack Volume size (GB)'
+ - string:
+ name: 'offline_install_git_repo'
+ default: ''
+ description: 'Location of Offline Install script gerrit repository'
+ - string:
+ name: 'ssh_timeout'
+ default: '60'
+ description: 'SSH Timeout in seconds'
+ builders:
+ # Script 1 writes the heat environment file, script 2 writes the heat
+ # template, script 3 (re)creates the stack and waits for the VM to answer.
+ - shell:
+ !include-raw-escape: ../scripts/1_build_stack_creation_1.sh
+ - shell:
+ !include-raw-escape: ../scripts/1_build_stack_creation_2.sh
+ - shell:
+ !include-raw-escape: ../scripts/1_build_stack_creation_3.sh
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ wrappers:
+ - timestamps
+ # Exposes the OpenStack RC file credential as $OPENRC_FILE for the shell
+ # build steps (referenced by the 'openstack_rc' parameter default above).
+ - credentials-binding:
+ - file:
+ credential-id: 'est-jenkins-openrcfile-city-frankfurt-onap'
+ variable: OPENRC_FILE
--- /dev/null
+# JJB job 1.2: installs build-time prerequisites (docker, python/pip, git,
+# the offline-installer repo and its python requirements - see
+# 2_build_preparation.sh) on the CentOS build VM created by job 1.1.
+- job:
+ name: '1.2_onap_offline_build_preparation'
+ description: 'This job prepares the build server for the download of artifacts and creation of tarballs'
+ node: onap-offline-ubuntu1804
+ parameters:
+ - string:
+ name: offline_branch
+ default: master
+ description: 'oom/offline-installer git branch'
+ - string:
+ name: 'remote_user'
+ default: 'centos'
+ description: User of the VM, used when sshing
+ - string:
+ name: 'ssh_key'
+ default: '/home/ubuntu/.ssh/id_rsa'
+ description: 'Key used to ssh onto the VM (Will need to specify once we know the details in Nordix)'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw-escape: ../scripts/2_build_preparation.sh
+ wrappers:
+ - timestamps
--- /dev/null
+# JJB job 1.3: clones OOM on the build VM, regenerates the offline data
+# lists from the helm charts and downloads docker images / rpms
+# (see 3_build_download_artifacts.sh).
+- job:
+ name: '1.3_onap_offline_build_download_artifacts'
+ description: 'This job creates a data list based on OOM helm charts. Create a script for download of the packages.'
+ node: onap-offline-ubuntu1804
+ parameters:
+ - string:
+ name: clone_oom_cmd
+ default: "sudo git clone -b master https://gerrit.onap.org/r/oom --recurse-submodules /tmp/oom"
+ description: 'Change branch (-b) to whichever branch you want to pull -dublin -master'
+ - string:
+ name: 'data_list_dir'
+ default: '/tmp/onap-offline/build/data_lists'
+ description: 'Directory of the data_lists'
+ - string:
+ name: 'remote_user'
+ default: 'centos'
+ description: User of the VM, used when sshing
+ - string:
+ name: 'resources_dir'
+ default: '/tmp/resources'
+ description: 'Directory of resources'
+ - string:
+ name: 'ssh_key'
+ default: '/home/ubuntu/.ssh/id_rsa'
+ description: 'Key used to ssh onto the VM'
+ - string:
+ name: 'sed_cmd'
+ # Injects an extra echo before the final 'exit 0' of the
+ # docker-images-collector script (workaround for it hanging on exit 0).
+ default: s/exit 0/echo -e '\''another line'\'' \nexit 0
+ description: 'Adding another echo before the end of the script as the script would hang on exit 0'
+ - string:
+ name: 'build_dir'
+ default: '/tmp/onap-offline/build'
+ description: 'Build directory'
+
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw-escape: ../scripts/3_build_download_artifacts.sh
+ wrappers:
+ - timestamps
--- /dev/null
+# JJB job 1.4: builds the nexus blob from the images downloaded in job 1.3,
+# then deletes those images to free disk space for the tarball creation
+# (see 4_build_nexus.sh).
+- job:
+ name: '1.4_onap_offline_build_nexus'
+ description: 'This job takes in the downloaded images and builds the nexus blob. After nexus blob is built it deletes the images that were
+ downloaded during job 3 in order to free up space and allow for a correct creation of resource tar ball.'
+ node: onap-offline-ubuntu1804
+ parameters:
+ - string:
+ name: 'remote_user'
+ default: 'centos'
+ description: User of the VM, used when sshing
+ - string:
+ name: 'ssh_key'
+ default: '/home/ubuntu/.ssh/id_rsa'
+ description: 'Key used to ssh onto the VM'
+ - string:
+ name: 'data_list_dir'
+ default: '/tmp/onap-offline/build/data_lists'
+ description: 'Directory of the data_lists'
+ - string:
+ name: 'resources_dir'
+ default: '/tmp/resources'
+ description: 'Directory of resources'
+ - string:
+ name: 'build_dir'
+ default: '/tmp/onap-offline/build'
+ description: 'Build directory'
+
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw-escape: ../scripts/4_build_nexus.sh
+ wrappers:
+ - timestamps
--- /dev/null
+# JJB job 1.5: runs the offline-installer package.py on the build VM to
+# produce the installation tar files (see 5_build_create_package.sh).
+- job:
+ name: '1.5_onap_offline_build_create_packages'
+ description: 'This job creates the tar files which will be used for the installation of ONAP.'
+ node: onap-offline-ubuntu1804
+ parameters:
+ - string:
+ name: 'remote_user'
+ default: 'centos'
+ description: User of the VM, used when sshing
+ - string:
+ name: 'package_oom_cmd'
+ default: 'sudo /tmp/onap-offline/build/package.py https://gerrit.onap.org/r/oom --application-repository_reference master'
+ description: 'Change --application-repository_reference to whichever oom branch you want to use
+ -master
+ -dublin'
+ - string:
+ name: 'ssh_key'
+ default: '/home/ubuntu/.ssh/id_rsa'
+ description: 'Key used to ssh onto the VM'
+ - string:
+ name: 'resources_dir'
+ default: '/tmp/resources'
+ description: 'Directory of resources'
+ builders:
+ - build-name-setter:
+ template: '#${BUILD_NUMBER}'
+ macro: true
+ - shell:
+ !include-raw-escape: ../scripts/5_build_create_package.sh
+ wrappers:
+ - timestamps
--- /dev/null
+#!/bin/bash
+
+# Create OpenStack heat environment file
+cat <<EOF > ${heat_environment}
+parameters:
+ instance_name: ${openstack_build_stack_name}_instance
+ image_name: ${openstack_image}
+ flavor_name: ${openstack_flavor}
+ #network_name: ${openstack_network}
+ key: ${openstack_ssh_key}
+ public_net_id: ${openstack_net_id}
+ public_net_subnet: ${openstack_net_subnet}
+ onap_sg: ${openstack_security_group}
+ volume_name: ${openstack_volume}
+ volume_size: ${openstack_volume_size}
+EOF
--- /dev/null
+#!/bin/bash
+# Create OpenStack heat template file
+
+cat <<EOF > ${heat_template}
+heat_template_version: 2013-05-23
+
+description: Simple template to deploy a single compute instance
+
+parameters:
+ instance_name:
+ type: string
+ label: Instance Name
+ description: Name of the instance to be used
+ key:
+ type: string
+ label: Key Name
+ description: Name of key-pair to be used for compute instance
+ image_name:
+ type: string
+ label: Image ID
+ description: Image to be used for compute instance
+ flavor_name:
+ type: string
+ label: Instance Type
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Netid
+ description: public NetId
+ public_net_subnet:
+ type: string
+ label: subnet
+ description: public subnet NetId
+ onap_sg:
+ type: string
+ default: default
+ volume_name:
+ type: string
+ default: onap_offline_build_volume
+ volume_size:
+ type: number
+ default: 400
+
+
+resources:
+ VMwithvolume_0_private_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: public_net_id }
+ fixed_ips: [{"subnet": { get_param: public_net_subnet }}]
+ security_groups:
+ - { get_param: onap_sg }
+
+
+ VMwithvolume_0:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_name }
+ availability_zone: nova
+ key_name: { get_param: key }
+ image: { get_param: image_name }
+ flavor: { get_param: flavor_name }
+ networks:
+ - port: { get_resource: VMwithvolume_0_private_port }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __mount_dir__: "aa"
+ template: |
+ #!/bin/bash
+ set -e
+
+
+ while [ ! -e /dev/vdb ]; do echo Waiting for volume /dev/sdb to attach; sleep 1; done
+
+
+
+ echo "Partitions not formated, format it as ext4"
+ # yes /dev/disk/by-id/, partprobe and hdparm show it is there, but no it is is not ready
+ sleep 1
+ mkfs.ext4 /dev/vdb
+ file -sL /dev/disk/by-id/*
+
+ mkdir -pv /tmp
+ # mount on reboot
+ echo "/dev/vdb /tmp ext4 defaults,nofail 0 0" >> /etc/fstab
+ # mount now
+ mount /tmp
+
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ name: { get_param: volume_name }
+ size: { get_param: volume_size }
+
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: cinder_volume }
+ instance_uuid: { get_resource: VMwithvolume_0 }
+ mountpoint: /dev/vdb
+EOF
--- /dev/null
+#!/bin/bash
+
+set -x
+
+# Delete the stack if exists
+openstack stack show -c id ${openstack_build_stack_name} && openstack stack delete -y --wait ${openstack_build_stack_name}
+sleep 1
+# Create the stack
+openstack stack create -f yaml -e ${heat_environment} -t ${heat_template} --wait ${openstack_build_stack_name}
+sleep ${ssh_timeout}
+
+#TODO: Check if need to remove reference to "om_ran" in line below [eronkeo: 10-10-2019] ** DONE **
+# build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/om_ran=//g')
+build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/.*=//g')
+
+### Checking if the VM is up, if not retry till it is and continue with the script ###
+for n in $(seq 1 40); do
+ timeout 1 ping -c 1 "${build_node_ip}" > /dev/null 2>&1
+ RESULT=$?
+ if [ "$RESULT" -eq "0" ] ; then
+ echo "RESULT: $RESULT"
+ n=40
+ break
+ else
+ echo "Failed to connect to "${build_node_ip}". RESULT: $RESULT - Retrying in 10 seconds..."
+ sleep 10
+ fi
+done
+
+echo
+echo "========================================"
+echo "build node ip: ${build_node_ip}"
+echo "========================================"
+echo
--- /dev/null
+#!/bin/bash
+
+set -x
+
+# Prepare the CentOS build VM: add docker/epel repos, install the build
+# tool-chain, start docker, clone the offline-installer repo and install
+# its python requirements. Runs every step over ssh as the remote user.
+
+### Some other ways to get build_node_ip in case you don't want to make a file on Jenkins server ###
+#build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${stack_name} | awk '{print $NF}')
+#build_node_ip=$(openstack server show ${stack_name} -f value -c addresses | cut -d' ' -f2 )
+
+### Setting variables used in this script ###
+build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/.*=//g')
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${remote_user}@${build_node_ip}"
+docker_repo="https://download.docker.com/linux/centos/docker-ce.repo"
+
+####################################
+### PREPARATIONS FOR CENTOS ONLY ###
+####################################
+
+### Required by special centos docker recommended by ONAP ###
+RESULT=$(${ssh_cmd} "sudo yum-config-manager --add-repo ${docker_repo}")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install add repo successfully: $RESULT"
+ exit -1
+fi
+
+### Enable epel repo for npm and jq ###
+RESULT=$(${ssh_cmd} "sudo yum install -y epel-release")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install epel-release successfully: $RESULT"
+ exit -1
+fi
+
+### Install following packages ###
+RESULT=$(${ssh_cmd} "sudo yum install -y docker-ce-18.09.5 python-pip git createrepo expect nodejs npm jq")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install packages successfully: $RESULT"
+ exit -1
+fi
+
+### Offline Installer is using python3 now, thus need to install python3 and pip3 ###
+RESULT=$(${ssh_cmd} "sudo yum install -y python36 python36-pip")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install packages: $RESULT"
+ exit -1
+fi
+
+### Install Twine, specific version 1.15.0 used, because new version 2.0.0 breaks the installation ###
+RESULT=$(${ssh_cmd} "sudo pip install twine==1.15.0")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to pip install twine: $RESULT"
+ exit -1
+fi
+
+### Starting Docker Service ###
+RESULT=$(${ssh_cmd} "sudo service docker start")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to start docker service: $RESULT"
+ exit -1
+fi
+
+### Clone the Offline Installer repo ###
+RESULT=$(${ssh_cmd} " sudo git clone https://gerrit.onap.org/r/oom/offline-installer /tmp/onap-offline")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to clone onap/offline/install repo: $RESULT"
+ exit -1
+fi
+sleep 5
+
+### Install the required python packages ###
+RESULT=$(${ssh_cmd} "sudo pip3 install -r /tmp/onap-offline/build/requirements.txt")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install: $RESULT"
+ exit -1
+fi
+RESULT=$(${ssh_cmd} "sudo pip3 install -r /tmp/onap-offline/build/download/requirements.txt")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install: $RESULT"
+ exit -1
+fi
--- /dev/null
+#!/bin/bash
+
+set -x
+
+### Setting up variables used in this script ###
+build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/.*=//g')
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${remote_user}@${build_node_ip}"
+onap_docker_images_dir="/tmp/resources/offline_data/docker_images_for_nexus"
+
+${ssh_cmd} "${clone_oom_cmd}"
+
+### Changing onap values.yaml to include all the components of onap, as by default all are set to false and then the data_list creator script ###
+### would not work as intended ###
+
+#TODO: Is this the best way to do this? The values.yaml could change when new components are added and static content will not mirror this. [eronkeo: 10-10-2019]
+${ssh_cmd} "sudo bash -c \"cat << EOF > /tmp/oom/kubernetes/onap/values.yaml
+# Copyright © 2019 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration overrides.
+#
+# These overrides will affect all helm charts (ie. applications)
+# that are listed below and are 'enabled'.
+#################################################################
+global:
+ # Change to an unused port prefix range to prevent port conflicts
+ # with other instances running within the same k8s cluster
+ nodePortPrefix: 302
+ nodePortPrefixExt: 304
+
+ # ONAP Repository
+ # Uncomment the following to enable the use of a single docker
+ # repository but ONLY if your repository mirrors all ONAP
+ # docker images. This includes all images from dockerhub and
+ # any other repository that hosts images for ONAP components.
+ #repository: nexus3.onap.org:10001
+ repositoryCred:
+ user: docker
+ password: docker
+
+ # readiness check - temporary repo until images migrated to nexus3
+ readinessRepository: oomk8s
+ # logging agent - temporary repo until images migrated to nexus3
+ loggingRepository: docker.elastic.co
+
+ # image pull policy
+ pullPolicy: Always
+
+ # default mount path root directory referenced
+ # by persistent volumes and log files
+ persistence:
+ mountPath: /dockerdata-nfs
+ enableDefaultStorageclass: false
+ parameters: {}
+ storageclassProvisioner: kubernetes.io/no-provisioner
+ volumeReclaimPolicy: Retain
+
+ # override default resource limit flavor for all charts
+ # flag to enable debugging - application support required
+ debugEnabled: false
+
+#################################################################
+# Enable/disable and configure helm charts (ie. applications)
+# to customize the ONAP deployment.
+#################################################################
+aaf:
+ enabled: true
+aai:
+ enabled: true
+appc:
+ enabled: true
+ config:
+ openStackType: OpenStackProvider
+ openStackName: OpenStack
+ openStackKeyStoneUrl: http://localhost:8181/apidoc/explorer/index.html
+ openStackServiceTenantName: default
+ openStackDomain: default
+ openStackUserName: admin
+ openStackEncryptedPassword: admin
+cassandra:
+ enabled: true
+clamp:
+ enabled: true
+cli:
+ enabled: true
+consul:
+ enabled: true
+contrib:
+ enabled: true
+dcaegen2:
+ enabled: true
+#Pnda site is down and can't pull any version of pnda at the moment
+pnda:
+ enabled: false
+dmaap:
+ enabled: true
+esr:
+ enabled: true
+log:
+ enabled: true
+sniro-emulator:
+ enabled: true
+oof:
+ enabled: true
+mariadb-galera:
+ enabled: true
+msb:
+ enabled: true
+multicloud:
+ enabled: true
+nbi:
+ enabled: true
+ config:
+ # openstack configuration
+ openStackRegion: "Yolo"
+ openStackVNFTenantId: "1234"
+nfs-provisioner:
+ enabled: true
+policy:
+ enabled: true
+pomba:
+ enabled: true
+portal:
+ enabled: true
+robot:
+ enabled: true
+ config:
+ # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
+ openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+sdc:
+ enabled: true
+sdnc:
+ enabled: true
+
+ replicaCount: 1
+
+ mysql:
+ replicaCount: 1
+so:
+ enabled: true
+
+ replicaCount: 1
+
+ liveness:
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+ # so server configuration
+ config:
+ # message router configuration
+ dmaapTopic: "AUTO"
+ # openstack configuration
+ openStackUserName: "vnf_user"
+ openStackRegion: "RegionOne"
+ openStackKeyStoneUrl: "http://1.2.3.4:5000"
+ openStackServiceTenantName: "service"
+ openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+
+ # configure embedded mariadb
+ mariadb:
+ config:
+ mariadbRootPassword: password
+uui:
+ enabled: true
+vfc:
+ enabled: true
+vid:
+ enabled: true
+vnfsdk:
+ enabled: true
+modeling:
+ enabled: true
+nginx-ingress:
+ enabled: true
+EOF
+\"
+"
+
+###TODO There is a script created in the offline installer to check rpms and create repo, but at the moment it is not usable, thus it is done manually###
+#${ssh_cmd} "sudo /tmp/onap-offline/build/create_repo.sh -d $(pwd)"
+
+### RPM's for CentOS were changed, thus we need to remake the rpm.list file ###
+# Overwrites the onap_rpm.list data list on the build VM with pinned CentOS 7
+# package versions; the heredoc body below is the emitted file content.
+${ssh_cmd} "sudo bash -c \"cat << EOF > ${build_dir}/data_lists/onap_rpm.list
+#Renewed list of rpms
+containerd.io-1.2.5-3.1.el7.x86_64
+container-selinux-1.12.5-14.el7.x86_64
+container-selinux-2.107-3.el7.noarch
+docker-ce-18.09.5-3.el7.x86_64
+docker-ce-cli-18.09.6-3.el7.x86_64
+gssproxy-0.7.0-26.el7.x86_64
+keyutils-1.5.8-3.el7.x86_64
+libbasicobjects-0.1.1-32.el7.x86_64
+libcollection-0.7.0-32.el7.x86_64
+libevent-2.0.21-4.el7.x86_64
+libini_config-1.3.1-32.el7.x86_64
+libnfsidmap-0.25-19.el7.x86_64
+libpath_utils-0.2.1-32.el7.x86_64
+libref_array-0.1.5-32.el7.x86_64
+libverto-libevent-0.2.5-4.el7.x86_64
+nfs-utils-1.3.0-0.65.el7.x86_64
+python-docker-py-1.10.6-9.el7_6.noarch
+python-docker-pycreds-0.3.0-9.el7_6.noarch
+python-ipaddress-1.0.16-2.el7.noarch
+python-jsonpointer-1.9-2.el7.noarch
+python-websocket-client-0.56.0-3.git3c25814.el7.noarch
+EOF\"
+"
+
+###Need to download and install helm in order to populate data_list according to oom helm charts###
+###Since we cannot login as root, and scripts presume you are a root user,need to set up correctly to be able to use helm ###
+
+${ssh_cmd} "sudo ${build_dir}/download/download.py --http ${data_list_dir}/infra_bin_utils.list ${resources_dir}/downloads"
+${ssh_cmd} "sudo tar -xf ${resources_dir}/downloads/storage.googleapis.com/kubernetes-helm/*.tar.gz linux-amd64/helm"
+${ssh_cmd} "sudo mv linux-amd64/helm /usr/local/bin/helm"
+# sed uses '[' as the s/// delimiter here: appends ":/usr/local/bin" to the
+# sudoers secure_path so 'sudo helm' resolves.
+${ssh_cmd} "sudo sed -i -e '/secure_path/ s[=.*[&:/usr/local/bin[' /etc/sudoers"
+# sed_cmd (job parameter) injects an extra echo before the collector's
+# 'exit 0' - workaround for the script hanging on exit 0.
+${ssh_cmd} "sudo sed -i ${sed_cmd} ${build_dir}/creating_data/docker-images-collector.sh"
+${ssh_cmd} "sudo ${build_dir}/creating_data/docker-images-collector.sh /tmp/oom/kubernetes/onap &>/dev/null"
+
+###*******Commands to incorporate Online PART 1*******###
+#${ssh_cmd} "sudo curl https://artifactory.nordix.org/artifactory/list/onap/online-install/master/latest/onap_docker_images.list -o ${onap_offline_dir}/onap_docker_images_online.list"
+#scp -o StrictHostKeychecking=no -i ${ssh_key} ${WORKSPACE}/build/scripts/docker_sha_downloader.py ${openstack_user}@${build_node_ip}:${onap_offline_dir}/docker_sha_downloader.py
+#${ssh_cmd} "sudo $(onap_offline_dir}/docker_sha_downloader.py -l onap_docker_images_online.list -d ${onap_docker_images_dir}"
+#${ssh_cmd} "sudo sed -i '/nexus3.onap.org:10001\/onap/d' ${data_list_dir}/onap_docker_images.list"
+
+###Download Scripts run step by step instead of using a wrapper script###
+${ssh_cmd} "sudo ${build_dir}/download/download.py \
+--docker ${data_list_dir}/infra_docker_images.list ${resources_dir}/offline_data/docker_images_infra \
+--docker ${data_list_dir}/k8s_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
+--docker ${data_list_dir}/rke_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
+--docker ${data_list_dir}/onap_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus"
+
+# Single retry: drop the known-bad duplicate image entry, then re-run the
+# same download command once.
+if [ $? -ne 0 ]; then
+ echo "Error downloading docker images: Retrying"
+ #There is a problem with the collector making a list with the same image being pulled twice, but once from a bad repo, thus it needs to be removed
+ ${ssh_cmd} "sudo sed -i '/registry.hub.docker.com\/onap\/multicloud\/framework-artifactbroker:1.4.2/d' ${build_dir}/data_lists/onap_docker_images.list"
+ ${ssh_cmd} "sudo ${build_dir}/download/download.py \
+ --docker ${data_list_dir}/infra_docker_images.list ${resources_dir}/offline_data/docker_images_infra \
+ --docker ${data_list_dir}/k8s_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
+ --docker ${data_list_dir}/rke_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus \
+ --docker ${data_list_dir}/onap_docker_images.list ${resources_dir}/offline_data/docker_images_for_nexus"
+ if [ $? -ne 0 ]; then
+ echo "Error downloading docker images -----RETRY FAILED-----"
+ exit -1
+ fi
+fi
+
+${ssh_cmd} "sudo ${build_dir}/download/download.py --rpm ${data_list_dir}/onap_rpm.list ${resources_dir}/pkg/rhel"
+if [ $? -ne 0 ]; then
+ echo "Error downloading rpm files"
+ exit -1
+fi
+
+###Need to create repo, as that is used in the deployment stage###
+${ssh_cmd} "sudo createrepo /tmp/resources/pkg/rhel"
+if [ $? -ne 0 ]; then
+ echo "Error creating repo"
+ exit -1
+fi
+
+###*******Commands to incorporate Online PART 2 ( TO BE LEFT COMMENTED OUT FOR NIGHTLY PURPOSES ) *******###
+#${ssh_cmd} "sudo bash -c \"cat ${onap_offline_dir}/onap_docker_images_online.list | sed 's/#sha256.*//' > ${onap_offline_dir}/onap_docker_images_work.list\""
+#${ssh_cmd} "sudo sed -i 's/registry.nordix.org\/onap\/online-install\//nexus.onap.org:10001\//' ${onap_offline_dir}/onap_docker_images_work.list"
+#${ssh_cmd} "sudo bash -c \"cat ${onap_offline_dir}/onap_docker_images_work.list | sort >> ${data_list_dir}/onap_docker_images.list\""
+
+
+###SHOULD NOT BE NEEDED ANYMORE IF WE ARE DROPPING THE 4 DIGIT TAG, but will be kept just in case###
+#${ssh_cmd} "sudo sed -i 's/\.[0-9]*$//' ${onap_offline_dir}/onap_docker_images_work.list"
+#${ssh_cmd} "sudo sed -i 's/\.[0-9]*[[:blank:]]$//' ${onap_offline_dir}/onap_docker_images_work.list"
--- /dev/null
+#!/bin/bash
+
+set -x
+
+# Build the nexus blob on the build VM from the downloaded image lists, then
+# delete the downloaded images/npm tars to free space for packaging (job 1.5).
+
+### Setting up variables used in the script ###
+# Build VM address: last Networks field of the matching server, with the
+# leading "<network>=" prefix stripped.
+build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/.*=//g')
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${remote_user}@${build_node_ip}"
+
+### Concat'ing the onap and rke images lists ###
+RESULT=$(${ssh_cmd} "sudo bash -c \"cat ${data_list_dir}/rke_docker_images.list >> ${data_list_dir}/onap_docker_images.list\"")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to concat the lists: ${RESULT}"
+ exit -1
+fi
+
+### Building the nexus blob ###
+RESULT=$(${ssh_cmd} "sudo ${build_dir}/build_nexus_blob.sh")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to build nexus blob: ${RESULT}"
+ exit -1
+fi
+
+### Deleting docker images and npm files in order to free up space on the server ###
+RESULT=$(${ssh_cmd} "sudo rm -f ${resources_dir}/offline_data/docker_images_for_nexus/*")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to cleanup nexus docker images: ${RESULT}"
+ exit -1
+fi
+
+RESULT=$(${ssh_cmd} "sudo rm -rf ${resources_dir}/offline_data/npm_tar")
+if [[ $? -ne 0 ]]; then
+ echo "Failed to cleanup npm directory: ${RESULT}"
+ exit -1
+fi
--- /dev/null
+#!/bin/bash
+
+set -x
+
+# Run the offline-installer package.py on the build VM to create the
+# resources/sw tar files under /tmp/data, retrying once after clearing a
+# stale /tmp/data, then normalise the "custom" package names.
+
+### Setting up variables used in this script ###
+# Build VM address: last Networks field of the matching server, with the
+# leading "<network>=" prefix stripped.
+build_node_ip=$(openstack server list -c Name -c Networks -f value | egrep ${openstack_build_stack_name} | awk '{print $NF}' | sed 's/.*=//g')
+ssh_cmd="ssh -o StrictHostKeychecking=no -i ${ssh_key} ${remote_user}@${build_node_ip}"
+
+### Tarring up the files refer to package_oom_cmd for oom version used ###
+RESULT=$(${ssh_cmd} "${package_oom_cmd} --output-dir /tmp/data --resources-directory ${resources_dir}")
+if [[ $? -ne 0 ]]; then
+ ### /tmp/data might be already used, thus clean it up and restart again ###
+ ${ssh_cmd} "sudo bash -c \"rm -rf /tmp/data\""
+ ${ssh_cmd} "${package_oom_cmd} --output-dir /tmp/data --resources-directory ${resources_dir}"
+ if [[ $? -ne 0 ]]; then
+ echo "failed to successfully run package.py"
+ exit -1
+ fi
+fi
+
+### Since the values.yaml has pnda set to false (due to issues with the website) the package created contains a different name than used usually ###
+### Need to change the name of the tar files ###
+
+${ssh_cmd} "sudo bash -c \"test -f /tmp/data/resources_packagecustom.tar\""
+if [[ $? -eq 0 ]]; then
+ echo "There is a custom package";
+ ${ssh_cmd} "sudo bash -c \"mv /tmp/data/resources_packagecustom.tar /tmp/data/resources_package.tar\"";
+fi
+
+${ssh_cmd} "sudo bash -c \"test -f /tmp/data/sw_packagecustom.tar\""
+if [[ $? -eq 0 ]]; then
+ echo "There is a custom package";
+ ${ssh_cmd} "sudo bash -c \"mv /tmp/data/sw_packagecustom.tar /tmp/data/sw_package.tar\"";
+fi
--- /dev/null
+import argparse
+import os
+import re
+import subprocess
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument('-d', action='store', dest='directory', help='Directory location to store ONAP docker images')
+parser.add_argument('-l', action='store', dest='docker_images_list', help='List of ONAP docker images')
+args = parser.parse_args()
+
+
+def get_image_tokens(image_list):
+ tokens = []
+ with open(image_list) as f:
+ for line in f.readlines():
+ pattern = re.compile("^(.*):([0-9.]*)\\s#(.*)$")
+ matches = re.match(pattern, line)
+ tokens.append(matches.groups())
+
+ return tokens
+
+
+def docker_save_images(tokens, directory):
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ for token in tokens:
+ docker_pull(token)
+ docker_tag(token)
+ docker_save(token, directory)
+ docker_remove_image(token)
+
+
+def docker_save(token, directory):
+ image, version, sha = token
+
+ print("Saving docker image: {image} (Version: {version})".format(image=image, version=version))
+
+ src_str = os.path.join("registry.nordix.org", "onap", "online-install")
+ repl_str = "nexus.onap.org:10001"
+ image_target = "{dir}{sep}{image}_{tag}.tar".format(dir=directory, sep=os.path.sep, image=image.replace(
+ src_str, repl_str).replace(os.path.sep, "_").replace(":", "_"), tag=version)
+
+ if not os.path.exists(image_target):
+ try:
+ subprocess.check_output(['docker', 'save', '{image}:{tag}'.format(image=image, tag=version), '-o',
+ image_target])
+ except subprocess.CalledProcessError as e:
+ print("Error saving docker file ({file}): {error}".format(file=image_target, error=e.message))
+
+
+def docker_tag(token):
+ image, version, sha = token
+
+ target_image = image.replace("registry.nordix.org/onap/online-install", "nexus.onap.org:10001")
+
+ print("Tagging docker image...\n\t/Source: {image} (SHA: {sha}, Version: {version}\n\tTarget: {target})".format(
+ image=image, sha=sha, version=version, target=target_image))
+ try:
+ subprocess.check_output(['docker', 'tag', '{image}@{tag}'.format(image=image, tag=sha),
+ '{image}:{tag}'.format(image=target_image, tag=version)])
+ except subprocess.CalledProcessError as e:
+ print("Error tagging docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+def docker_remove_image(token):
+ image, version, sha = token
+
+ print("Removing docker image: {image}:{version})".format(image=image, version=version))
+ try:
+ subprocess.check_output(['docker', 'rmi', '{image}:{tag}'.format(image=image, tag=version)])
+ except subprocess.CalledProcessError as e:
+ print("Error tagging docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+def docker_pull(token):
+ image, version, sha = token
+
+ print("Pulling docker image: {image} (Version: {version})".format(image=image, version=version))
+ try:
+ subprocess.check_output(['docker', 'pull', '{image}:{tag}'.format(image=image, tag=version)])
+ except subprocess.CalledProcessError as e:
+ print("Error pulling docker image ({image}): {error}".format(image=image, error=e.message))
+
+
+image_tokens = get_image_tokens(args.docker_images_list)
+docker_save_images(image_tokens, args.directory)