Merge "Fix docker search"
diff --git a/ansible/group_vars/infrastructure.yml b/ansible/group_vars/infrastructure.yml
index 9985d25..08a2591 100755
--- a/ansible/group_vars/infrastructure.yml
+++ b/ansible/group_vars/infrastructure.yml
@@ -3,8 +3,6 @@
 simulated_hosts:
   git:
     - gerrit.onap.org
-    - git.rancher.io
-    - github.com
   http:
     - git.onap.org
     - nexus.onap.org
diff --git a/ansible/roles/application/defaults/main.yml b/ansible/roles/application/defaults/main.yml
index 84fffec..2ae668a 100644
--- a/ansible/roles/application/defaults/main.yml
+++ b/ansible/roles/application/defaults/main.yml
@@ -11,3 +11,5 @@
 app_skip_helm_override: false
 app_helm_override_role: application-override
 app_helm_override_file: "{{ app_data_path }}/override.yaml"
+helm_overide_files:
+  - "{{ app_helm_override_file }}"
diff --git a/ansible/roles/application/tasks/install.yml b/ansible/roles/application/tasks/install.yml
index bdf6e51..003631d 100644
--- a/ansible/roles/application/tasks/install.yml
+++ b/ansible/roles/application/tasks/install.yml
@@ -71,7 +71,7 @@
           {{ app_helm_release_name }}
           {{ helm_repository_name }}/{{ app_helm_chart_name }}
           --namespace {{ app_kubernetes_namespace }}
-          {{ '' if app_skip_helm_override else '-f ' + app_helm_override_file }}
+          {% if not app_skip_helm_override %} {% for arg in helm_overide_files %} {{ '-f ' + arg }} {% endfor %} {% endif %}
           {% for arg in helm_extra_install_options %} {{ arg.opt }} {% endfor %}
   changed_when: true  # when executed its a changed type of action
   register: helm_install
diff --git a/ansible/roles/dns/handlers/main.yml b/ansible/roles/dns/handlers/main.yml
index cd1e4b4..3d7570f 100644
--- a/ansible/roles/dns/handlers/main.yml
+++ b/ansible/roles/dns/handlers/main.yml
@@ -2,6 +2,7 @@
 - name: Run dns server container
   docker_container:
     name: dns-server
+    network_mode: host
     image: "{{ dns_server_image }}"
     command: -H /simulated_hosts --log-facility=- --dns-loop-detect
     capabilities: NET_ADMIN
diff --git a/ansible/roles/helm/.yamllint b/ansible/roles/helm/.yamllint
new file mode 100644
index 0000000..ad0be76
--- /dev/null
+++ b/ansible/roles/helm/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+  truthy: disable
diff --git a/ansible/roles/helm/molecule/default/molecule.yml b/ansible/roles/helm/molecule/default/molecule.yml
new file mode 100644
index 0000000..869f87f
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/molecule.yml
@@ -0,0 +1,32 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: infrastructure-server
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: True
+    privileged: true
+    override_command: False
+    groups:
+      - infrastructure
+provisioner:
+  name: ansible
+  lint:
+    name: ansible-lint
+  env:
+    ANSIBLE_ROLES_PATH: ../../../../test/roles
+    ANSIBLE_LIBRARY: ../../../../library
+  inventory:
+    group_vars:
+      all:
+        app_name: onap
+        app_data_path: "/opt/{{ app_name }}"
+        helm_bin_dir: /usr/local/bin
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
diff --git a/ansible/roles/helm/molecule/default/playbook.yml b/ansible/roles/helm/molecule/default/playbook.yml
new file mode 100644
index 0000000..2705b16
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+  hosts: all
+  roles:
+    - helm
diff --git a/ansible/roles/helm/molecule/default/prepare.yml b/ansible/roles/helm/molecule/default/prepare.yml
new file mode 100644
index 0000000..8a149b8
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/prepare.yml
@@ -0,0 +1,5 @@
+---
+- name: Prepare for helm tests
+  hosts: all
+  roles:
+    - prepare-helm
diff --git a/ansible/roles/helm/molecule/default/tests/test_default.py b/ansible/roles/helm/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..2395183
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/tests/test_default.py
@@ -0,0 +1,11 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_helm(host):
+    assert host.file('/usr/local/bin/helm').exists
+    assert host.run('helm').rc != 127
diff --git a/ansible/roles/kubectl/.yamllint b/ansible/roles/kubectl/.yamllint
new file mode 100644
index 0000000..ad0be76
--- /dev/null
+++ b/ansible/roles/kubectl/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+  truthy: disable
diff --git a/ansible/roles/kubectl/molecule/default/molecule.yml b/ansible/roles/kubectl/molecule/default/molecule.yml
new file mode 100644
index 0000000..bffb29e
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/molecule.yml
@@ -0,0 +1,31 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: infrastructure-server
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: True
+    privileged: true
+    override_command: False
+    groups:
+      - infrastructure
+provisioner:
+  name: ansible
+  lint:
+    name: ansible-lint
+  env:
+    ANSIBLE_ROLES_PATH: ../../../../test/roles
+    ANSIBLE_LIBRARY: ../../../../library
+  inventory:
+    group_vars:
+      all:
+        app_name: onap
+        app_data_path: "/opt/{{ app_name }}"
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
diff --git a/ansible/roles/kubectl/molecule/default/playbook.yml b/ansible/roles/kubectl/molecule/default/playbook.yml
new file mode 100644
index 0000000..ab9c08a
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+  hosts: all
+  roles:
+    - kubectl
diff --git a/ansible/roles/kubectl/molecule/default/prepare.yml b/ansible/roles/kubectl/molecule/default/prepare.yml
new file mode 100644
index 0000000..ec17626
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/prepare.yml
@@ -0,0 +1,5 @@
+---
+- name: Prepare for kubectl tests
+  hosts: all
+  roles:
+    - prepare-kubectl
diff --git a/ansible/roles/kubectl/molecule/default/tests/test_default.py b/ansible/roles/kubectl/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..4f799b9
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/tests/test_default.py
@@ -0,0 +1,11 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_kubectl(host):
+    assert host.file('/usr/local/bin/kubectl').exists
+    assert host.run('kubectl').rc != 127
diff --git a/ansible/roles/nfs/molecule/default/molecule.yml b/ansible/roles/nfs/molecule/default/molecule.yml
index 7bacf3c..a8ca6a3 100644
--- a/ansible/roles/nfs/molecule/default/molecule.yml
+++ b/ansible/roles/nfs/molecule/default/molecule.yml
@@ -19,7 +19,7 @@
       - name: nfs-net
     volumes:
       - /sys/fs/cgroup:/sys/fs/cgroup:ro
-      - ${HOME}/data:/dockerdata-nfs:rw
+      - /dockerdata-nfs
 
   - name: kubernetes-node-2
     image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
diff --git a/ansible/roles/rke/.yamllint b/ansible/roles/rke/.yamllint
new file mode 100644
index 0000000..ad0be76
--- /dev/null
+++ b/ansible/roles/rke/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+  truthy: disable
diff --git a/ansible/roles/rke/molecule/default/destroy.yml b/ansible/roles/rke/molecule/default/destroy.yml
new file mode 100644
index 0000000..591da82
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/destroy.yml
@@ -0,0 +1,34 @@
+---
+- name: Destroy
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  no_log: "{{ not (lookup('env', 'MOLECULE_DEBUG') | bool or molecule_yml.provisioner.log|default(false) | bool) }}"
+  tasks:
+    - name: Destroy molecule instance(s)
+      docker_container:
+        name: "{{ item.name }}"
+        docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+        state: absent
+        force_kill: "{{ item.force_kill | default(true) }}"
+        # Modification: we want to clean up old volumes.
+        keep_volumes: false
+      register: server
+      with_items: "{{ molecule_yml.platforms }}"
+      async: 7200
+      poll: 0
+
+    - name: Wait for instance(s) deletion to complete
+      async_status:
+        jid: "{{ item.ansible_job_id }}"
+      register: docker_jobs
+      until: docker_jobs.finished
+      retries: 300
+      with_items: "{{ server.results }}"
+
+    - name: Delete docker network(s)
+      docker_network:
+        name: "{{ item }}"
+        docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+        state: absent
+      with_items: "{{ molecule_yml.platforms | molecule_get_docker_networks }}"
diff --git a/ansible/roles/rke/molecule/default/molecule.yml b/ansible/roles/rke/molecule/default/molecule.yml
new file mode 100644
index 0000000..e8e5ad7
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/molecule.yml
@@ -0,0 +1,78 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: infrastructure-server
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    env:
+      container: docker
+    groups:
+      - infrastructure
+      - kubernetes-control-plane
+    networks:
+      - name: rke
+    purge_networks: true
+
+  - name: kubernetes-node-1
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    env:
+      container: docker
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    groups:
+      - kubernetes
+    networks:
+      - name: rke
+    purge_networks: true
+
+  - name: kubernetes-node-2
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    env:
+      container: docker
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    groups:
+      - kubernetes
+    networks:
+      - name: rke
+    purge_networks: true
+
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../../../test/roles
+    ANSIBLE_LIBRARY: ../../../../library
+  inventory:
+    links:
+      group_vars: ../../../../group_vars
+  options:
+    e: "app_data_path=/opt/onap"
+  lint:
+    name: ansible-lint
+scenario:
+  name: default
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
diff --git a/ansible/roles/rke/molecule/default/playbook.yml b/ansible/roles/rke/molecule/default/playbook.yml
new file mode 100644
index 0000000..09dbfb8
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/playbook.yml
@@ -0,0 +1,30 @@
+---
+- name: "Set cluster_ip"
+  hosts: all
+  tasks:
+    - name: "Set cluster_ip fact"
+      set_fact:
+        cluster_ip: "{{ ansible_default_ipv4.address }}"
+
+- name: Configure kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: config
+
+- name: Prepare kubernetes nodes (RKE)
+  hosts:
+    - kubernetes
+    - kubernetes-control-plane
+  roles:
+    - role: rke
+      vars:
+        mode: node
+
+- name: Deploy kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: deploy
diff --git a/ansible/roles/rke/molecule/default/prepare.yml b/ansible/roles/rke/molecule/default/prepare.yml
new file mode 100644
index 0000000..6bad2b8
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/prepare.yml
@@ -0,0 +1,15 @@
+---
+- name: "Prepare hosts"
+  hosts: all
+  roles:
+    - role: prepare-rke
+      vars:
+        mode: all
+    - prepare-docker-dind
+
+- name: "Infra specific preparations"
+  hosts: infrastructure
+  roles:
+    - role: prepare-rke
+      vars:
+        mode: infra
diff --git a/ansible/roles/rke/molecule/default/tests/test_controlplane.py b/ansible/roles/rke/molecule/default/tests/test_controlplane.py
new file mode 100644
index 0000000..0bfbca2
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_controlplane.py
@@ -0,0 +1,14 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(
+      'kubernetes-control-plane')
+
+
+@pytest.mark.parametrize('container_name', [
+  'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'kubelet'])
+def test_container_running(host, container_name):
+    assert host.docker(container_name).is_running
diff --git a/ansible/roles/rke/molecule/default/tests/test_infrastructure.py b/ansible/roles/rke/molecule/default/tests/test_infrastructure.py
new file mode 100644
index 0000000..9ba11d6
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_infrastructure.py
@@ -0,0 +1,56 @@
+import os
+import pytest
+import json
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('infrastructure')
+
+
+@pytest.mark.parametrize('filename', [
+  '/root/.kube/config',
+  '/opt/onap/cluster/cluster.yml',
+  '/opt/onap/cluster/cluster.rkestate'])
+def test_file_existence(host, filename):
+    assert host.file(filename).exists
+
+
+def test_rke_in_path(host):
+    assert host.find_command('rke') == '/usr/local/bin/rke'
+
+
+def test_rke_version_works(host):
+    # Note that we need to cd to the cluster data dir first, really.
+    assert host.run('cd /opt/onap/cluster && rke version').rc == 0
+
+
+def test_nodes_ready(host):
+    # Retrieve all node names.
+    nodecmdres = host.run('kubectl get nodes -o name')
+    assert nodecmdres.rc == 0
+    nodes = nodecmdres.stdout.split('\n')
+    for node in nodes:
+        assert host.run(
+          'kubectl wait --timeout=0 --for=condition=ready ' + node).rc == 0
+
+
+def test_pods_ready(host):
+    # Retrieve all pods from all namespaces.
+    # Because we need pod and namespace name, we get full json representation.
+    podcmdres = host.run('kubectl get pods --all-namespaces -o json')
+    assert podcmdres.rc == 0
+    pods = json.loads(podcmdres.stdout)['items']
+    for pod in pods:
+        # Each pod may be either created by a job or not.
+        # In job case they should already be completed
+        # when we are here so we ignore them.
+        namespace = pod['metadata']['namespace']
+        podname = pod['metadata']['name']
+        condition = 'Ready'
+        if len(pod['metadata']['ownerReferences']) == 1 and pod[
+          'metadata']['ownerReferences'][0]['kind'] == 'Job':
+            continue
+        assert host.run(
+          'kubectl wait --timeout=120s --for=condition=' + condition + ' -n ' +
+          namespace + ' pods/' + podname).rc == 0
diff --git a/ansible/roles/rke/molecule/default/tests/test_kubernetes.py b/ansible/roles/rke/molecule/default/tests/test_kubernetes.py
new file mode 100644
index 0000000..887494f
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_kubernetes.py
@@ -0,0 +1,13 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('kubernetes')
+
+
+@pytest.mark.parametrize('container_name', [
+  'etcd', 'kubelet', 'kube-proxy'])
+def test_container_running(host, container_name):
+    assert host.docker(container_name).is_running
diff --git a/ansible/roles/rke/tasks/rke_config.yml b/ansible/roles/rke/tasks/rke_config.yml
index 4112e10..9dc0d8c 100644
--- a/ansible/roles/rke/tasks/rke_config.yml
+++ b/ansible/roles/rke/tasks/rke_config.yml
@@ -37,6 +37,7 @@
   template:
     src: cluster.yml.j2
     dest: "{{ cluster_config_dir }}/cluster.yml"
+  register: cluster_yml
 
 - name: Prepare rke addon manifest (dashboard)
   template:
diff --git a/ansible/roles/rke/tasks/rke_deploy.yml b/ansible/roles/rke/tasks/rke_deploy.yml
index 9983d08..7b36f55 100644
--- a/ansible/roles/rke/tasks/rke_deploy.yml
+++ b/ansible/roles/rke/tasks/rke_deploy.yml
@@ -1,8 +1,17 @@
 ---
+- name: "Check if rke is deployed"
+  command: "rke version"
+  args:
+    chdir: "{{ cluster_config_dir }}"
+  failed_when: false
+  changed_when: false
+  register: rke_deployed
+
 - name: Run rke up
   command: "{{ rke_bin_dir }}/rke up --config cluster.yml"
   args:
     chdir: "{{ cluster_config_dir }}"
+  when: rke_deployed.rc != 0 or cluster_yml.changed  # noqa 503
 
 - name: Ensure .kube directory is present
   file:
diff --git a/ansible/test/play-resources/molecule/default/cleanup.yml b/ansible/test/play-resources/molecule/default/cleanup.yml
deleted file mode 100644
index e0c0b62..0000000
--- a/ansible/test/play-resources/molecule/default/cleanup.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Cleanup data from instance (doing it from hosts requires root access).
-  hosts: resources
-  gather_facts: false
-  ignore_unreachable: true
-  pre_tasks:
-    - name: Find files and dirs to delete
-      find:
-        paths: /data/  # Only deleting content not dir itself as we get "Device or resource busy" error as it's mounted to container doing the deletion
-        patterns: "*"
-        recurse: true
-        file_type: any
-      register: files_to_delete
-    - name: Make file/dir path list
-      set_fact:
-        to_delete_paths: "{{ to_delete_paths | default([]) + [item.path] }}"
-      loop: "{{ files_to_delete.files }}"
-      when: files_to_delete.files is defined
-  roles:
-    - role: cleanup-directories
-      vars:
-        directories_files_list_to_remove: "{{ to_delete_paths }}"
-      when: to_delete_paths is defined
diff --git a/ansible/test/play-resources/molecule/nfs/molecule.yml b/ansible/test/play-resources/molecule/nfs/molecule.yml
index ffaabb0..9cff6b8 100644
--- a/ansible/test/play-resources/molecule/nfs/molecule.yml
+++ b/ansible/test/play-resources/molecule/nfs/molecule.yml
@@ -13,7 +13,7 @@
     override_command: false
     volumes:
       - /sys/fs/cgroup:/sys/fs/cgroup:ro
-      - ${HOME}/data:/data:rw  # mount fs from host to get nfs exportfs task working
+      - /data
     groups:
       - resources
     networks:
diff --git a/ansible/test/roles/cleanup-containers/tasks/main.yml b/ansible/test/roles/cleanup-containers/tasks/main.yml
deleted file mode 100644
index 3a800c9..0000000
--- a/ansible/test/roles/cleanup-containers/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Remove containers
-  docker_container:
-    name: "{{ item }}"
-    state: absent
-  loop: "{{ container_list }}"
diff --git a/ansible/test/roles/cleanup-directories/tasks/main.yml b/ansible/test/roles/cleanup-directories/tasks/main.yml
deleted file mode 100644
index 8e79ea0..0000000
--- a/ansible/test/roles/cleanup-directories/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Remove directories/files
-  file:
-    path: "{{ item }}"
-    state: absent
-  become: true
-  loop: "{{ directories_files_list_to_remove }}"
diff --git a/ansible/test/roles/cleanup-rancher/tasks/main.yml b/ansible/test/roles/cleanup-rancher/tasks/main.yml
deleted file mode 100644
index 21b0298..0000000
--- a/ansible/test/roles/cleanup-rancher/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# Remove containers spawned by Rancher Agent
-- name: Get list of containers spawned by Rancher Agent
-  docker_list_containers:
-    label_name: "{{ item.label }}"
-    label_value: "{{ item.value }}"
-  loop: "{{ container_list_by_label }}"
-  register: docker_list_containers_var
-
-- name: set fact # save a list of containers found by previous task to orphaned_containers var
-  set_fact: orphaned_containers="{{ orphaned_containers|default([]) + item.containers }}"
-  loop: "{{ docker_list_containers_var.results }}"
-
-- name: Remove orphaned containers
-  docker_container:
-    name: "{{ item }}"
-    state: absent
-  loop: "{{ orphaned_containers }}"
diff --git a/ansible/test/roles/prepare-docker/tasks/docker-packages.yml b/ansible/test/roles/prepare-docker/tasks/docker-packages.yml
deleted file mode 100644
index 8f55c5c..0000000
--- a/ansible/test/roles/prepare-docker/tasks/docker-packages.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: Download docker related packages
-  command: yumdownloader --destdir="{{ rpm_dir }}" "{{ docker_ce_rpm }}"
-  args:
-    creates: "{{ rpm_dir }}/{{ docker_ce_rpm }}"
-
-- name: Install docker related packages
-  yum:
-    name: "{{ rpm_dir + '/' + docker_ce_rpm + '.rpm'}}"
diff --git a/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml b/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml
deleted file mode 100644
index 0cabadf..0000000
--- a/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create docker.socket systemd dir for override
-  file:
-    path: /etc/systemd/system/docker.socket.d
-    state: directory
-
-- name: Fake dockerd dependent docker.socket service not to actually listen the docker socket as dockerd is not used in container only docker client
-  copy:
-    content: |
-      [Socket]
-      ListenStream=
-      ListenStream=/tmp/fake
-    dest: /etc/systemd/system/docker.socket.d/override.conf
diff --git a/ansible/test/roles/prepare-docker/tasks/enable-repos.yml b/ansible/test/roles/prepare-docker/tasks/enable-repos.yml
deleted file mode 100644
index 204bf03..0000000
--- a/ansible/test/roles/prepare-docker/tasks/enable-repos.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Enable docker repos back for Molecule testing purposes
-  copy:
-    remote_src: yes
-    src: "{{ item }}"
-    dest: "{{ (item | splitext)[0] }}"
-  loop: "{{ docker_needed_repos }}"
-
-- name: Disable offline repo for molecule testing purposes
-  lineinfile:
-    path: "{{ offline_repo_file }}"
-    regexp: 'enabled = 1'
-    line: 'enabled = 0'
diff --git a/ansible/test/roles/prepare-docker/tasks/main.yml b/ansible/test/roles/prepare-docker/tasks/main.yml
deleted file mode 100644
index e7d8706..0000000
--- a/ansible/test/roles/prepare-docker/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Check repositories
-  stat:
-    path: "{{ item }}"
-  loop: "{{ docker_needed_repos + [offline_repo_file] }}"
-  register: repos
-
-- name: Set fact for offline repos created
-  set_fact:
-    offline_repo_created: "{{ (repos.results | map(attribute='stat.exists') | list) is all }}"
-
-- debug:
-    var: offline_repo_created
-
-- name: Enable repos back (if package-repository role already setup offline onap repo) for molecule testing purposes to install docker
-  include_tasks: enable-repos.yml
-  when: offline_repo_created
-
-- name: Prepare docker repos normally
-  include_tasks: prepare-docker-repos.yml
-  when: not offline_repo_created
diff --git a/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml b/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml
deleted file mode 100644
index 01ea72c..0000000
--- a/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Install Docker-CE repo
-  yum_repository:
-    name: docker-ce
-    description: Docker-ce YUM repo
-    baseurl: https://download.docker.com/linux/centos/7/x86_64/stable/
-    gpgcheck: true
-    gpgkey: https://download.docker.com/linux/centos/gpg
-
-- name: Create rpm dir
-  file:
-    path: "{{ rpm_dir }}"
-    state: directory
-
-- name: Handle docker-ce packages
-  import_tasks: docker-packages.yml
-
-- name: Fake dockerd on container
-  import_tasks: docker-socket-override.yml
-  when: ansible_env.container == 'docker'
diff --git a/ansible/test/roles/prepare-docker/vars/main.yml b/ansible/test/roles/prepare-docker/vars/main.yml
deleted file mode 100644
index bcd7f36..0000000
--- a/ansible/test/roles/prepare-docker/vars/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-docker_needed_repos:
-  - /etc/yum.repos.d/CentOS-Base.repo.disabled
-  - /etc/yum.repos.d/docker-ce.repo.disabled
-offline_repo_file: /etc/yum.repos.d/onap.repo
-rpm_dir: /root/rpm
-docker_ce_rpm: docker-ce-18.09.5-3.el7.x86_64
diff --git a/ansible/test/roles/prepare-helm/defaults/main.yml b/ansible/test/roles/prepare-helm/defaults/main.yml
new file mode 100644
index 0000000..8ab9ed3
--- /dev/null
+++ b/ansible/test/roles/prepare-helm/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Helm version to download.
+helm_version: 2.12.3
diff --git a/ansible/test/roles/prepare-helm/tasks/main.yml b/ansible/test/roles/prepare-helm/tasks/main.yml
new file mode 100644
index 0000000..aa01e28
--- /dev/null
+++ b/ansible/test/roles/prepare-helm/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: "Ensure {{ app_data_path }}/downloads directory exists"
+  file:
+    path: "{{ app_data_path }}/downloads"
+    recurse: true
+    state: directory
+
+- name: "Download and unarchive helm-{{ helm_version }}"
+  unarchive:
+    src: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+    dest: "/tmp"
+    remote_src: true
+
+- name: "Copy helm binary"
+  copy:
+    src: /tmp/linux-amd64/helm
+    dest: "{{ app_data_path }}/downloads/helm"
+    remote_src: true
diff --git a/ansible/test/roles/prepare-kubectl/defaults/main.yml b/ansible/test/roles/prepare-kubectl/defaults/main.yml
new file mode 100644
index 0000000..d4e8ef9
--- /dev/null
+++ b/ansible/test/roles/prepare-kubectl/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Set to false to download kubectl in preparation for the kubectl role that
+# needs to install it; set to true to install it immediately (needed for
+# cases where it is used by verification tests of other roles).
+kubectl_install: false
+# Kubectl version.
+kubectl_version: 1.13.5
diff --git a/ansible/test/roles/prepare-kubectl/tasks/main.yml b/ansible/test/roles/prepare-kubectl/tasks/main.yml
new file mode 100644
index 0000000..b563185
--- /dev/null
+++ b/ansible/test/roles/prepare-kubectl/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: "Ensure {{ app_data_path }}/downloads exists"
+  file:
+    path: "{{ app_data_path }}/downloads"
+    state: directory
+  when: not kubectl_install
+
+- name: "Install kubectl-{{ kubectl_version }}"
+  get_url:
+    url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+    dest: "{{ '/usr/local/bin/kubectl' if kubectl_install else app_data_path+'/downloads/kubectl' }}"
+    # This mode conditional allows checking that the kubectl role, when installing
+    # kubectl, correctly adds the executable bit (bonus).
+    mode: "{{ 0755 if kubectl_install else omit }}"
diff --git a/ansible/test/roles/prepare-rke/defaults/main.yml b/ansible/test/roles/prepare-rke/defaults/main.yml
new file mode 100644
index 0000000..2cf8563
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# The RKE version.
+rke_version: 0.2.0
+# The kubectl version.
+kubectl_version: 1.13.5
diff --git a/ansible/test/roles/prepare-rke/tasks/all.yml b/ansible/test/roles/prepare-rke/tasks/all.yml
new file mode 100644
index 0000000..d4b67c1
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/all.yml
@@ -0,0 +1,6 @@
+# This is needed because login from non-root users is blocked by default.
+- name: "Allow non root logins"
+  service:
+    name: systemd-user-sessions
+    state: started
+
diff --git a/ansible/test/roles/prepare-rke/tasks/infra.yml b/ansible/test/roles/prepare-rke/tasks/infra.yml
new file mode 100644
index 0000000..55ab7f1
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/infra.yml
@@ -0,0 +1,16 @@
+---
+- name: "Ensure {{ app_data_path }}/downloads exists"
+  file:
+    path: "{{ app_data_path }}/downloads"
+    state: directory
+
+- name: "Install rke-{{ rke_version }}"
+  get_url:
+    url: "https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64"
+    dest: "{{ app_data_path }}/downloads/rke"
+
+- name: "Install kubectl-{{ kubectl_version }}"
+  get_url:
+    url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+    dest: "/usr/local/bin/kubectl"
+    mode: 0755
diff --git a/ansible/test/roles/prepare-rke/tasks/main.yml b/ansible/test/roles/prepare-rke/tasks/main.yml
new file mode 100644
index 0000000..210c9b5
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ mode }}.yml"
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 09ed896..f3edb48 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -20,7 +20,7 @@
 
 ### This script prepares Nexus repositories data blobs for ONAP
 
-## The script requires following dependencies are installed: nodejs, jq, docker
+## The script requires the following dependencies to be installed: nodejs, jq, docker, twine
 ## All required resources are expected in the upper directory created during
 ## download procedure as DATA_DIR or in the directory given as --input-directory
 ## All lists used must be in project data_lists directory or in the directory given
@@ -58,22 +58,18 @@
 LISTS_DIR="${LOCAL_PATH}/data_lists"
 
 usage () {
-    echo "   Example usage: build_nexus_blob.sh -t <tag> --input-directory </path/to/downloaded/files/dir>  --output-directory
+    echo "   Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory
            </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list>
 
-     -t | --tag release tag, taken from available on git or placed by data generating script (mandatory) must fallow scheme onap_<semver>
      -i | --input-directory directory containing file needed to create nexus blob. The structure of this directory must organized as described in build guide
      -o | --output-directory
-    -rl | --resource-list-directory directory with files containing docker, pypi and rpm lists
+    -rl | --resource-list-directory directory with files containing docker, pypi and npm lists
     "
     exit 1
 }
 
 while [ "$1" != "" ]; do
     case $1 in
-        -t | --tag )                       shift
-                                           TAG=$1
-                                           ;;
         -i | --input-directory )           shift
                                            DATA_DIR=$1
                                            ;;
@@ -90,22 +86,15 @@
     shift
 done
 
-
-# exit if no tag given
-if [ -z ${TAG} ]; then
-    usage
-    exit 1
-fi
-
 # Setup directories with resources for docker, npm and pypi
 NXS_SRC_DOCKER_IMG_DIR="${DATA_DIR}/offline_data/docker_images_for_nexus"
 NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar"
 NXS_SRC_PYPI_DIR="${DATA_DIR}/offline_data/pypi"
 
-# Setup specific resources list based on the tag provided
-NXS_DOCKER_IMG_LIST="${LISTS_DIR}/${TAG}-docker_images.list"
-NXS_NPM_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-npm.list"
-NXS_PYPI_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-pip_packages.list"
+# Setup specific resources lists
+NXS_DOCKER_IMG_LIST="${LISTS_DIR}/onap_docker_images.list"
+NXS_NPM_LIST="${LISTS_DIR}/onap_npm.list"
+NXS_PYPI_LIST="${LISTS_DIR}/onap_pip_packages.list"
 
 # Setup Nexus image used for build and install infra
 INFRA_LIST="${LISTS_DIR}/infra_docker_images.list"
@@ -340,4 +329,3 @@
 
 echo "Nexus blob is built"
 exit 0
-
diff --git a/build/creating_data/create-rhel-repo.sh b/build/creating_data/create-rhel-repo.sh
deleted file mode 100755
index 43709a7..0000000
--- a/build/creating_data/create-rhel-repo.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018-2019 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-OUTDIR="${1}"
-if [[ -z "${OUTDIR}" ]]; then
-    echo "Missing output dir"
-    exit 1
-fi
-
-# if onap.repo does not exists create it
-mkdir -p "${OUTDIR}"
-if [ ! -f "${OUTDIR}/onap.repo" ]; then
-   cat > "${OUTDIR}/onap.repo" <<EOF
-[ONAP]
-name=Offline ONAP repository
-baseurl=PATH
-enabled=1
-gpgcheck=0
-EOF
-fi
-
-# this exact docker version is required by ONAP/beijing
-# it should be available in centos docker repo
-yumdownloader --resolve --destdir="${OUTDIR}" docker-ce-18.09.5 container-selinux docker-ce-cli \
-containerd.io nfs-utils python-jsonpointer python-docker-py python-docker-pycreds python-ipaddress \
-python-websocket-client
-
-createrepo "${OUTDIR}"
-
-exit 0
\ No newline at end of file
diff --git a/build/creating_data/create-ubuntu-repo.sh b/build/creating_data/create-ubuntu-repo.sh
deleted file mode 100755
index ac7de65..0000000
--- a/build/creating_data/create-ubuntu-repo.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-OUTDIR="${1}"
-if [[ -z "${OUTDIR}" ]]; then
-    echo "Missing output dir"
-    exit 1
-fi
-
-
-# create the package index
-dpkg-scanpackages -m "${OUTDIR}" > "${OUTDIR}/Packages"
-cat "${OUTDIR}/Packages" | gzip -9c > "${OUTDIR}/Packages.gz"
-
-# create the Release file
-echo 'deb [trusted=yes] http://repo.infra-server/ubuntu/xenial /' > "${OUTDIR}/onap.list"
-
-exit 0
diff --git a/build/creating_data/docker-images-collector.sh b/build/creating_data/docker-images-collector.sh
index e13b915..9206b0b 100755
--- a/build/creating_data/docker-images-collector.sh
+++ b/build/creating_data/docker-images-collector.sh
@@ -30,9 +30,9 @@
     echo "      "
     echo "  This script is preparing docker images list based on kubernetes project"
     echo "      Usage:"
-    echo "        ./$(basename $0) <project version> <path to project> [<output list file>]"
+    echo "        ./$(basename $0) <path to project> [<output list file>]"
     echo "      "
-    echo "      Example: ./$(basename $0) onap_3.0.2 /root/oom/kubernetes/onap"
+    echo "      Example: ./$(basename $0) /root/oom/kubernetes/onap"
     echo "      "
     echo "      Dependencies: helm, python-yaml, make"
     echo "      "
@@ -55,26 +55,35 @@
 }
 
 create_list() {
-    helm template "${PROJECT_DIR}/../${1}" | grep 'image:\ \|tag_version:\ \|h._image' |
+    if [ -d "${PROJECT_DIR}/../${1}" ]; then
+        SUBSYS_DIR="${PROJECT_DIR}/../${1}"
+    elif [ -d "${PROJECT_DIR}/../common/${1}" ]; then
+        SUBSYS_DIR="${PROJECT_DIR}/../common/${1}"
+    else
+        >&2 echo -e \n"    !!! ${1} subsystem does not exist !!!"\n
+    fi
+    helm template "${SUBSYS_DIR}" | grep 'image:\ \|tag_version:\ \|h._image' |
         sed -e 's/^.*\"h._image\"\ :\ //; s/^.*\"\(.*\)\".*$/\1/' \
             -e 's/\x27\|,//g; s/^.*\(image\|tag_version\):\ //' | tr -d '\r'
 }
 
 # Configuration
-TAG="${1}"
-PROJECT_DIR="${2}"
-LIST="${3}"
+if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 1 ]; then
+    usage
+fi
+
+PROJECT_DIR="${1}"
+LIST="${2}"
 LISTS_DIR="$(readlink -f $(dirname ${0}))/../data_lists"
 HELM_REPO="local http://127.0.0.1:8879"
+PROJECT="$(basename ${1})"
 
-if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 2 ]; then
-    usage
-elif [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
+if [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
     echo "Wrong path to project directory entered"
     exit 1
 elif [ -z "${LIST}" ]; then
     mkdir -p ${LISTS_DIR}
-    LIST="${LISTS_DIR}/${TAG}-docker_images.list"
+    LIST="${LISTS_DIR}/${PROJECT}_docker_images.list"
 fi
 
 if [ -e "${LIST}" ]; then
@@ -82,8 +91,6 @@
     MSG="$(realpath ${LIST}) already existed\nCreated backup $(realpath ${LIST}).bk\n"
 fi
 
-PROJECT="$(basename ${2})"
-
 # Setup helm
 if pgrep -x "helm" > /dev/null; then
     echo "helm is already running"
@@ -106,12 +113,17 @@
 # Create the list from all enabled subsystems
 echo "Creating the list..."
 if [ "${PROJECT}" == "onap" ]; then
+    COMMENT="OOM commit $(git --git-dir="${PROJECT_DIR}/../../.git" rev-parse HEAD)"
     for subsystem in `parse_yaml "${PROJECT_DIR}/values.yaml"`; do
         create_list ${subsystem}
-    done
+    done | sort -u > ${LIST}
 else
-    create_list ${PROJECT}
-fi | sort -u > ${LIST}
+    COMMENT="${PROJECT}"
+    create_list ${PROJECT} | sort -u > ${LIST}
+fi
+
+# Add comment referring to the project
+sed -i "1i# generated from ${COMMENT}" "${LIST}"
 
 echo -e ${MSG}
 echo -e 'The list has been created:\n '"${LIST}"
diff --git a/build/creating_data/download-docker-images.sh b/build/creating_data/download-docker-images.sh
deleted file mode 100755
index c0a0bed..0000000
--- a/build/creating_data/download-docker-images.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#! /usr/bin/env bash
-
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
-    LIST_FILE="docker_image_list.txt"
-fi
-
-echo "Download all images"
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-line=1
-for image in $(clean_list "$LIST_FILE"); do
-    echo "== pkg #$line of $lines =="
-    echo "$image"
-    retry docker -l error pull "$image"
-    line=$((line+1))
-done
diff --git a/build/creating_data/download-files.sh b/build/creating_data/download-files.sh
deleted file mode 100755
index f687fda..0000000
--- a/build/creating_data/download-files.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
-    echo "Missing list file"
-    exit 1
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
-    echo "Missing output directory"
-    exit 1
-fi
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-
-# create output dir if not exists
-mkdir -p "$outdir"
-
-for line in $(clean_list "$LIST_FILE"); do
-    # www.springframework.org/schema/tool/spring-tool-4.3.xsd
-    file="${line%%\?*}"
-    filename=$(basename "$file")
-    echo "Downloading $cnt / $lines: $file"
-    # following curl params are ensurring 5 reties and cut-off if connectivity will
-    # drop below 10b/10s
-    curl --retry 5 -y 10 -Y 10 --location  "$line" -o "$outdir/$filename" &>/dev/null
-    cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/download-git-repos.sh b/build/creating_data/download-git-repos.sh
deleted file mode 100755
index 7853a14..0000000
--- a/build/creating_data/download-git-repos.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env bash
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-# fail fast
-set -e
-
-usage () {
-    echo "Usage:"
-    echo -e "./$(basename $0) <repository list> [destination directory]\n"
-    echo "Examples:"
-    echo "  ./$(basename $0) onap_3.0.x-git_repos.list ./git-repo"
-}
-
-LIST="${1}"
-
-if [[ -z "${LIST}" ]]; then
-    echo "Missing argument for repository list"
-    exit 1
-fi
-
-OUTDIR="${2}"
-if [[ -z "${OUTDIR}" ]]; then
-    OUTDIR="./git-repo"
-fi
-
-mkdir -p "${OUTDIR}"
-cd "${OUTDIR}"
-
-
-while IFS=" " read -r REPO BRANCH remainder
-do
-        if [[ -z "${BRANCH}" ]]; then
-                git clone https://${REPO} --bare ${REPO}
-        else
-                git clone -b ${BRANCH} --single-branch https://${REPO} --bare ${REPO}
-        fi
-done < <(awk '$1 ~ /^[^;#]/' ${LIST})
-
-
-exit 0
diff --git a/build/creating_data/download-http-files.sh b/build/creating_data/download-http-files.sh
deleted file mode 100755
index 1144c66..0000000
--- a/build/creating_data/download-http-files.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
-    echo "Missing list file"
-    exit 1
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
-    echo "Missing output directory"
-    exit 1
-fi
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-
-# create output dir if not exists
-mkdir -p "$outdir"
-
-for line in $(clean_list "$LIST_FILE"); do
-    # www.springframework.org/schema/tool/spring-tool-4.3.xsd
-    file="${line%%\?*}"
-    echo "Downloading $cnt / $lines: $file"
-    fdir=$(dirname "$file")
-    mkdir -p $outdir/$fdir
-    # following curl params are ensurring 5 reties and cut-off if connectivity will
-    # drop below 10b/10s
-    curl --retry 5 -y 10 -Y 10 --location  "$line" -o "$outdir/$file" &>/dev/null
-    cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/download-npm-pkgs.sh b/build/creating_data/download-npm-pkgs.sh
deleted file mode 100755
index 191dd5d..0000000
--- a/build/creating_data/download-npm-pkgs.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-
-if [[ -z "$LIST_FILE" ]]; then
-    LIST_FILE="all_npm_list.txt"
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
-    echo "Missing arg outdir"
-    exit 1
-fi
-
-mkdir -p "$outdir"
-cd "$outdir"
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-for line in $(clean_list "$LIST_FILE"); do
-    echo "== pkg #$cnt of $lines =="
-    npm pack $line
-    cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/save-docker-images.sh b/build/creating_data/save-docker-images.sh
deleted file mode 100755
index 0a72d15..0000000
--- a/build/creating_data/save-docker-images.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#! /usr/bin/env bash
-
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-IMG_DIR="${2}"
-
-if [[ -z "$IMG_DIR" ]]; then
-    IMG_DIR="./images"
-fi
-
-echo "Creating ${IMG_DIR}"
-if [[ ! -d "${IMG_DIR}" ]]; then
-    mkdir -p "${IMG_DIR}"
-fi
-
-save_image() {
-    local name_tag=$1
-    echo "$name_tag"
-    local img_name=$(echo "${name_tag}" | tr /: __)
-    local img_path="${IMG_DIR}/${img_name}.tar"
-
-    if [[ ! -f "${img_path}" ]] ; then
-        echo "[DEBUG] save ${name_tag} to ${img_path}"
-        echo "${name_tag}" >> $IMG_DIR/_image_list.txt
-        retry docker -l error save -o "${img_path}" ${name_tag}
-    else
-        echo "[DEBUG] ${name_tag} already saved"
-    fi
-}
-
-echo "Save all images"
-line=1
-lines=$(clean_list "$LIST_FILE" | wc -l)
-for image in $(clean_list "$LIST_FILE"); do
-    echo "== pkg #$line of $lines =="
-    save_image "${image}"
-    line=$((line+1))
-done
\ No newline at end of file
diff --git a/build/data_lists/onap_docker_images.list b/build/data_lists/onap_docker_images.list
index 3ac67ed..451f617 100644
--- a/build/data_lists/onap_docker_images.list
+++ b/build/data_lists/onap_docker_images.list
@@ -1,13 +1,13 @@
-# generated from OOM commit 94664fb4457c61076cc7e65ed40dda5cf696bcbe
+# generated from OOM commit 0b904977dde761d189874d6dc6c527cd45928d92
 alpine:3.6
 busybox
 crunchydata/crunchy-pgpool:centos7-10.4-2.0.0
 crunchydata/crunchy-postgres:centos7-10.3-1.8.2
 crunchydata/crunchy-postgres:centos7-10.4-2.0.0
 docker.elastic.co/beats/filebeat:5.5.0
+docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
 docker.elastic.co/elasticsearch/elasticsearch:5.5.0
 docker.elastic.co/elasticsearch/elasticsearch:6.6.2
-docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
 docker.elastic.co/kibana/kibana:5.5.0
 docker.elastic.co/kibana/kibana:6.6.2
 docker.elastic.co/logstash/logstash:5.4.3
@@ -45,8 +45,8 @@
 nexus3.onap.org:10001/onap/aaf/aaf_oauth:2.1.13
 nexus3.onap.org:10001/onap/aaf/aaf_service:2.1.13
 nexus3.onap.org:10001/onap/aaf/distcenter:4.0.0
-nexus3.onap.org:10001/onap/aaf/sms:4.0.0
 nexus3.onap.org:10001/onap/aaf/smsquorumclient:4.0.0
+nexus3.onap.org:10001/onap/aaf/sms:4.0.0
 nexus3.onap.org:10001/onap/aaf/testcaservice:4.0.0
 nexus3.onap.org:10001/onap/aai/esr-gui:1.4.0
 nexus3.onap.org:10001/onap/aai/esr-server:1.4.0
@@ -54,23 +54,22 @@
 nexus3.onap.org:10001/onap/aai-resources:1.4.0
 nexus3.onap.org:10001/onap/aai-schema-service:1.0.6
 nexus3.onap.org:10001/onap/aai-traversal:1.4.1
-nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/appc-cdt-image:1.5.0
-nexus3.onap.org:10001/onap/appc-image:1.5.0
+nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/appc-cdt-image:1.5.1
+nexus3.onap.org:10001/onap/appc-image:1.5.1
 nexus3.onap.org:10001/onap/babel:1.4.2
 nexus3.onap.org:10001/onap/ccsdk-ansible-server-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.2
-nexus3.onap.org:10001/onap/champ:1.4.0
-nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.3
 nexus3.onap.org:10001/onap/clamp-dashboard-kibana:4.0.1
 nexus3.onap.org:10001/onap/clamp-dashboard-logstash:4.0.1
-nexus3.onap.org:10001/onap/cli:2.0.4
+nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/cli:3.0.0
 nexus3.onap.org:10001/onap/data-router:1.3.3
 nexus3.onap.org:10001/onap/data-router:1.4.0
 nexus3.onap.org:10001/onap/dcae-be:1.3.0
@@ -90,24 +89,25 @@
 nexus3.onap.org:10001/onap/gizmo:1.4.0
 nexus3.onap.org:10001/onap/holmes/engine-management:1.2.5
 nexus3.onap.org:10001/onap/holmes/rule-management:1.2.6
+nexus3.onap.org:10001/onap/champ:1.4.0
 nexus3.onap.org:10001/onap/modeling/genericparser:1.0.2
 nexus3.onap.org:10001/onap/model-loader:1.4.0
 nexus3.onap.org:10001/onap/msb/msb_apigateway:1.2.4
 nexus3.onap.org:10001/onap/msb/msb_discovery:1.2.3
-nexus3.onap.org:10001/onap/multicloud/azure:1.2.1
-nexus3.onap.org:10001/onap/multicloud/framework:1.3.1
+nexus3.onap.org:10001/onap/multicloud/azure:1.2.2
 nexus3.onap.org:10001/onap/multicloud/framework-artifactbroker:1.3.3
-nexus3.onap.org:10001/onap/multicloud/k8s:0.2.0
-nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.1
-nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.2
+nexus3.onap.org:10001/onap/multicloud/framework:1.3.3
+nexus3.onap.org:10001/onap/multicloud/k8s:0.4.0
+nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.4
 nexus3.onap.org:10001/onap/multicloud/vio:1.3.1
-nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
 nexus3.onap.org:10001/onap/music/cassandra_job:3.0.24
 nexus3.onap.org:10001/onap/music/cassandra_music:3.0.0
+nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
 nexus3.onap.org:10001/onap/music/music:3.0.24
 nexus3.onap.org:10001/onap/network-discovery:1.5.1
 nexus3.onap.org:10001/onap/oom/kube2msb:1.1.0
@@ -118,17 +118,17 @@
 nexus3.onap.org:10001/onap/optf-cmso-topology:2.0.0
 nexus3.onap.org:10001/onap/optf-has:1.3.0
 nexus3.onap.org:10001/onap/optf-osdf:1.3.0
-nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0-SNAPSHOT-latest
+nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.4
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.cm-container:1.6.2
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.17
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.18
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.redis-cluster-container:1.0.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.2
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0
 nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.deployment-handler:4.0.1
@@ -152,19 +152,19 @@
 nexus3.onap.org:10001/onap/portal-db:2.5.0
 nexus3.onap.org:10001/onap/portal-sdk:2.5.0
 nexus3.onap.org:10001/onap/portal-wms:2.5.0
-nexus3.onap.org:10001/onap/sdc-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-backend-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-frontend:1.4.0
-nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-kibana:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.1
+nexus3.onap.org:10001/onap/sdc-backend-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-frontend:1.4.1
+nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-kibana:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.2
 nexus3.onap.org:10001/onap/search-data-service:1.3.1
 nexus3.onap.org:10001/onap/search-data-service:1.4.3
 nexus3.onap.org:10001/onap/service-decomposition:1.5.1
@@ -182,8 +182,8 @@
 nexus3.onap.org:10001/onap/sparky-be:1.4.0
 nexus3.onap.org:10001/onap/spike:1.4.0
 nexus3.onap.org:10001/onap/testsuite:1.4.0
-nexus3.onap.org:10001/onap/usecase-ui:1.2.2
 nexus3.onap.org:10001/onap/usecase-ui-server:1.2.1
+nexus3.onap.org:10001/onap/usecase-ui:1.2.2
 nexus3.onap.org:10001/onap/validation:1.3.1
 nexus3.onap.org:10001/onap/vfc/catalog:1.3.1
 nexus3.onap.org:10001/onap/vfc/db:1.3.0
@@ -202,11 +202,11 @@
 nexus3.onap.org:10001/onap/vfc/wfengine-mgrservice:1.3.0
 nexus3.onap.org:10001/onap/vfc/ztesdncdriver:1.3.0
 nexus3.onap.org:10001/onap/vfc/ztevnfmdriver:1.3.1
-nexus3.onap.org:10001/onap/vid:4.0.0
-nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.2.0
-nexus3.onap.org:10001/onap/workflow-backend:1.4.0
-nexus3.onap.org:10001/onap/workflow-frontend:1.4.0
-nexus3.onap.org:10001/onap/workflow-init:1.4.0
+nexus3.onap.org:10001/onap/vid:4.2.0
+nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.3.0
+nexus3.onap.org:10001/onap/workflow-backend:1.4.1
+nexus3.onap.org:10001/onap/workflow-frontend:1.4.1
+nexus3.onap.org:10001/onap/workflow-init:1.4.1
 nexus3.onap.org:10001/sonatype/nexus:2.14.8-01
 nexus3.onap.org:10001/zookeeper:3.4
 oomk8s/mariadb-client-init:3.0.0
diff --git a/build/data_lists/onap_pip_packages.list b/build/data_lists/onap_pip_packages.list
index ba5cdf7..ab4949b 100644
--- a/build/data_lists/onap_pip_packages.list
+++ b/build/data_lists/onap_pip_packages.list
@@ -7,6 +7,5 @@
 Jinja2==2.10.1
 MarkupSafe==1.1.1
 requests==2.22.0
-setuptools==40.7.1
 urllib3==1.25.3
 Werkzeug==0.15.4
diff --git a/build/data_lists/onap_rpm.list b/build/data_lists/onap_rpm.list
new file mode 100644
index 0000000..4595d4b
--- /dev/null
+++ b/build/data_lists/onap_rpm.list
@@ -0,0 +1,21 @@
+containerd.io-1.2.5-3.1.el7.x86_64
+container-selinux-1.12.5-14.el7.x86_64
+container-selinux-2.95-2.el7_6.noarch
+docker-ce-18.09.5-3.el7.x86_64
+docker-ce-cli-18.09.6-3.el7.x86_64
+gssproxy-0.7.0-21.el7.x86_64
+keyutils-1.5.8-3.el7.x86_64
+libbasicobjects-0.1.1-32.el7.x86_64
+libcollection-0.7.0-32.el7.x86_64
+libevent-2.0.21-4.el7.x86_64
+libini_config-1.3.1-32.el7.x86_64
+libnfsidmap-0.25-19.el7.x86_64
+libpath_utils-0.2.1-32.el7.x86_64
+libref_array-0.1.5-32.el7.x86_64
+libverto-libevent-0.2.5-4.el7.x86_64
+nfs-utils-1.3.0-0.61.el7.x86_64
+python-docker-py-1.10.6-9.el7_6.noarch
+python-docker-pycreds-0.3.0-9.el7_6.noarch
+python-ipaddress-1.0.16-2.el7.noarch
+python-jsonpointer-1.9-2.el7.noarch
+python-websocket-client-0.32.0-116.el7.noarch
diff --git a/build/download/base.py b/build/download/base.py
index 5bcd0ef..d8b4483 100644
--- a/build/download/base.py
+++ b/build/download/base.py
@@ -38,7 +38,8 @@
     :return: set of items from file
     """
     with open(item_list, 'r') as f:
-        return {item for item in (line.strip() for line in f) if item}
+        return {item for item in (line.strip() for line in f)
+                if item and not item.startswith('#')}
 
 
 def init_progress(items_name):
diff --git a/build/download/docker_images.py b/build/download/docker_images.py
index e4e742b..d8138dd 100755
--- a/build/download/docker_images.py
+++ b/build/download/docker_images.py
@@ -180,7 +180,7 @@
         if save:
             save_image(image, pulled_image, output_dir)
     except Exception as err:
-        log.error('Error downloading {}: {}'.format(image, err))
+        log.exception('Error downloading {}: {}'.format(image, err))
         raise err
 
 
@@ -195,10 +195,10 @@
     :return: None
     """
     try:
-        docker_client = docker.client.DockerClient(version='auto')
+        # big timeout in case of massive images like pnda-mirror-container:5.0.0 (11.4GB)
+        docker_client = docker.client.DockerClient(version='auto', timeout=300)
     except docker.errors.DockerException as err:
-        log.error(err)
-        log.error('Error creating docker client. Check if is docker installed and running'
+        log.exception('Error creating docker client. Check if is docker installed and running'
                   ' or if you have right permissions.')
         raise err
 
@@ -221,14 +221,12 @@
                                        missing_images['not_saved'] - missing_images['not_pulled'],
                                        None, output_dir, docker_client)
 
+    base.finish_progress(progress, error_count, log)
     if error_count > 0:
         log.error('{} images were not downloaded'.format(error_count))
         missing_images = missing(docker_client, target_images, save, output_dir)
         log.info(check_table(merge_dict_sets(missing_images), missing_images, save))
-
-    base.finish_progress(progress, error_count, log)
-
-    return error_count
+        raise RuntimeError()
 
 
 def run_cli():
@@ -256,11 +254,13 @@
 
     progress = base.init_progress('Docker images') if not args.check else None
     try:
-        sys.exit(download(args.image_list, args.save, args.output_dir, args.check,
-                 progress, args.workers))
+        download(args.image_list, args.save, args.output_dir, args.check,
+                 progress, args.workers)
     except docker.errors.DockerException:
-        log.error('Irrecoverable error detected.')
+        log.exception('Irrecoverable error detected.')
         sys.exit(1)
+    except RuntimeError as err:
+        log.exception(err)
 
 
 if __name__ == '__main__':
diff --git a/build/creating_data/download-bin-tools.sh b/build/download/download-bin-tools.sh
similarity index 100%
rename from build/creating_data/download-bin-tools.sh
rename to build/download/download-bin-tools.sh
diff --git a/build/creating_data/download-pip.sh b/build/download/download-pip.sh
similarity index 100%
rename from build/creating_data/download-pip.sh
rename to build/download/download-pip.sh
diff --git a/build/download/download.py b/build/download/download.py
new file mode 100755
index 0000000..ebce931
--- /dev/null
+++ b/build/download/download.py
@@ -0,0 +1,158 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+#   COPYRIGHT NOTICE STARTS HERE
+
+#   Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+#   COPYRIGHT NOTICE ENDS HERE
+
+import argparse
+import logging
+import sys
+import datetime
+import timeit
+
+import base
+import docker_images
+import git_repos
+import http_files
+import npm_packages
+import rpm_packages
+
+log = logging.getLogger(name=__name__)
+
+def parse_args():
+    parser=argparse.ArgumentParser(description='Download data from lists')
+    list_group = parser.add_argument_group()
+    list_group.add_argument('--docker', action='append', nargs='+', default=[],
+                        metavar=('list', 'dir-name'),
+                        help='Docker type list. If second argument is specified '
+                             'it is treated as directory where images will be saved '
+                             'otherwise only pull operation is executed')
+    list_group.add_argument('--http', action='append', nargs=2, default=[],
+                        metavar=('list', 'dir-name'),
+                        help='Http type list and directory to save downloaded files')
+    list_group.add_argument('--npm', action='append', nargs=2, default=[],
+                        metavar=('list', 'dir-name'),
+                        help='npm type list and directory to save downloaded files')
+    list_group.add_argument('--rpm', action='append', nargs=2, default=[],
+                        metavar=('list', 'dir-name'),
+                        help='rpm type list and directory to save downloaded files')
+    list_group.add_argument('--git', action='append', nargs=2, default=[],
+                        metavar=('list', 'dir-name'),
+                        help='git repo type list and directory to save downloaded files')
+    parser.add_argument('--npm-registry', default='https://registry.npmjs.org',
+                        help='npm registry to use (default: https://registry.npmjs.org)')
+    parser.add_argument('--check', '-c', action='store_true', default=False,
+                        help='Check what is missing. No download.')
+    parser.add_argument('--debug', action='store_true', default=False,
+                        help='Turn on debug output')
+
+    args = parser.parse_args()
+
+    for arg in ('docker', 'npm', 'http', 'rpm', 'git'):
+        if getattr(args, arg):
+            return args
+
+    parser.error('One of --docker, --npm, --http, --rpm, --git must be specified')
+
+
+def run_cli():
+    args = parse_args()
+
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_formatter = logging.Formatter('%(message)s')
+    console_handler.setFormatter(console_formatter)
+    now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+    log_file = 'download_data-{}.log'.format(now)
+    file_format = "%(asctime)s: %(filename)s: %(levelname)s: %(message)s"
+
+    if args.debug:
+        logging.basicConfig(level=logging.DEBUG, filename=log_file, format=file_format)
+    else:
+        logging.basicConfig(level=logging.INFO, filename=log_file, format=file_format)
+    root_logger = logging.getLogger()
+    root_logger.addHandler(console_handler)
+
+    list_with_errors = []
+    timer_start = timeit.default_timer()
+
+    for docker_list in args.docker:
+        log.info('Processing {}.'.format(docker_list[0]))
+        progress = None if args.check else base.init_progress('docker images')
+        save = False
+        if len(docker_list) > 1:
+            save = True
+        else:
+            docker_list.append(None)
+        try:
+            docker_images.download(docker_list[0], save,
+                                   docker_list[1], args.check, progress)
+        except RuntimeError:
+            list_with_errors.append(docker_list[0])
+
+    for http_list in args.http:
+        progress = None if args.check else base.init_progress('http files')
+        log.info('Processing {}.'.format(http_list[0]))
+        try:
+            http_files.download(http_list[0], http_list[1], args.check,
+                                progress)
+        except RuntimeError:
+            list_with_errors.append(http_list[0])
+
+    for npm_list in args.npm:
+        progress = None if args.check else base.init_progress('npm packages')
+        log.info('Processing {}.'.format(npm_list[0]))
+        try:
+            npm_packages.download(npm_list[0], args.npm_registry, npm_list[1],
+                                  args.check, progress)
+        except RuntimeError:
+            list_with_errors.append(npm_list[0])
+
+    for rpm_list in args.rpm:
+        if args.check:
+            log.info('Check mode for rpm packages is not implemented')
+            break
+        log.info('Processing {}.'.format(rpm_list[0]))
+        try:
+            rpm_packages.download(rpm_list[0], rpm_list[1])
+        except RuntimeError:
+            list_with_errors.append(rpm_list[0])
+
+    for git_list in args.git:
+        if args.check:
+            log.info('Check mode for git repositories is not implemented')
+            break
+        progress = None if args.check else base.init_progress('git repositories')
+        log.info('Processing {}.'.format(git_list[0]))
+        try:
+            git_repos.download(git_list[0], git_list[1], progress)
+        except RuntimeError:
+            list_with_errors.append(git_list[0])
+
+    e_time = datetime.timedelta(seconds=timeit.default_timer() - timer_start)
+    log.info(timeit.default_timer() - timer_start)
+    log.info('Execution ended. Total elapsed time {}'.format(e_time))
+
+    if list_with_errors:
+        log.error('Errors encountered while processing these lists:'
+                  '\n{}'.format('\n'.join(list_with_errors)))
+        sys.exit(1)
+
+
+
+if __name__ == '__main__':
+    run_cli()
diff --git a/build/download/git_repos.py b/build/download/git_repos.py
index e388e94..aff01b8 100755
--- a/build/download/git_repos.py
+++ b/build/download/git_repos.py
@@ -45,10 +45,9 @@
     if not base.check_tool('git'):
         log.error('ERROR: git is not installed')
         progress.finish(dirty=True)
-        return 1
+        raise RuntimeError('git missing')
 
-    git_set = {tuple(item.split()) for item in base.load_list(git_list)
-               if not item.startswith('#')}
+    git_set = {tuple(item.split()) for item in base.load_list(git_list)}
 
     error_count = 0
 
@@ -64,14 +63,13 @@
             clone_repo(dst, *repo)
             progress.update(progress.value + 1)
         except subprocess.CalledProcessError as err:
-            log.error(err.output.decode())
+            log.exception(err.output.decode())
             error_count += 1
 
     base.finish_progress(progress, error_count, log)
     if error_count > 0:
         log.error('{} were not downloaded. Check logs for details'.format(error_count))
-    return error_count
-
+        raise RuntimeError('Download unsuccesfull')
 
 def run_cli():
     parser = argparse.ArgumentParser(description='Download git repositories from list')
@@ -85,8 +83,11 @@
     logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
 
     progress = base.init_progress('git repositories')
-
-    sys.exit(download(args.git_list, args.output_dir, progress))
+    try:
+        download(args.git_list, args.output_dir, progress)
+    except RuntimeError as err:
+        log.exception(err)
+        sys.exit(1)
 
 
 if __name__ == '__main__':
diff --git a/build/download/http_files.py b/build/download/http_files.py
index f5b1e59..c83158d 100755
--- a/build/download/http_files.py
+++ b/build/download/http_files.py
@@ -83,7 +83,7 @@
 
     if check:
         log.info(base.simple_check_table(file_set, missing_files))
-        return 0
+        return
 
     skipping = file_set - missing_files
 
@@ -91,12 +91,11 @@
 
     error_count = base.run_concurrent(workers, progress, download_file, missing_files, dst_dir)
 
+    base.finish_progress(progress, error_count, log)
     if error_count > 0:
         log.error('{} files were not downloaded. Check log for specific failures.'.format(error_count))
+        raise RuntimeError()
 
-    base.finish_progress(progress, error_count, log)
-
-    return error_count
 
 def run_cli():
     """
@@ -123,7 +122,10 @@
 
     progress = base.init_progress('http files') if not args.check else None
 
-    sys.exit(download(args.file_list, args.output_dir, args.check, progress, args.workers))
+    try:
+        download(args.file_list, args.output_dir, args.check, progress, args.workers)
+    except RuntimeError:
+        sys.exit(1)
 
 
 if __name__ == '__main__':
diff --git a/build/download/npm_packages.py b/build/download/npm_packages.py
index c174e2c..70c03ad 100755
--- a/build/download/npm_packages.py
+++ b/build/download/npm_packages.py
@@ -57,7 +57,7 @@
     except Exception as err:
         if os.path.isfile(dst_path):
             os.remove(dst_path)
-        log.error('Failed: {}: {}'.format(npm, err))
+        log.exception('Failed: {}'.format(npm))
         raise err
     log.info('Downloaded: {}'.format(npm))
 
@@ -81,12 +81,10 @@
     base.start_progress(progress, len(npm_set), skipping, log)
     error_count = base.run_concurrent(workers, progress, download_npm, missing_npms, registry, dst_dir)
 
+    base.finish_progress(progress, error_count, log)
     if error_count > 0:
         log.error('{} packages were not downloaded. Check log for specific failures.'.format(error_count))
-
-    base.finish_progress(progress, error_count, log)
-
-    return error_count
+        raise RuntimeError()
 
 
 def run_cli():
diff --git a/build/download/rpm_packages.py b/build/download/rpm_packages.py
index 7f9700a..732af0e 100755
--- a/build/download/rpm_packages.py
+++ b/build/download/rpm_packages.py
@@ -33,7 +33,7 @@
 def download(rpm_list, dst_dir):
     if not base.check_tool('yumdownloader'):
         log.error('ERROR: yumdownloader is not installed')
-        return 1
+        raise RuntimeError('yumdownloader missing')
 
     rpm_set = base.load_list(rpm_list)
 
@@ -41,11 +41,10 @@
     log.info('Running command: {}'.format(command))
     try:
         subprocess.check_call(command.split())
-        log.info('Downloaded')
     except subprocess.CalledProcessError as err:
-        log.error(err.output)
-        return err.returncode
-
+        log.exception(err.output)
+        raise err
+    log.info('Downloaded')
 
 
 def run_cli():
@@ -59,7 +58,11 @@
 
     logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
 
-    sys.exit(download(args.rpm_list, args.output_dir))
+    try:
+        download(args.rpm_list, args.output_dir)
+    except (subprocess.CalledProcessError, RuntimeError):
+        sys.exit(1)
+
 
 
 if __name__ == '__main__':
diff --git a/build/download_offline_data_by_lists.sh b/build/download_offline_data_by_lists.sh
deleted file mode 100755
index b2afd17..0000000
--- a/build/download_offline_data_by_lists.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#! /usr/bin/env bash
-
-#   COPYRIGHT NOTICE STARTS HERE
-#
-#   Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-#   COPYRIGHT NOTICE ENDS HERE
-
-
-# fail fast
-set -e
-
-usage () {
-    echo "Usage:"
-    echo -e "./$(basename $0) <project version>\n"
-    echo "onap_3.0.0 for casablanca                                (sign-off 30/11/2018)"
-    echo "onap_3.0.1 for casablanca maintenance release            (sign-off 10/12/2018)"
-    echo "onap_3.0.2 for latest casablanca with fixed certificates (sign-off 25/04/2019)"
-    echo ""
-    echo "Example:"
-    echo "  ./$(basename $0) onap_3.0.2"
-}
-
-# boilerplate
-RELATIVE_PATH=./ # relative path from this script to 'common-functions.sh'
-if [ "$IS_COMMON_FUNCTIONS_SOURCED" != YES ] ; then
-    SCRIPT_DIR=$(dirname "${0}")
-    LOCAL_PATH=$(readlink -f "$SCRIPT_DIR")
-    . "${LOCAL_PATH}"/"${RELATIVE_PATH}"/common-functions.sh
-fi
-
-if [ "${1}" == "-h" ] || [ -z "${1}" ]; then
-    usage
-    exit 0
-else
-    TAG="${1}"
-fi
-
-CTOOLS="${LOCAL_PATH}/creating_data"
-LISTS_DIR="${LOCAL_PATH}/data_lists"
-DATA_DIR="${LOCAL_PATH}/../../resources"
-TOTAL=12
-CURR=1
-
-message info "Downloading started: $(date)"
-
-echo "[Step $((CURR++))/$TOTAL Download collected docker images]"
-$CTOOLS/download-docker-images.sh "${LISTS_DIR}/${TAG}-docker_images.list"
-
-echo "[Step $((CURR++))/$TOTAL Download docker images for infra-server]"
-$CTOOLS/download-docker-images.sh "${LISTS_DIR}/infra_docker_images.list"
-
-echo "[Step $((CURR++))/$TOTAL Build own nginx image]"
-$CTOOLS/create_nginx_image/01create-image.sh "${DATA_DIR}/offline_data/docker_images_infra"
-
-echo "[Step $((CURR++))/$TOTAL Save docker images from docker cache to tarfiles]"
-$CTOOLS/save-docker-images.sh "${LISTS_DIR}/${TAG}-docker_images.list" "${DATA_DIR}/offline_data/docker_images_for_nexus"
-
-echo "[Step $((CURR++))/$TOTAL Prepare infra related images to infra folder]"
-$CTOOLS/save-docker-images.sh "${LISTS_DIR}/infra_docker_images.list" "${DATA_DIR}/offline_data/docker_images_infra"
-
-echo "[Step $((CURR++))/$TOTAL Download git repos]"
-$CTOOLS/download-git-repos.sh "${LISTS_DIR}/onap_3.0.x-git_repos.list" "${DATA_DIR}/git-repo"
-
-echo "[Step $((CURR++))/$TOTAL Download http files]"
-$CTOOLS/download-http-files.sh "${LISTS_DIR}/onap_3.0.x-http_files.list" "${DATA_DIR}/http"
-
-echo "[Step $((CURR++))/$TOTAL Download npm pkgs]"
-$CTOOLS/download-npm-pkgs.sh "${LISTS_DIR}/onap_3.0.x-npm.list" "${DATA_DIR}/offline_data/npm_tar"
-
-echo "[Step $((CURR++))/$TOTAL Download bin tools]"
-$CTOOLS/download-bin-tools.sh "${DATA_DIR}/downloads"
-
-echo "[Step $((CURR++))/$TOTAL Create RHEL repository]"
-$CTOOLS/create-rhel-repo.sh "${DATA_DIR}/pkg/rhel"
-
-echo "[Step $((CURR++))/$TOTAL Download sdnc-ansible-server packages]"
-$CTOOLS/download-pip.sh "${LISTS_DIR}/onap_3.0.x-pip_packages.list" "${DATA_DIR}/offline_data/pypi"
-$CTOOLS/download-files.sh "${LISTS_DIR}/deb_packages.list" "${DATA_DIR}/pkg/ubuntu/xenial"
-
-echo "[Step $((CURR++))/$TOTAL Create APT repository]"
-$CTOOLS/create-ubuntu-repo.sh "${DATA_DIR}/pkg/ubuntu/xenial"
-
-message info "Downloading finished: $(date)"
diff --git a/build/fetch_and_patch_charts.sh b/build/fetch_and_patch_charts.sh
index 79d7a01..22d45e6 100755
--- a/build/fetch_and_patch_charts.sh
+++ b/build/fetch_and_patch_charts.sh
@@ -54,7 +54,7 @@
 PATCH_FILE=$(realpath "${3}")
 
 echo -e "${_G}[Step $((CURR++))/${TOTAL} cloning repo with charts to be patched]${C_}"
-git clone "${1}" "${4}"
+git clone --recurse-submodules "${1}" "${4}"
 
 echo -e "${_G}[Step $((CURR++))/${TOTAL} setting working dir to ${4}]${C_}"
 pushd "${4}"
diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst
index bb0e4cc..043e429 100644
--- a/docs/BuildGuide.rst
+++ b/docs/BuildGuide.rst
@@ -15,29 +15,9 @@
 
 We assume that procedure is executed on RHEL 7.6 server with \~300G disc space, 16G+ RAM and internet connectivity
 
-More-over following sw packages has to be installed:
+Some additional sw packages are required by the ONAP Offline platform build tooling. In order to install them,
+the following repos have to be configured for the RHEL 7.6 platform.
 
-* for the Preparation (Part 1), the Download artifacts for offline installer (Part 2) and the application helm charts preparation and patching (Part 4)
-    -  git
-    -  wget
-
-* for the Download artifacts for offline installer (Part 2) only
-    -  createrepo
-    -  dpkg-dev
-    -  python2-pip
-
-* for the Download artifacts for offline installer (Part 2) and the Populate local nexus (Part 3)
-    -  nodejs
-    -  jq
-    -  docker (exact version docker-ce-18.09.5)
-
-* for the Download artifacts for offline installer (Part 2) and for the Application helm charts preparation and patching (Part 4)
-    -  patch
-
-* for the Populate local nexus (Part 3)
-    -  twine
-
-Configure repos for downloading all needed rpms for download/packaging tooling:
 
 
 ::
@@ -49,19 +29,28 @@
     # Register server
     subscription-manager register --username <rhel licence name> --password <password> --auto-attach
 
-    # enable epel for npm and jq
-    rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+    # required by special centos docker recommended by ONAP
+    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
 
-    # enable rhel-7-server-e4s-optional-rpms in /etc/yum.repos.d/redhat.repo
+    # required by docker dependencies i.e. docker-selinux
+    subscription-manager repos --enable=rhel-7-server-extras-rpms
+
+    # epel is required by npm within blob build
+    rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
 
 Alternatively
 
 ::
 
+   ToDo: newer download scripts need to be verified on CentOS with ONAP Dublin
+
    ##############
    # Centos 7.6 #
    ##############
 
+   # required by special centos docker recommended by ONAP
+   yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
    # enable epel repo for npm and jq
    yum install -y epel-release
 
@@ -70,12 +59,13 @@
 ::
 
     # install following packages
-    yum install -y expect nodejs git wget createrepo python2-pip jq patch dpkg-dev
+    yum install -y docker-ce-18.09.5 python-pip git createrepo expect nodejs npm jq
 
+    # twine package is needed by nexus blob build script
     pip install twine
 
-    # install docker
-    curl https://releases.rancher.com/install-docker/18.09.sh | sh
+    # docker daemon must be running on host
+    service docker start
 
 Then it is necessary to clone all installer and build related repositories and prepare the directory structure.
 
@@ -86,142 +76,71 @@
     git clone https://gerrit.onap.org/r/oom/offline-installer onap-offline
     cd onap-offline
 
+    # install required pip packages for download scripts
+    pip install -r ./build/download/requirements.txt
+
 Part 2. Download artifacts for offline installer
 ------------------------------------------------
 
 .. note:: Skip this step if you have already all necessary resources and continue with Part 3. Populate local nexus
 
-All artifacts should be downloaded by running the download script as follows:
+It's possible to download all artifacts in a single ./download.py execution. Recently we improved the reliability of the download scripts,
+so one might try the following command to download most of the required artifacts in a single shot.
 
-./build/download_offline_data_by_lists.sh <project>
-
-For example:
+**Step1 - download wrapper script execution**
 
 ::
 
-  # onap_3.0.0 for casablanca                                (sign-off 30/11/2018)
-  # onap_3.0.1 for casablanca maintenance release            (sign-off 10/12/2018)
-  # onap_3.0.2 for latest casablanca with fixed certificates (sign-off 25/04/2019)
+        # following arguments are provided
+        # all data lists are taken in ./build/data_lists/ folder
+        # all resources will be stored in expected folder structure within ../resources folder
+        # for more details refer to Appendix 1.
 
-  $ ./build/download_offline_data_by_lists.sh onap_3.0.2
+        ./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra --docker ./build/data_lists/rke_docker_images.list ../resources/offline_data/docker_images_for_nexus --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus --git ./build/data_lists/onap_git_repos.list ../resources/git-repo --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel
 
-Download is as reliable as network connectivity to internet, it is highly recommended to run it in screen and save log file from this script execution for checking if all artifacts were successfully collected. Each start and end of script call should contain timestamp in console output. Downloading consists of 10 steps, which should be checked at the end one-by-one.
 
-**Verify:** *Please take a look on following comments to respective
-parts of download script*
+Alternatively, step-by-step procedure is described in Appendix 1.
 
-[Step 1/10 Download collected docker images]
+Following steps are still required and are not supported by current version of download.py script.
 
-=> image download step is quite reliable and contain retry logic
-
-E.g
+**Step 2 - Building own dns image**
 
 ::
 
-    == pkg #143 of 163 ==
-    rancher/etc-host-updater:v0.0.3
-    digest:sha256:bc156a5ae480d6d6d536aa454a9cc2a88385988617a388808b271e06dc309ce8
-    Error response from daemon: Get https://registry-1.docker.io/v2/rancher/etc-host-updater/manifests/v0.0.3: Get
-    https://auth.docker.io/token?scope=repository%3Arancher%2Fetc-host-updater%3Apull&service=registry.docker.io: net/http: TLS handshake timeout
-    WARNING [!]: warning Command docker -l error pull rancher/etc-host-updater:v0.0.3 failed.
-    Attempt: 2/5
-    INFO: info waiting 10s for another try...
-    v0.0.3: Pulling from rancher/etc-host-updater
-    b3e1c725a85f: Already exists
-    6a710864a9fc: Already exists
-    d0ac3b234321: Already exists
-    87f567b5cf58: Already exists
-    16914729cfd3: Already exists
-    83c2da5790af: Pulling fs layer
-    83c2da5790af: Verifying Checksum
-    83c2da5790af: Download complete
-    83c2da5790af: Pull complete
+        # We are building our own dns image within our offline infrastructure
+        ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra
 
-[Step 2/10 Build own nginx image]
 
-=> there is no hardening in this step, if it fails it needs to be
-retriggered. It should end with
+**Step 3 - Http files**
+
+ToDo: the complete and verified list of http files will come during/after the vFWCL testcase
+
+
+**Step 4 - Binaries**
 
 ::
 
-  Successfully built <id>
+       # Following step will download and prepare rke, kubectl and helm binaries
+       # there is some post-processing needed, therefore it's not very convenient to add support for this step into the main download.py script
+       ./build/download/download-bin-tools.sh ../resources/downloads
 
-[Step 3/10 Save docker images from docker cache to tarfiles]
-
-=> quite reliable, retry logic in place
-
-[Step 4/10 move infra related images to infra folder]
-
-=> should be safe, precondition is not failing step(3)
-
-[Step 5/10 Download git repos]
-
-=> potentially unsafe, no hardening in place. If it not download all git repos. It has to be executed again. Easiest way is probably to comment-out other steps in load script and run it again.
-
-E.g.
+**Step 5 - Create repo**
 
 ::
 
-    Cloning into bare repository
-    'github.com/rancher/community-catalog.git'...
-    error: RPC failed; result=28, HTTP code = 0
-    fatal: The remote end hung up unexpectedly
-    Cloning into bare repository 'git.rancher.io/rancher-catalog.git'...
-    Cloning into bare repository
-    'gerrit.onap.org/r/testsuite/properties.git'...
-    Cloning into bare repository 'gerrit.onap.org/r/portal.git'...
-    Cloning into bare repository 'gerrit.onap.org/r/aaf/authz.git'...
-    Cloning into bare repository 'gerrit.onap.org/r/demo.git'...
-    Cloning into bare repository
-    'gerrit.onap.org/r/dmaap/messagerouter/messageservice.git'...
-    Cloning into bare repository 'gerrit.onap.org/r/so/docker-config.git'...
+      createrepo ../resources/pkg/rhel
 
-[Step 6/10 Download http files]
+**Step 6 - pip packages**
 
-[Step 7/10 Download npm pkgs]
-
-[Step 8/10 Download bin tools]
-
-=> work quite reliably, If it not download all artifacts. Easiest way is probably to comment-out other steps in load script and run it again.
-
-[Step 9/10 Download rhel pkgs]
-
-=> this is the step which will work on rhel only, for other platform different packages has to be downloaded.
-
-Following is considered as sucessfull run of this part:
+Todo: will be incorporated into download.py in the near future
 
 ::
 
-      Available: 1:net-snmp-devel-5.7.2-32.el7.i686 (rhel-7-server-rpms)
-        net-snmp-devel = 1:5.7.2-32.el7
-      Available: 1:net-snmp-devel-5.7.2-33.el7_5.2.i686 (rhel-7-server-rpms)
-        net-snmp-devel = 1:5.7.2-33.el7_5.2
-    Dependency resolution failed, some packages will not be downloaded.
-    No Presto metadata available for rhel-7-server-rpms
-    https://ftp.icm.edu.pl/pub/Linux/fedora/linux/epel/7/x86_64/Packages/p/perl-CDB_File-0.98-9.el7.x86_64.rpm:
-    [Errno 12\] Timeout on
-    https://ftp.icm.edu.pl/pub/Linux/fedora/linux/epel/7/x86_64/Packages/p/perl-CDB_File-0.98-9.el7.x86_64.rpm:
-    (28, 'Operation timed out after 30001 milliseconds with 0 out of 0 bytes
-    received')
-    Trying other mirror.
-    Spawning worker 0 with 230 pkgs
-    Spawning worker 1 with 230 pkgs
-    Spawning worker 2 with 230 pkgs
-    Spawning worker 3 with 230 pkgs
-    Spawning worker 4 with 229 pkgs
-    Spawning worker 5 with 229 pkgs
-    Spawning worker 6 with 229 pkgs
-    Spawning worker 7 with 229 pkgs
-    Workers Finished
-    Saving Primary metadata
-    Saving file lists metadata
-    Saving other metadata
-    Generating sqlite DBs
-    Sqlite DBs complete
+      # Following step will download all pip packages
+      ./build/download/download-pip.sh ./build/data_lists/onap_pip_packages.list ../resources/offline_data/pypi
 
-[Step 10/10 Download sdnc-ansible-server packages]
 
-=> there is again no retry logic in this part, it is collecting packages for sdnc-ansible-server in the exactly same way how that container is doing it, however there is a bug in upstream that image in place will not work with those packages as old ones are not available and newer are not compatible with other stuff inside that image
+This concludes SW download part required for ONAP offline platform creating.
 
 Part 3. Populate local nexus
 ----------------------------
@@ -234,18 +153,24 @@
 
 .. note:: In case you skipped the Part 2 for the artifacts download, please ensure that the copy of resources data are untarred in *./onap-offline/../resources/*
 
-Whole nexus blob data will be created by running script build\_nexus\_blob.sh.
+Whole nexus blob data will be created by running script build_nexus_blob.sh.
 It will load the listed docker images, run the Nexus, configure it as npm, pypi
 and docker repositories. Then it will push all listed npm and pypi packages and
 docker images to the repositories. After all is done the repository container
 is stopped.
 
+.. note:: The build_nexus_blob.sh script is using the docker, npm and pip data lists for building the nexus blob. Unfortunately we now have 2 different docker data lists (RKE & ONAP), so we need to merge them as shown in the following snippet. This problem will be fixed in OOM-1890
+
 You can run the script as following example:
 
-``$ ./install/onap-offline/build_nexus_blob.sh onap_3.0.2``
+::
 
-Where the onap_3.0.2 is the tag to specify which lists will be used for the
-resources
+        # merge RKE and ONAP app data lists
+        cat ./build/data_lists/rke_docker_images.list >> ./build/data_lists/onap_docker_images.list
+
+        ./build/build_nexus_blob.sh
+
+.. note:: in the current release scope we aim to maintain just a single example set of data lists; tags used in previous releases are not needed. The data lists also cover the latest versions verified by us, although the user is allowed to build data lists on his own.
 
 Once the Nexus data blob is created, the docker images and npm and pypi
 packages can be deleted to reduce the package size as they won't be needed in
@@ -255,9 +180,9 @@
 
 ::
 
-    rm -f /tmp/onap-offline/resources/offline_data/docker_images_for_nexus/*
-    rm -rf /tmp/onap-offline/resources/offline_data/npm_tar
-    rm -rf /tmp/onap-offline/resources/offline_data/pypi
+    rm -f /tmp/resources/offline_data/docker_images_for_nexus/*
+    rm -rf /tmp/resources/offline_data/npm_tar
+    rm -rf /tmp/resources/offline_data/pypi
 
 Part 4. Application helm charts preparation and patching
 --------------------------------------------------------
@@ -267,13 +192,13 @@
 
 ::
 
-  ./build/fetch\_and\_patch\_charts.sh <helm charts repo> <commit/tag/branch> <patchfile> <target\_dir>
+  ./build/fetch_and_patch_charts.sh <helm charts repo> <commit/tag/branch> <patchfile> <target_dir>
 
 For example:
 
 ::
 
-  ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom master /tmp/onap-offline/patches/onap.patch /tmp/oom-clone
+  ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom 0b904977dde761d189874d6dc6c527cd45928 /tmp/onap-offline/patches/onap.patch /tmp/oom-clone
 
 Part 5. Creating offline installation package
 ---------------------------------------------
@@ -288,11 +213,11 @@
 +---------------------------------------+------------------------------------------------------------------------------+
 | Parameter                             | Description                                                                  |
 +=======================================+==============================================================================+
-| HELM\_CHARTS\_DIR                     | directory with Helm charts for the application                               |
+| HELM_CHARTS_DIR                       | directory with Helm charts for the application                               |
 |                                       |                                                                              |
 |                                       | Example: /tmp/oom-clone/kubernetes                                           |
 +---------------------------------------+------------------------------------------------------------------------------+
-| APP\_CONFIGURATION                    | application install configuration (application_configuration.yml) for        |
+| APP_CONFIGURATION                     | application install configuration (application_configuration.yml) for        |
 |                                       | ansible installer and custom ansible role code directories if any.           |
 |                                       |                                                                              |
 |                                       | Example::                                                                    |
@@ -303,11 +228,11 @@
 |                                       |  )                                                                           |
 |                                       |                                                                              |
 +---------------------------------------+------------------------------------------------------------------------------+
-| APP\_BINARY\_RESOURCES\_DIR           | directory with all (binary) resources for offline infra and application      |
+| APP_BINARY_RESOURCES_DIR              | directory with all (binary) resources for offline infra and application      |
 |                                       |                                                                              |
-|                                       | Example: /tmp/onap-offline/resources                                         |
+|                                       | Example: /tmp/resources                                                      |
 +---------------------------------------+------------------------------------------------------------------------------+
-| APP\_AUX\_BINARIES                    | additional binaries such as docker images loaded during runtime   [optional] |
+| APP_AUX_BINARIES                      | additional binaries such as docker images loaded during runtime   [optional] |
 +---------------------------------------+------------------------------------------------------------------------------+
 
 Offline installer packages are created with prepopulated data via
@@ -321,13 +246,78 @@
 
 ::
 
-  ./build/package.sh onap 3.0.2 /tmp/package
+  ./build/package.sh onap 4.0.0 /tmp/package
 
 
 So in the target directory you should find tar files with
 
 ::
 
-  offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-sw.tar
-  offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-resources.tar
-  offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-aux-resources.tar
+  offline-<PROJECT_NAME>-<PROJECT_VERSION>-sw.tar
+  offline-<PROJECT_NAME>-<PROJECT_VERSION>-resources.tar
+  offline-<PROJECT_NAME>-<PROJECT_VERSION>-aux-resources.tar
+
+
+Appendix 1. Step-by-step download procedure
+-------------------------------------------
+
+**Step 1 - docker images**
+
+::
+
+        # This step will parse all 3 docker data lists (offline infrastructure images, rke k8s images & onap images)
+        # and start building onap offline platform in /tmp/resources folder
+
+        ./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra --docker ./build/data_lists/rke_docker_images.list ../resources/offline_data/docker_images_for_nexus --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus
+
+
+**Step 2 - building own dns image**
+
+::
+
+        # We are building our own dns image within our offline infrastructure
+        ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra
+
+**Step 3 - git repos**
+
+::
+
+        # Following step will download all git repos
+        ./build/download/download.py --git ./build/data_lists/onap_git_repos.list ../resources/git-repo
+
+**Step 4 - http files**
+
+ToDo: A complete and verified list of http files will be provided during/after the vFWCL test case.
+
+**Step 5 - npm packages**
+
+::
+
+        # Following step will download all npm packages
+        ./build/download/download.py --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar
+
+**Step 6 - binaries**
+
+::
+
+       # Following step will download and prepare rke, kubectl and helm binaries
+       ./build/download/download-bin-tools.sh ../resources/downloads
+
+**Step 7 - rpms**
+
+::
+
+      # Following step will download all rpms and create repo
+      ./build/download/download.py --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel
+
+      createrepo ../resources/pkg/rhel
+
+**Step 8 - pip packages**
+
+ToDo: A new python script might be created for this part as well.
+
+::
+
+      # Following step will download all pip packages
+      ./build/download/download-pip.sh ./build/data_lists/onap_pip_packages.list ../resources/offline_data/pypi
+