Merge "Add control variables for the ansible deploy"
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
new file mode 100755
index 0000000..fad84e0
--- /dev/null
+++ b/ansible/group_vars/all.yml
@@ -0,0 +1,132 @@
+---
+###################################
+# Resources configuration entries #
+###################################
+
+# Resource host information
+
+# directory on the resource host where the resource tar files are stored
+resources_dir:
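+# e.g. resources_dir: /data/onap-offline (illustrative value)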
+
+# name of the tar file within this directory containing the offline infrastructure software
+resources_filename:
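+# e.g. resources_filename: resources_package.tar (illustrative value)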
+
+# auxiliary resources provide the user a way to distribute another
+# big tar file to the infra node, which might be useful later on
+# in application playbooks; optional parameter
+aux_resources_filename:
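+# e.g. aux_resources_filename: aux_package.tar (illustrative value)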
+
+# resources can be exported via nfs
+# default is no - the client will use ssh
+# if set to yes but nfs-utils is missing, ssh is used as a fallback
+resources_on_nfs: no
+
+# Infra node specific information
+
+# offline solution source data binaries will be decompressed into the following directory on the infra node
+# e.g. app_data_path: /opt/onap
+app_data_path:
+
+# additional data path for auxiliary data transfer
+# e.g. aux_data_path: /opt/onap/onap_me_docker_images
+aux_data_path:
+
+
+##########################################
+# Offline Infrastructure specific params #
+##########################################
+
+# information from which the root CA certificate is created
+# e.g.
+# organization_name: Samsung
+# state_or_province_name: Poland
+# country_name: PL
+# locality_name: Krakow
+certificates:
+  organization_name:
+  state_or_province_name:
+  country_name:
+  locality_name:
+
+# Force k8s cluster redeployment if it already exists
+# Default value is to allow redeploy
+redeploy_k8s_env: yes
+
+# Distribute offline rpm repository
+# Default value is to distribute the rpm repository
+deploy_rpm_repository: yes
+
+# The offline solution deploys an app specific rpm repository and requires a name
+# which is also used for the k8s cluster
+# e.g. app_name: ONAP
+app_name:
+
+# as the nexus blob is prepopulated at build time, the following
+# runtime_images block provides an alternative way to insert the
+# specified images into nexus during infrastructure playbook execution
+# the images specified here must be available inside the aux_resources_filename
+# tar file
+# if runtime_images is not specified, nothing is inserted on top of the
+# prebuilt nexus blob at installation time
+# the component name must match the tar filename
+# e.g.
+# aaiadapter-0.0.1.tar is expected in aux_data_path for aaiadapter image
+#runtime_images:
+#  aaiadapter-0.0.1:
+#    registry: "nexus3.onap.org:10001"
+#    path:     "/onap/aaiadapter/aaiadapter"
+#    tag:      "latest"
+runtime_images:
+
+
+###############################
+# Application specific params #
+###############################
+
+# Project name, allowing the same codebase to be utilized for different projects
+# e.g. project_configuration: onap-me
+project_configuration:
+
+# App Helm charts directory, e.g. application/helm_charts/<xxx> where xxx is the charts folder name.
+# Helm charts are expected to be inside the SW package, somewhere under ./ansible/application;
+# they will be available to the offline installer under /ansible/application/<helm_charts_name>
+# for the OOM project the helm charts are usually within the kubernetes sub-folder,
+# so their path can be:
+# e.g. app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
+app_helm_charts_install_directory:
+
+# target directory on the infra node into which the helm charts are copied;
+# this should be the directory containing all charts and the Makefile
+# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
+app_helm_charts_infra_directory:
+
+# Main Helm chart to install
+# e.g. app_helm_chart_name: onap
+app_helm_chart_name:
+
+# Helm release name (visible in pod names)
+# e.g. app_helm_release_name: "{{ project_configuration }}"
+app_helm_release_name:
+
+# Kubernetes namespace where application is installed
+# e.g. app_kubernetes_namespace: onap
+app_kubernetes_namespace:
+
+# Optional custom Ansible role names for application pre and post install logic.
+# The location of additional custom roles is defined in ansible.cfg with roles_path.
+# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role"
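+# e.g. application_post_install_role: "{{ project_configuration }}-health-check-role" (illustrative)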
+application_pre_install_role:
+application_post_install_role:
+
+# any other application-specific params can be specified in this file
+# e.g.
+# onap_values:
+#  openStackKeyStoneUrl: "http://1.2.3.4:5000"
+#  openStackServiceTenantName: "services"
+#  openStackDomain: "Default"
+#  openStackUserName: "admin"
+#  openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"
diff --git a/ansible/group_vars/infrastructure.yml b/ansible/group_vars/infrastructure.yml
new file mode 100755
index 0000000..ab31405
--- /dev/null
+++ b/ansible/group_vars/infrastructure.yml
@@ -0,0 +1,44 @@
+---
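+# mount path of the shared nfs volume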
+nfs_mount_path: /dockerdata-nfs
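+# password for vnc access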
+vnc_passwd: samsung
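+# hostnames simulated by the offline infrastructure, grouped by service type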
+simulated_hosts:
+  git:
+    - gerrit.onap.org
+    - git.rancher.io
+    - github.com
+  http:
+    - git.onap.org
+    - nexus.onap.org
+    - repo.infra-server
+    - www.getcloudify.org
+    - www.springframework.org
+    - repo.maven.apache.org
+    - repo1.maven.org
+  nexus:
+    - docker.elastic.co
+    - docker.io
+    - gcr.io
+    - nexus.{{ ansible_nodename }}
+    - nexus3.onap.org
+    - registry-1.docker.io
+    - registry.hub.docker.com
+    - registry.npmjs.org
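+# flat list combining all simulated hostnames from the groups above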
+all_simulated_hosts:
+    "{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
+rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080"
+rancher_remove_other_env: yes
+rancher_redeploy_k8s_env: yes
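+# whether to populate nexus with the runtime images (see runtime_images in all.yml)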
+populate_nexus: no
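+# installation paths for the kube config and the kubectl and helm binaries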
+kube_directory: /root/.kube
+kubectl_bin_dir: /usr/local/bin
+helm_bin_dir: /usr/local/bin
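+# local helm repository serving the packaged charts (helm serve default address)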
+helm_repository_name: local
+helm_repository_url: http://127.0.0.1:8879