Updates to match OOM onap-parameters.yaml changes
Change-Id: I76149fa3762221a8c1fc0cee6a44d83238ebdb89
Issue-ID: INT-381
Signed-off-by: Gary Wu <gary.i.wu@huawei.com>
diff --git a/deployment/heat/onap-oom/env/gwu/onap-openrc b/deployment/heat/onap-oom/env/gwu/onap-openrc
new file mode 120000
index 0000000..c08bcbd
--- /dev/null
+++ b/deployment/heat/onap-oom/env/gwu/onap-openrc
@@ -0,0 +1 @@
+../../../../../test/ete/labs/gwu/onap-openrc
\ No newline at end of file
diff --git a/deployment/heat/onap-oom/env/gwu/onap.env b/deployment/heat/onap-oom/env/gwu/onap.env
index 6c03e94..3ca447d 100644
--- a/deployment/heat/onap-oom/env/gwu/onap.env
+++ b/deployment/heat/onap-oom/env/gwu/onap.env
@@ -19,15 +19,15 @@
public_net_id: 024582bd-ef9b-48b9-9e70-e6732559d9df
public_net_name: provider
- dcae_keystone_url: http://192.168.1.11:5000/v2.0
oam_network_cidr: 172.16.0.0/16
- dcae_ip_addr: 172.16.0.4
dns_forwarder: 192.168.1.11
external_dns: 192.168.1.3
- dnsaas_proxy_enable: false
- dnsaas_keystone_url: http://192.168.1.11:5000/v2.0
+ dnsaas_proxy_enable: "false"
+ dnsaas_proxied_keystone_url_path: "v2.0"
+ dnsaas_keystone_url: http://192.168.1.11:5000
dnsaas_region: RegionOne
+ dnsaas_tenant_id: 4c93f99551604bf7af25a8f80c7f34cb
dnsaas_tenant_name: onap
dnsaas_username: demo
dnsaas_password: demo
diff --git a/deployment/heat/onap-oom/env/huawei/onap-beijing-oom-openrc b/deployment/heat/onap-oom/env/huawei/onap-beijing-oom-openrc
new file mode 120000
index 0000000..38d9b3f
--- /dev/null
+++ b/deployment/heat/onap-oom/env/huawei/onap-beijing-oom-openrc
@@ -0,0 +1 @@
+../../../../../test/ete/labs/huawei/onap-beijing-oom-openrc
\ No newline at end of file
diff --git a/deployment/heat/onap-oom/env/huawei/onap-beijing-oom.env b/deployment/heat/onap-oom/env/huawei/onap-beijing-oom.env
index 80d65de..0365c75 100644
--- a/deployment/heat/onap-oom/env/huawei/onap-beijing-oom.env
+++ b/deployment/heat/onap-oom/env/huawei/onap-beijing-oom.env
@@ -19,15 +19,15 @@
public_net_id: 3a6247f1-fac6-4167-a49f-33cc8415ccf4
public_net_name: provider
- dcae_keystone_url: http://10.145.122.117:5000/v2.0
oam_network_cidr: 172.16.0.0/16
- dcae_ip_addr: 172.16.0.4
dns_forwarder: 10.145.122.117
external_dns: 10.145.122.118
- dnsaas_proxy_enable: false
- dnsaas_keystone_url: http://10.145.122.117:5000/v2.0
+ dnsaas_proxy_enable: "false"
+ dnsaas_proxied_keystone_url_path: "v2.0"
+ dnsaas_keystone_url: http://10.145.122.117:5000
dnsaas_region: RegionOne
+ dnsaas_tenant_id: 3d228d2fcbb7447bbba3cde703431bc1
dnsaas_tenant_name: onap-beijing-oom
dnsaas_username: demo
dnsaas_password: demo
diff --git a/deployment/heat/onap-oom/env/windriver/Integration-Jenkins-openrc b/deployment/heat/onap-oom/env/windriver/Integration-Jenkins-openrc
new file mode 120000
index 0000000..7f4cc25
--- /dev/null
+++ b/deployment/heat/onap-oom/env/windriver/Integration-Jenkins-openrc
@@ -0,0 +1 @@
+../../../../../test/ete/labs/windriver/Integration-Jenkins-openrc
\ No newline at end of file
diff --git a/deployment/heat/onap-oom/env/windriver/Integration-Jenkins.env b/deployment/heat/onap-oom/env/windriver/Integration-Jenkins.env
index 867d2de..a3290a3 100644
--- a/deployment/heat/onap-oom/env/windriver/Integration-Jenkins.env
+++ b/deployment/heat/onap-oom/env/windriver/Integration-Jenkins.env
@@ -19,15 +19,15 @@
public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4
public_net_name: external
- dcae_keystone_url: "http://10.0.14.1/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0"
oam_network_cidr: 10.0.0.0/16
- dcae_ip_addr: 10.0.4.1
dns_forwarder: 10.12.25.5
external_dns: 8.8.8.8
- dnsaas_proxy_enable: true
- dnsaas_keystone_url: http://10.12.25.5:5000/v2.0
+ dnsaas_proxy_enable: "true"
+ dnsaas_proxied_keystone_url_path: "api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0"
+ dnsaas_keystone_url: http://10.12.25.5:5000
dnsaas_region: RegionOne
+ dnsaas_tenant_id: bf80d09fbc804b42b3bc727d6cf1fcbe
dnsaas_tenant_name: Integration-Jenkins
dnsaas_username: demo
dnsaas_password: onapdemo
diff --git a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh b/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
index db2428b..216a187 100644
--- a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
+++ b/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
@@ -15,7 +15,7 @@
Acquire::https::Proxy "DIRECT";
EOF
apt-get -y update
-apt-get -y install jq
+apt-get -y install linux-image-extra-$(uname -r) jq
cd ~
@@ -48,7 +48,7 @@
done
OLD_PID=$(jq -r '.data[0].id' projects.json)
-curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys | tee apikeys.json
+curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys > apikeys.json
echo export RANCHER_ACCESS_KEY=`jq -r '.publicValue' apikeys.json` >> api-keys-rc
echo export RANCHER_SECRET_KEY=`jq -r '.secretValue' apikeys.json` >> api-keys-rc
source api-keys-rc
@@ -57,28 +57,28 @@
until [ ! -z "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
sleep 5
- curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes | tee projectTemplatesKubernetes.json
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes > projectTemplatesKubernetes.json
TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
done
-curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" | tee project.json
+curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" > project.json
PID=`jq -r '.id' project.json`
echo export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID >> api-keys-rc
source api-keys-rc
until [ $(jq -r '.state' project.json) == "active" ]; do
sleep 5
- curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID | tee project.json
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID > project.json
done
TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
touch token.json
while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
sleep 5
- curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID | tee token.json
+ curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID > token.json
done
-CMD=$(jq -r .command token.json)
-eval $CMD
+RANCHER_AGENT_CMD=$(jq -r .command token.json)
+eval $RANCHER_AGENT_CMD
# download rancher CLI
wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
@@ -126,36 +126,166 @@
# Put your onap_key ssh private key in ~/.ssh/onap_key
# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
-cp ~/oom/kubernetes/config/onap-parameters-sample.yaml ~/oom/kubernetes/config/onap-parameters.yaml
-cat >> ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
+cat > ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
+# For information regarding those parameters, please visit http://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html
-####################################
-# Overridden by k8s_vm_entrypoint.sh
-####################################
+#################
+# COMMON CONFIG #
+#################
-OPENSTACK_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
-OPENSTACK_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
-OPENSTACK_CENTOS_7_IMAGE: "__centos_7_image__"
-OPENSTACK_PUBLIC_NET_ID: "__public_net_id__"
-OPENSTACK_PUBLIC_NET_NAME: "__public_net_name__"
-OPENSTACK_OAM_NETWORK_CIDR: "__oam_network_cidr__"
+# NEXUS
+NEXUS_HTTP_REPO: https://nexus.onap.org/content/sites/raw
+NEXUS_DOCKER_REPO: nexus3.onap.org:10001
+NEXUS_USERNAME: docker
+NEXUS_PASSWORD: docker
+
+# ONAP config
+# Do not change unless you know what you're doing
+DMAAP_TOPIC: "AUTO"
+DEMO_ARTIFACTS_VERSION: "1.1.1"
+
+# ------------------------------------------------#
+# OpenStack Config on which VNFs will be deployed #
+# ------------------------------------------------#
+
+# The four parameters below are only used by Robot.
+# Since Robot can perform some automated actions,
+# e.g. onboard/distribute/instantiate, it has to be
+# configured with the four parameters below (in addition
+# to the OPENSTACK ones).
+# If you don't intend to use Robot for those actions,
+# you can put dummy values, but you will then have to provide
+# those values when deploying a VNF anyway.
+# --------------------------------------------------
+# This is the OAM network ID used for the internal network of the VNFs.
+# You could create 10.10.10.0/24 (256 IPs should be enough) in your cloud instance.
+OPENSTACK_OAM_NETWORK_ID: "__oam_network_id__"
+# This is the public Network ID. Public = external network in OpenStack.
+# Floating IPs will be created and assigned to VNFs from this network,
+# to provide external reachability.
+OPENSTACK_PUBLIC_NETWORK_ID: "__public_net_id__"
+# VM Flavor to be used by VNF.
+OPENSTACK_FLAVOR: "m1.medium"
+# VM image to be used by the VNF. Here Ubuntu 16.04 is provided.
+OPENSTACK_IMAGE: "__ubuntu_1604_image__"
+
+OPENSTACK_USERNAME: "__openstack_username__"
+OPENSTACK_PASSWORD: "__openstack_api_key__"
OPENSTACK_TENANT_NAME: "__openstack_tenant_name__"
OPENSTACK_TENANT_ID: "__openstack_tenant_id__"
-OPENSTACK_USERNAME: "__openstack_username__"
-OPENSTACK_API_KEY: "__openstack_api_key__"
+OPENSTACK_REGION: "RegionOne"
+# Either v2.0 or v3
+OPENSTACK_API_VERSION: "v2.0"
OPENSTACK_KEYSTONE_URL: "__keystone_url__"
-DCAE_IP_ADDR: "__dcae_ip_addr__"
-DCAE_KEYSTONE_URL: "__dcae_keystone_url__"
-DNS_LIST: "__dns_forwarder__"
+# Don't change this unless you know what it is
+OPENSTACK_SERVICE_TENANT_NAME: "service"
+
+########
+# DCAE #
+########
+
+# Whether or not to deploy DCAE
+# If set to false, all the parameters below can be left empty or removed.
+# If set to false, also update the disableDcae value in ../dcaegen2/values.yaml to true,
+# to avoid deploying the DCAE deployments and services.
+DEPLOY_DCAE: "true"
+
+# DCAE Config
+DCAE_DOCKER_VERSION: v1.1.1
+DCAE_VM_BASE_NAME: "dcae"
+
+# ------------------------------------------------#
+# OpenStack Config on which DCAE will be deployed #
+# ------------------------------------------------#
+
+# Whether to deploy DCAE on the same OpenStack instance on which the VNFs will be deployed
+# (i.e. re-use the same config as defined above).
+# If set to true, discard the next config block; otherwise, provide the values.
+IS_SAME_OPENSTACK_AS_VNF: "true"
+
+# Fill in the values in the block below only if IS_SAME_OPENSTACK_AS_VNF is set to "false"
+# ---
+# Either v2.0 or v3
+DCAE_OS_API_VERSION: "v2.0"
+DCAE_OS_KEYSTONE_URL: "__keystone_url__"
+DCAE_OS_USERNAME: ""
+DCAE_OS_PASSWORD: ""
+DCAE_OS_TENANT_NAME: ""
+DCAE_OS_TENANT_ID: ""
+DCAE_OS_REGION: ""
+# ---
+
+# We need to provide the config of the public network here, because the DCAE VMs will be
+# assigned a floating IP on this network so one can access them (to debug, for instance).
+# The ID of the public network.
+DCAE_OS_PUBLIC_NET_ID: "__public_net_id__"
+# The name of the public network.
+DCAE_OS_PUBLIC_NET_NAME: "__public_net_name__"
+# This is the private network that will be used by the DCAE VMs. The network will be created during the DCAE bootstrap process,
+# and the subnet created will use this CIDR. (A /28 provides 16 IPs; DCAE requires 15.)
+DCAE_OS_OAM_NETWORK_CIDR: "10.99.0.0/16"
+# This will be the private IP of the DCAE bootstrap VM. This VM is responsible for spinning up the whole DCAE stack (14 VMs total).
+DCAE_IP_ADDR: "10.99.4.1"
+
+# The flavor names to be used by the DCAE VMs
+DCAE_OS_FLAVOR_SMALL: "m1.small"
+DCAE_OS_FLAVOR_MEDIUM: "m1.medium"
+DCAE_OS_FLAVOR_LARGE: "m1.large"
+# The image names to be used by the DCAE VMs
+DCAE_OS_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
+DCAE_OS_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
+DCAE_OS_CENTOS_7_IMAGE: "__centos_7_image__"
+
+# This is the keypair that will be created in OpenStack, and that one can use to access the DCAE VMs via ssh.
+# The private key needs to be in a specific format so that, at the end of the process, it's formatted properly
+# when it ends up in the DCAE HEAT stack. The best way is to do the following:
+# - copy-paste your key
+# - surround it with quotes
+# - add \n at the end of each line
+# - escape the result using https://www.freeformatter.com/java-dotnet-escape.html#ad-output
+DCAE_OS_KEY_NAME: "onap_key"
+DCAE_OS_PUB_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+DCAE_OS_PRIVATE_KEY: \"-----BEGIN RSA PRIVATE KEY-----\\n\r\nMIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\\n\r\nNGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\\n\r\nNhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\\n\r\nyzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\\n\r\n+ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\\n\r\nfiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\\n\r\nqFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\\n\r\nlMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\\n\r\nKqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\\n\r\nF2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\\n\r\nOjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\\n\r\n4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\\n\r\n6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\\n\r\nbe9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\\n\r\nUbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\\n\r\ngMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\\n\r\nY63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\\n\r\n9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\\n\r\naWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\\n\r\nxGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\\n\r\nfMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\\n\r\n22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\\n\r\nYOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\\n\r\nitqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\\n\r\ny7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\\n\r\n-----END RSA PRIVATE KEY-----\\n\r\n\"
+
+# The settings below allow one to configure the /etc/resolv.conf nameserver resolution for all the DCAE VMs.
+# -
+# In the HEAT setup, it's meant to be a DNS list, as the HEAT setup deploys a DNS server VM in addition to DNS Designate,
+# and this DNS server is set up to forward requests to the DNS Designate backend when it cannot resolve them, hence the
+# DNS_FORWARDER config here. The DCAE bootstrap requires both inputs; even though they are now similar, we have to pass
+# both.
+# -
+# ATTENTION: It is assumed that the DNS Designate backend is configured to forward requests to a public DNS (e.g. 8.8.8.8)
+# -
+# Put the IP of the DNS Designate backend (e.g. the OpenStack IP supporting DNS Designate)
+DNS_IP: "__dns_forwarder__"
DNS_FORWARDER: "__dns_forwarder__"
+
+# Public DNS - not used, but required by the DCAE bootstrap container
EXTERNAL_DNS: "__external_dns__"
+
+# DNS domain for the DCAE VMs
+DCAE_DOMAIN: "dcaeg2.onap.org"
+
+# Proxy DNS Designate. This means DCAE will run in an OpenStack instance that does not support Designate, and Designate will be provided by another instance.
+# Set to true if you wish to use it.
DNSAAS_PROXY_ENABLE: "__dnsaas_proxy_enable__"
+# Provide this only if DNSAAS_PROXY_ENABLE is set to true. The IP has to be that of one of the K8S hosts.
+# e.g. http://10.195.197.164/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0
+DCAE_PROXIED_KEYSTONE_URL: "http://__k8s_ip_addr__/__dnsaas_proxied_keystone_url_path__"
+
+# -----------------------------------------------------#
+# OpenStack Config on which DNS Designate is supported #
+# -----------------------------------------------------#
+
+# If this is the same OpenStack instance as the one used for the VNFs or DCAE, please re-enter the values here.
+
+DNSAAS_API_VERSION: "v3"
+DNSAAS_REGION: "RegionOne"
DNSAAS_KEYSTONE_URL: "__dnsaas_keystone_url__"
-DNSAAS_REGION: "__dnsaas_region__"
+DNSAAS_TENANT_ID: "__dnsaas_tenant_id__"
DNSAAS_TENANT_NAME: "__dnsaas_tenant_name__"
DNSAAS_USERNAME: "__dnsaas_username__"
DNSAAS_PASSWORD: "__dnsaas_password__"
-
EOF
cat ~/oom/kubernetes/config/onap-parameters.yaml
@@ -175,7 +305,7 @@
./createConfig.sh -n onap
# Wait until the config container completes.
-sleep 200
+sleep 20
until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
sleep 10
done
@@ -188,6 +318,8 @@
git add -A
git commit -m "initial commit"
+cat /dockerdata-nfs/onap/dcaegen2/heat/onap_dcae.env
+
# Run ONAP:
cd ~/oom/kubernetes/oneclick/
./createAll.bash -n onap
diff --git a/deployment/heat/onap-oom/onap-oom.yaml b/deployment/heat/onap-oom/onap-oom.yaml
index b594194..3675bd9 100644
--- a/deployment/heat/onap-oom/onap-oom.yaml
+++ b/deployment/heat/onap-oom/onap-oom.yaml
@@ -60,9 +60,6 @@
type: string
description: Name of the Ubuntu 14.04 image
- dcae_ip_addr:
- type: string
-
dns_forwarder:
type: string
description: the forwarder address for setting up ONAP's private DNS server
@@ -79,6 +76,10 @@
type: string
description: the region of the cloud instance providing the Designate DNS as a Service
+ dnsaas_proxied_keystone_url_path:
+ type: string
+ description: the proxy keystone URL path for DCAE to use (via MultiCloud)
+
dnsaas_keystone_url:
type: string
description: the keystone URL of the cloud instance providing the Designate DNS as a Service
@@ -91,20 +92,54 @@
type: string
description: the password of the cloud instance providing the Designate DNS as a Service
+ dnsaas_tenant_id:
+ type: string
+ description: the ID of the tenant in the cloud instance providing the Designate DNS as a Service
+
dnsaas_tenant_name:
type: string
description: the name of the tenant in the cloud instance providing the Designate DNS as a Service
- dcae_keystone_url:
- type: string
- description: the keystone URL for DCAE to use (via MultiCloud)
-
resources:
random-str:
type: OS::Heat::RandomString
properties:
length: 4
+ # ONAP management private network
+ oam_network:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: oam_network_rand
+ params:
+ rand: { get_resource: random-str }
+
+ oam_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: oam_network_rand
+ params:
+ rand: { get_resource: random-str }
+ network_id: { get_resource: oam_network }
+ cidr: { get_param: oam_network_cidr }
+ dns_nameservers: [ get_param: dns_forwarder ]
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_id }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: oam_subnet }
+
rancher_vm:
type: OS::Nova::Server
properties:
@@ -123,6 +158,18 @@
template:
get_file: rancher_vm_entrypoint.sh
+ k8s_private_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: oam_network }
+ fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+
+ k8s_floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: k8s_private_port }
+
k8s_vm:
type: OS::Nova::Server
properties:
@@ -131,7 +178,7 @@
flavor: { get_param: k8s_vm_flavor }
key_name: onap_key
networks:
- - network: { get_param: public_net_id }
+ - port: { get_resource: k8s_private_port }
user_data_format: RAW
user_data:
str_replace:
@@ -139,24 +186,26 @@
__docker_proxy__: { get_param: docker_proxy }
__apt_proxy__: { get_param: apt_proxy }
__rancher_ip_addr__: { get_attr: [rancher_vm, first_address] }
+ __k8s_ip_addr__: { get_attr: [k8s_floating_ip, floating_ip_address] }
__openstack_tenant_id__: { get_param: openstack_tenant_id }
__openstack_tenant_name__: { get_param: openstack_tenant_name }
__openstack_username__: { get_param: openstack_username }
__openstack_api_key__: { get_param : openstack_api_key }
__public_net_id__: { get_param: public_net_id }
__public_net_name__: { get_param: public_net_name }
+ __oam_network_id__: { get_resource: oam_network }
__oam_network_cidr__: { get_param: oam_network_cidr }
__ubuntu_1404_image__: { get_param: ubuntu_1404_image }
__ubuntu_1604_image__: { get_param: ubuntu_1604_image }
__centos_7_image__: { get_param: centos_7_image }
__keystone_url__: { get_param: keystone_url }
- __dcae_keystone_url__: { get_param: dcae_keystone_url }
- __dcae_ip_addr__: { get_param: dcae_ip_addr }
__dns_forwarder__: { get_param: dns_forwarder }
__external_dns__: { get_param: external_dns }
__dnsaas_proxy_enable__: { get_param: dnsaas_proxy_enable }
+ __dnsaas_proxied_keystone_url_path__: { get_param: dnsaas_proxied_keystone_url_path }
__dnsaas_keystone_url__: { get_param: dnsaas_keystone_url }
__dnsaas_region__: { get_param: dnsaas_region }
+ __dnsaas_tenant_id__: { get_param: dnsaas_tenant_id }
__dnsaas_tenant_name__: { get_param: dnsaas_tenant_name }
__dnsaas_username__: { get_param: dnsaas_username }
__dnsaas_password__: { get_param: dnsaas_password }
@@ -170,4 +219,4 @@
k8s_vm_ip:
description: The IP address of the k8s instance
- value: { get_attr: [k8s_vm, first_address] }
+ value: { get_attr: [k8s_floating_ip, floating_ip_address] }