blob: 9118c8e2484852d1b5a33a3655ee56b3682c19ba [file] [log] [blame]
#!/bin/bash -x
# Cloud-init / Heat userdata: prepare this VM as the Rancher master for an
# ONAP (Amsterdam) OOM deployment.  Strings of the form __name__ are
# placeholders substituted by the deployment tooling before this script runs.
printenv

mkdir -p /opt/config
echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
# Make the local hostname resolvable via /etc/hosts.
echo "$(hostname -I) $(hostname)" >> /etc/hosts
mkdir -p /etc/docker
# Only configure an insecure registry when a docker proxy was substituted in
# (when the placeholder is replaced by an empty string the test is false).
if [ -n "__docker_proxy__" ]; then
  cat > /etc/docker/daemon.json <<EOF
{
  "insecure-registries" : ["__docker_proxy__"]
}
EOF
fi
# Likewise, only point apt at a proxy when one was provided.
if [ -n "__apt_proxy__" ]; then
  cat > /etc/apt/apt.conf.d/30proxy <<EOF
Acquire::http { Proxy "http://__apt_proxy__"; };
Acquire::https::Proxy "DIRECT";
EOF
fi
apt-get -y update
# linux-image-extra-* supplies the aufs storage driver for docker;
# jq is used later to parse Rancher API responses.
apt-get -y install "linux-image-extra-$(uname -r)" jq

cd ~ || exit 1

# install docker 1.12 (the version Rancher 1.6 supports).
# NOTE(review): piping a remote script straight into sh is trusted-source
# install-by-convention here; verify the URL if this is hardened later.
curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
usermod -aG docker ubuntu

# install kubernetes 1.8.6 client
curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
# -p keeps this idempotent if the script is re-run.
mkdir -p ~/.kube

# install helm 2.3
wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm

# Fix virtual memory allocation for onap-log:elasticsearch:
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p

# install rancher agent: create an API key pair, replace the default
# environment with a Kubernetes one, and register this host as an agent.
# Connection details are persisted in api-keys-rc for later steps/operators.
echo "export RANCHER_IP=__rancher_ip_addr__" > api-keys-rc
source api-keys-rc

# Wait for the Rancher API to come up.
sleep 50
until curl -s -o projects.json -H "Accept: application/json" "http://$RANCHER_IP:8080/v2-beta/projects"; do
  sleep 10
done
# Rancher auto-creates a default project; remember its id so we can delete it.
OLD_PID=$(jq -r '.data[0].id' projects.json)

# Create an API key pair on account 1a1 (the default admin account).
curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' "http://$RANCHER_IP:8080/v2-beta/apikeys" > apikeys.json
echo "export RANCHER_ACCESS_KEY=$(jq -r '.publicValue' apikeys.json)" >> api-keys-rc
echo "export RANCHER_SECRET_KEY=$(jq -r '.secretValue' apikeys.json)" >> api-keys-rc
source api-keys-rc

# Drop the auto-created default environment.
curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X DELETE -H 'Content-Type: application/json' "http://$RANCHER_IP:8080/v2-beta/projects/$OLD_PID"

# Poll until the Kubernetes project template is registered.
until [ -n "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
  sleep 5
  curl -s -H "Accept: application/json" "http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes" > projectTemplatesKubernetes.json
  TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
done

# Create the "oom" Kubernetes environment from that template.
curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'"$TEMPLATE_ID"'" }' "http://$RANCHER_IP:8080/v2-beta/projects" > project.json
PID=$(jq -r '.id' project.json)
echo "export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID" >> api-keys-rc
source api-keys-rc

# Wait for the new environment to become active.  The substitution is quoted
# so an empty/partial API response cannot break the test expression.
until [ "$(jq -r '.state' project.json)" = "active" ]; do
  sleep 5
  curl -s -H "Accept: application/json" "http://$RANCHER_IP:8080/v1/projects/$PID" > project.json
done

# Request a registration token, then poll until its .command is populated.
TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" "http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens" | jq -r '.id')
touch token.json
while [ "$(jq -r .command token.json | wc -c)" -lt 10 ]; do
  sleep 5
  curl -s -X GET -H "Accept: application/json" "http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID" > token.json
done
# .command is a full "docker run ..." line produced by the Rancher server;
# eval of this API-provided text is intentional (it registers the agent).
RANCHER_AGENT_CMD=$(jq -r .command token.json)
eval "$RANCHER_AGENT_CMD"

# download rancher CLI
wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
unxz rancher-linux-amd64-v0.6.7.tar.xz
tar xvf rancher-linux-amd64-v0.6.7.tar

# Clone OOM (Amsterdam branch):
cd ~ || exit 1
git clone -b amsterdam http://gerrit.onap.org/r/oom

# Update values.yaml to point to docker-proxy instead of nexus3:
cd ~/oom/kubernetes || exit 1
# Dots are escaped so the pattern matches only the literal registry name,
# and find invokes perl directly instead of relying on word-splitting
# an unquoted command substitution.
find ./ -name values.yaml -exec perl -p -i -e 's/nexus3\.onap\.org:10001/__docker_proxy__/g' {} +
perl -p -i -e 's/nexus3\.onap\.org:10001/__docker_proxy__/g' oneclick/setenv.bash

# Rancher kubeconfig bearer token: base64("Basic " + base64("access:secret")),
# as expected by Rancher's kubernetes API proxy.
KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)

# create .kube/config pointing at the Rancher kubernetes API proxy
cat > ~/.kube/config <<EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
    api-version: v1
    insecure-skip-tls-verify: true
    server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
  name: "oom"
contexts:
- context:
    cluster: "oom"
    user: "oom"
  name: "oom"
current-context: "oom"
users:
- name: "oom"
  user:
    token: "$KUBETOKEN"
EOF

export KUBECONFIG=/root/.kube/config
kubectl config view

# Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the
# token from ~/.kube/config.  Use '|' as the sed delimiter: KUBETOKEN is
# base64 text and can legitimately contain '/', which would terminate a
# '/'-delimited s-command early and corrupt the substitution.
sed -i "s|kubeMasterAuthToken:.*|kubeMasterAuthToken: $KUBETOKEN|" ~/oom/kubernetes/kube2msb/values.yaml
Gary Wu1ff56672018-01-17 20:51:45 -0800129
130# Put your onap_key ssh private key in ~/.ssh/onap_key
131
132# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
Gary Wu52dfdcb2018-01-25 11:02:48 -0800133cat > ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
134# For information regarding those parameters, please visit http://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html
Gary Wu60dd0d82018-01-18 14:54:47 -0800135
Gary Wu52dfdcb2018-01-25 11:02:48 -0800136#################
137# COMMON CONFIG #
138#################
Gary Wu60dd0d82018-01-18 14:54:47 -0800139
Gary Wu52dfdcb2018-01-25 11:02:48 -0800140# NEXUS
141NEXUS_HTTP_REPO: https://nexus.onap.org/content/sites/raw
142NEXUS_DOCKER_REPO: nexus3.onap.org:10001
143NEXUS_USERNAME: docker
144NEXUS_PASSWORD: docker
145
146# ONAP config
147# Do not change unless you know what you're doing
148DMAAP_TOPIC: "AUTO"
149DEMO_ARTIFACTS_VERSION: "1.1.1"
150
151# ------------------------------------------------#
152# OpenStack Config on which VNFs will be deployed #
153# ------------------------------------------------#
154
155# The four below parameters are only used by Robot.
156# As Robot is able to perform some automated actions,
157# e.g. onboard/distribute/instantiate, it has to be
158# configured with four below parameters (in addition
159# to the OPENSTACK ones).
160# If you don't intend to use Robot for those actions,
161# you can put dummy values, but you will have to provide
162# those values when deploying VNF anyway.
163# --------------------------------------------------
164# This is the OAM Network ID used for internal network by VNFs.
165# You could create 10.10.10.0/24 (256 IPs should be enough) in your cloud instance.
166OPENSTACK_OAM_NETWORK_ID: "__oam_network_id__"
167# This is the public Network ID. Public = external network in OpenStack.
168# Floating IPs will be created and assigned to VNFs from this network,
169# to provide external reachability.
170OPENSTACK_PUBLIC_NETWORK_ID: "__public_net_id__"
171# VM Flavor to be used by VNF.
172OPENSTACK_FLAVOR: "m1.medium"
173# VM image to be used by VNF. Here ubuntu 14.04 is provided.
174OPENSTACK_IMAGE: "__ubuntu_1604_image__"
175
176OPENSTACK_USERNAME: "__openstack_username__"
177OPENSTACK_PASSWORD: "__openstack_api_key__"
Gary Wuc98156d2018-01-18 12:03:26 -0800178OPENSTACK_TENANT_NAME: "__openstack_tenant_name__"
179OPENSTACK_TENANT_ID: "__openstack_tenant_id__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800180OPENSTACK_REGION: "RegionOne"
181# Either v2.0 or v3
182OPENSTACK_API_VERSION: "v2.0"
Gary Wuc98156d2018-01-18 12:03:26 -0800183OPENSTACK_KEYSTONE_URL: "__keystone_url__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800184# Don't change this if you don't know what it is
185OPENSTACK_SERVICE_TENANT_NAME: "service"
186
187########
188# DCAE #
189########
190
191# Whether or not to deploy DCAE
192# If set to false, all the parameters below can be left empty or removed
193# If set to false, update ../dcaegen2/values.yaml disableDcae value to true,
194# this is to avoid deploying the DCAE deployments and services.
195DEPLOY_DCAE: "true"
196
197# DCAE Config
198DCAE_DOCKER_VERSION: v1.1.1
199DCAE_VM_BASE_NAME: "dcae"
200
201# ------------------------------------------------#
202# OpenStack Config on which DCAE will be deployed #
203# ------------------------------------------------#
204
205# Whether to have DCAE deployed on the same OpenStack instance on which VNF will be deployed.
206# (e.g. re-use the same config as defined above)
207# If set to true, discard the next config block, else provide the values.
208IS_SAME_OPENSTACK_AS_VNF: "true"
209
210# Fill in the values in below block only if IS_SAME_OPENSTACK_AS_VNF set to "false"
211# ---
212# Either v2.0 or v3
213DCAE_OS_API_VERSION: "v2.0"
214DCAE_OS_KEYSTONE_URL: "__keystone_url__"
215DCAE_OS_USERNAME: ""
216DCAE_OS_PASSWORD: ""
217DCAE_OS_TENANT_NAME: ""
218DCAE_OS_TENANT_ID: ""
219DCAE_OS_REGION: ""
220# ---
221
222# We need to provide the config of the public network here, because the DCAE VMs will be
223# assigned a floating IP on this network so one can access them, to debug for instance.
224# The ID of the public network.
225DCAE_OS_PUBLIC_NET_ID: "__public_net_id__"
226# The name of the public network.
227DCAE_OS_PUBLIC_NET_NAME: "__public_net_name__"
228# This is the private network that will be used by DCAE VMs. The network will be created during the DCAE boostrap process,
229# and will the subnet created will use this CIDR. (/28 provides 16 IPs, DCAE requires 15.)
230DCAE_OS_OAM_NETWORK_CIDR: "10.99.0.0/16"
231# This will be the private ip of the DCAE boostrap VM. This VM is responsible for spinning up the whole DCAE stack (14 VMs total)
232DCAE_IP_ADDR: "10.99.4.1"
233
234# The flavors' name to be used by DCAE VMs
235DCAE_OS_FLAVOR_SMALL: "m1.small"
236DCAE_OS_FLAVOR_MEDIUM: "m1.medium"
237DCAE_OS_FLAVOR_LARGE: "m1.large"
238# The images' name to be used by DCAE VMs
239DCAE_OS_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
240DCAE_OS_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
241DCAE_OS_CENTOS_7_IMAGE: "__centos_7_image__"
242
243# This is the keypair that will be created in OpenStack, and that one can use to access DCAE VMs using ssh.
244# The private key needs to be in a specific format so at the end of the process, it's formatted properly
245# when ending up in the DCAE HEAT stack. The best way is to do the following:
246# - copy paste your key
247# - surround it with quote
248# - add \n at the end of each line
249# - escape the result using https://www.freeformatter.com/java-dotnet-escape.html#ad-output
250DCAE_OS_KEY_NAME: "onap_key"
251DCAE_OS_PUB_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
252DCAE_OS_PRIVATE_KEY: \"-----BEGIN RSA PRIVATE KEY-----\\n\r\nMIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\\n\r\nNGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\\n\r\nNhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\\n\r\nyzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\\n\r\n+ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\\n\r\nfiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\\n\r\nqFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\\n\r\nlMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\\n\r\nKqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\\n\r\nF2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\\n\r\nOjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\\n\r\n4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\\n\r\n6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\\n\r\nbe9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\\n\r\nUbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\\n\r\ngMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\\n\r\nY63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\\n\r\n9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\\n\r\naWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\\n\r\nxGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\\n\r\nfMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\\n\r\n22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\\n\r\nYOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\\n\r\nitqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\\n\r\ny7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\\n\r\n-----END RSA PRIVATE KEY-----\\n\r\n\"
253
254# This below settings allows one to configure the /etc/resolv.conf nameserver resolution for all the DCAE VMs.
255# -
256# In the HEAT setup, it's meant to be a DNS list, as the HEAT setup deploys a DNS Server VM in addition to DNS Designate
257# and this DNS Server is setup to forward request to the DNS Designate backend when it cannot resolve, hence the
258# DNS_FORWARDER config here. The DCAE Boostrap requires both inputs, even though they are now similar, we have to pass
259# them.
260# -
261# ATTENTION: Assumption is made the DNS Designate backend is configure to forward request to a public DNS (e.g. 8.8.8.8)
262# -
263# Put the IP of the DNS Designate backend (e.g. the OpenStack IP supporting DNS Designate)
264DNS_IP: "__dns_forwarder__"
Gary Wu60dd0d82018-01-18 14:54:47 -0800265DNS_FORWARDER: "__dns_forwarder__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800266
267# Public DNS - not used but required by the DCAE boostrap container
Gary Wu7b416392018-01-19 13:16:49 -0800268EXTERNAL_DNS: "__external_dns__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800269
270# DNS domain for the DCAE VMs
271DCAE_DOMAIN: "dcaeg2.onap.org"
272
273# Proxy DNS Designate. This means DCAE will run in an instance not support Designate, and Designate will be provided by another instance.
274# Set to true if you wish to use it
Gary Wu7b416392018-01-19 13:16:49 -0800275DNSAAS_PROXY_ENABLE: "__dnsaas_proxy_enable__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800276# Provide this only if DNSAAS_PROXY_ENABLE set to true. The IP has to be the IP of one of the K8S hosts.
277# e.g. http://10.195.197.164/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0
278DCAE_PROXIED_KEYSTONE_URL: "http://__k8s_ip_addr__/__dnsaas_proxied_keystone_url_path__"
279
280# -----------------------------------------------------#
281# OpenStack Config on which DNS Designate is supported #
282# -----------------------------------------------------#
283
284# If this is the same OpenStack used for the VNF or DCAE, please re-enter the values here.
285
286DNSAAS_API_VERSION: "v3"
287DNSAAS_REGION: "RegionOne"
Gary Wu7b416392018-01-19 13:16:49 -0800288DNSAAS_KEYSTONE_URL: "__dnsaas_keystone_url__"
Gary Wu52dfdcb2018-01-25 11:02:48 -0800289DNSAAS_TENANT_ID: "__dnsaas_tenant_id__"
Gary Wu7b416392018-01-19 13:16:49 -0800290DNSAAS_TENANT_NAME: "__dnsaas_tenant_name__"
291DNSAAS_USERNAME: "__dnsaas_username__"
292DNSAAS_PASSWORD: "__dnsaas_password__"
Gary Wu1ff56672018-01-17 20:51:45 -0800293EOF
Gary Wuc98156d2018-01-18 12:03:26 -0800294cat ~/oom/kubernetes/config/onap-parameters.yaml


# Wait for kubernetes to initialize: the kube-system addon pods must all be
# Running before OOM can be deployed.  Substitutions are quoted so an empty
# kubectl response cannot break the numeric comparison.
sleep 100
until [ "$(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running)" -ge 6 ]; do
  sleep 10
done

# Source the environment file:
cd ~/oom/kubernetes/oneclick/ || exit 1
source setenv.bash

# run the config pod creation
cd ~/oom/kubernetes/config || exit 1
./createConfig.sh -n onap

# Wait until the config container completes.
sleep 20
until [ "$(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed)" -eq 1 ]; do
  sleep 10
done

# version control the config to see what's happening
cd /dockerdata-nfs/ || exit 1
git init
git config user.email "root@k8s"
git config user.name "root"
git add -A
git commit -m "initial commit"

# Show the generated DCAE environment in the log for debugging.
cat /dockerdata-nfs/onap/dcaegen2/heat/onap_dcae.env

# Run ONAP:
cd ~/oom/kubernetes/oneclick/ || exit 1
./createAll.bash -n onap

# Check ONAP status:
sleep 3
kubectl get pods --all-namespaces
Gary Wuc98156d2018-01-18 12:03:26 -0800333kubectl get pods --all-namespaces