#!/bin/bash -x
printenv

mkdir -p /opt/config
echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
echo `hostname -I` `hostname` >> /etc/hosts
mkdir -p /etc/docker
if [ ! -z "__docker_proxy__" ]; then
    cat > /etc/docker/daemon.json <<EOF
{
    "insecure-registries" : ["__docker_proxy__"]
}
EOF
fi
if [ ! -z "__apt_proxy__" ]; then
    cat > /etc/apt/apt.conf.d/30proxy <<EOF
Acquire::http { Proxy "http://__apt_proxy__"; };
Acquire::https::Proxy "DIRECT";
EOF
fi
apt-get -y update
apt-get -y install linux-image-extra-$(uname -r) jq

cd ~

# install docker 1.12
curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
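
# Optional sanity check (not in the original flow): confirm the Docker engine
# installed by the Rancher convenience script is present before continuing.
docker --version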
usermod -aG docker ubuntu

# install kubectl 1.8.6
curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
mkdir ~/.kube

# install helm 2.3
wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
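
# Optional sanity check (not in the original flow): confirm the kubectl and
# helm client binaries landed on the PATH.
kubectl version --client
helm version --client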

# Fix virtual memory allocation for onap-log:elasticsearch:
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p

# install rancher agent
echo export RANCHER_IP=__rancher_ip_addr__ > api-keys-rc
source api-keys-rc

sleep 50
until curl -s -o projects.json -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projects; do
    sleep 10
done
OLD_PID=$(jq -r '.data[0].id' projects.json)

curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys > apikeys.json
echo export RANCHER_ACCESS_KEY=`jq -r '.publicValue' apikeys.json` >> api-keys-rc
echo export RANCHER_SECRET_KEY=`jq -r '.secretValue' apikeys.json` >> api-keys-rc
source api-keys-rc
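
# At this point api-keys-rc exports RANCHER_IP, RANCHER_ACCESS_KEY and
# RANCHER_SECRET_KEY; RANCHER_URL is appended to it further below once the
# "oom" project has been created.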

curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X DELETE -H 'Content-Type: application/json' "http://$RANCHER_IP:8080/v2-beta/projects/$OLD_PID"

until [ ! -z "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
    sleep 5
    curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes > projectTemplatesKubernetes.json
    TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
done

curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" > project.json
PID=`jq -r '.id' project.json`
echo export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID >> api-keys-rc
source api-keys-rc

until [ $(jq -r '.state' project.json) == "active" ]; do
    sleep 5
    curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID > project.json
done

TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
touch token.json
while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
    sleep 5
    curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID > token.json
done
RANCHER_AGENT_CMD=$(jq -r .command token.json)
eval $RANCHER_AGENT_CMD
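
# The registration command returned by Rancher is typically a
# "sudo docker run ... rancher/agent:<version> http://$RANCHER_IP:8080/v1/scripts/<registration-token>"
# invocation; eval-ing it starts the agent container that registers this host
# (and its Kubernetes services) with the "oom" environment created above.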

# download rancher CLI
wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
unxz rancher-linux-amd64-v0.6.7.tar.xz
tar xvf rancher-linux-amd64-v0.6.7.tar

# Clone OOM:
cd ~
git clone -b amsterdam http://gerrit.onap.org/r/oom

# Update values.yaml to point to docker-proxy instead of nexus3:
cd ~/oom/kubernetes
perl -p -i -e 's/nexus3.onap.org:10001/__docker_proxy__/g' `find ./ -name values.yaml` oneclick/setenv.bash

KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
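
# KUBETOKEN nests two base64 encodings: the inner one builds the
# "Basic <base64(access:secret)>" HTTP auth string from the Rancher API key
# pair, and the outer one wraps that whole string into the token format used
# by the Rancher-proxied Kubernetes API in the kubeconfig written below.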

# create .kube/config
cat > ~/.kube/config <<EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
    api-version: v1
    insecure-skip-tls-verify: true
    server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
  name: "oom"
contexts:
- context:
    cluster: "oom"
    user: "oom"
  name: "oom"
current-context: "oom"
users:
- name: "oom"
  user:
    token: "$KUBETOKEN"
EOF

export KUBECONFIG=/root/.kube/config
kubectl config view

# Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the token from ~/.kube/config
sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kubernetes/kube2msb/values.yaml
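
# Optional check (not in the original flow): confirm the token substitution
# actually took place.
grep kubeMasterAuthToken ~/oom/kubernetes/kube2msb/values.yaml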

# Put your onap_key ssh private key in ~/.ssh/onap_key

# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
cat > ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
# For information regarding these parameters, please visit http://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html

#################
# COMMON CONFIG #
#################

# NEXUS
NEXUS_HTTP_REPO: https://nexus.onap.org/content/sites/raw
NEXUS_DOCKER_REPO: nexus3.onap.org:10001
NEXUS_USERNAME: docker
NEXUS_PASSWORD: docker

# ONAP config
# Do not change unless you know what you're doing
DMAAP_TOPIC: "AUTO"
DEMO_ARTIFACTS_VERSION: "1.1.1"

# ------------------------------------------------#
# OpenStack Config on which VNFs will be deployed #
# ------------------------------------------------#

# The four parameters below are only used by Robot.
# As Robot is able to perform some automated actions,
# e.g. onboard/distribute/instantiate, it has to be
# configured with the four parameters below (in addition
# to the OPENSTACK ones).
# If you don't intend to use Robot for those actions,
# you can put dummy values, but you will have to provide
# those values when deploying a VNF anyway.
# --------------------------------------------------
# This is the OAM Network ID used for the internal network of the VNFs.
# You could create 10.10.10.0/24 (256 IPs should be enough) in your cloud instance.
OPENSTACK_OAM_NETWORK_ID: "__oam_network_id__"
# This is the public network ID. Public = external network in OpenStack.
# Floating IPs will be created and assigned to VNFs from this network,
# to provide external reachability.
OPENSTACK_PUBLIC_NETWORK_ID: "__public_net_id__"
# VM flavor to be used by the VNFs.
OPENSTACK_FLAVOR: "m1.medium"
# VM image to be used by the VNFs. Here Ubuntu 16.04 is provided.
OPENSTACK_IMAGE: "__ubuntu_1604_image__"

OPENSTACK_USERNAME: "__openstack_username__"
OPENSTACK_PASSWORD: "__openstack_api_key__"
OPENSTACK_TENANT_NAME: "__openstack_tenant_name__"
OPENSTACK_TENANT_ID: "__openstack_tenant_id__"
OPENSTACK_REGION: "RegionOne"
# Either v2.0 or v3
OPENSTACK_API_VERSION: "v2.0"
OPENSTACK_KEYSTONE_URL: "__keystone_url__"
# Don't change this unless you know what it is
OPENSTACK_SERVICE_TENANT_NAME: "service"

########
# DCAE #
########

# Whether or not to deploy DCAE
# If set to false, all the parameters below can be left empty or removed
# If set to false, also update the disableDcae value in ../dcaegen2/values.yaml to true;
# this avoids deploying the DCAE deployments and services.
DEPLOY_DCAE: "true"

# DCAE Config
DCAE_DOCKER_VERSION: v1.1.1
DCAE_VM_BASE_NAME: "dcae"

# ------------------------------------------------#
# OpenStack Config on which DCAE will be deployed #
# ------------------------------------------------#

# Whether to deploy DCAE on the same OpenStack instance on which the VNFs will be deployed
# (i.e. re-use the same config as defined above).
# If set to true, discard the next config block; otherwise provide the values.
IS_SAME_OPENSTACK_AS_VNF: "true"

# Fill in the values in the block below only if IS_SAME_OPENSTACK_AS_VNF is set to "false"
# ---
# Either v2.0 or v3
DCAE_OS_API_VERSION: "v2.0"
DCAE_OS_KEYSTONE_URL: "__keystone_url__"
DCAE_OS_USERNAME: ""
DCAE_OS_PASSWORD: ""
DCAE_OS_TENANT_NAME: ""
DCAE_OS_TENANT_ID: ""
DCAE_OS_REGION: ""
# ---

# We need to provide the config of the public network here, because the DCAE VMs will be
# assigned a floating IP on this network so that one can access them, for debugging for instance.
# The ID of the public network.
DCAE_OS_PUBLIC_NET_ID: "__public_net_id__"
# The name of the public network.
DCAE_OS_PUBLIC_NET_NAME: "__public_net_name__"
# This is the private network that will be used by the DCAE VMs. The network will be created during the DCAE bootstrap process,
# and the subnet created will use this CIDR. (A /28 provides 16 IPs; DCAE requires 15.)
DCAE_OS_OAM_NETWORK_CIDR: "10.99.0.0/16"
# This will be the private IP of the DCAE bootstrap VM. This VM is responsible for spinning up the whole DCAE stack (14 VMs total).
DCAE_IP_ADDR: "10.99.4.1"

# The flavor names to be used by the DCAE VMs
DCAE_OS_FLAVOR_SMALL: "m1.small"
DCAE_OS_FLAVOR_MEDIUM: "m1.medium"
DCAE_OS_FLAVOR_LARGE: "m1.large"
# The image names to be used by the DCAE VMs
DCAE_OS_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
DCAE_OS_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
DCAE_OS_CENTOS_7_IMAGE: "__centos_7_image__"

# This is the keypair that will be created in OpenStack, and that one can use to access the DCAE VMs using ssh.
# The private key needs to be in a specific format so that, at the end of the process, it's formatted properly
# when it ends up in the DCAE HEAT stack. The best way is to do the following:
# - copy-paste your key
# - surround it with quotes
# - add \n at the end of each line
# - escape the result using https://www.freeformatter.com/java-dotnet-escape.html#ad-output
DCAE_OS_KEY_NAME: "onap_key"
DCAE_OS_PUB_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
DCAE_OS_PRIVATE_KEY: \"-----BEGIN RSA PRIVATE KEY-----\\n\r\nMIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\\n\r\nNGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\\n\r\nNhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\\n\r\nyzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\\n\r\n+ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\\n\r\nfiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\\n\r\nqFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\\n\r\nlMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\\n\r\nKqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\\n\r\nF2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\\n\r\nOjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\\n\r\n4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\\n\r\n6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\\n\r\nbe9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\\n\r\nUbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\\n\r\ngMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\\n\r\nY63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\\n\r\n9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\\n\r\naWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\\n\r\nxGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\\n\r\nfMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\\n\r\n22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\\n\r\nYOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\\n\r\nitqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\\n\r\ny7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\\n\r\n-----END RSA PRIVATE KEY-----\\n\r\n\"

# The settings below allow one to configure the /etc/resolv.conf nameserver resolution for all the DCAE VMs.
# -
# In the HEAT setup, it's meant to be a DNS list, as the HEAT setup deploys a DNS server VM in addition to DNS Designate,
# and this DNS server is set up to forward requests to the DNS Designate backend when it cannot resolve them, hence the
# DNS_FORWARDER config here. The DCAE bootstrap requires both inputs; even though they are now similar, we have to pass
# them both.
# -
# ATTENTION: It is assumed that the DNS Designate backend is configured to forward requests to a public DNS (e.g. 8.8.8.8)
# -
# Put the IP of the DNS Designate backend (e.g. the OpenStack IP supporting DNS Designate)
DNS_IP: "__dns_forwarder__"
DNS_FORWARDER: "__dns_forwarder__"

# Public DNS - not used but required by the DCAE bootstrap container
EXTERNAL_DNS: "__external_dns__"

# DNS domain for the DCAE VMs
DCAE_DOMAIN: "dcaeg2.onap.org"

# Proxy DNS Designate. This means DCAE will run in an instance that does not support Designate, and Designate will be provided by another instance.
# Set to true if you wish to use it.
DNSAAS_PROXY_ENABLE: "__dnsaas_proxy_enable__"
# Provide this only if DNSAAS_PROXY_ENABLE is set to true. The IP has to be the IP of one of the K8S hosts.
# e.g. http://10.195.197.164/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0
DCAE_PROXIED_KEYSTONE_URL: "http://__k8s_ip_addr__/__dnsaas_proxied_keystone_url_path__"

# -----------------------------------------------------#
# OpenStack Config on which DNS Designate is supported #
# -----------------------------------------------------#

# If this is the same OpenStack used for the VNF or DCAE, please re-enter the values here.

DNSAAS_API_VERSION: "v3"
DNSAAS_REGION: "RegionOne"
DNSAAS_KEYSTONE_URL: "__dnsaas_keystone_url__"
DNSAAS_TENANT_ID: "__dnsaas_tenant_id__"
DNSAAS_TENANT_NAME: "__dnsaas_tenant_name__"
DNSAAS_USERNAME: "__dnsaas_username__"
DNSAAS_PASSWORD: "__dnsaas_password__"
EOF
cat ~/oom/kubernetes/config/onap-parameters.yaml


# wait for kubernetes to initialize
sleep 100
until [ $(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running) -ge 6 ]; do
    sleep 10
done

# Source the environment file:
cd ~/oom/kubernetes/oneclick/
source setenv.bash

# run the config pod creation
cd ~/oom/kubernetes/config
./createConfig.sh -n onap
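
# createConfig.sh launches a one-shot "config" pod in the onap namespace that
# populates the shared /dockerdata-nfs volume consumed by the other ONAP
# charts; the loop below waits for that pod to reach Completed.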

# Wait until the config container completes.
sleep 20
until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
    sleep 10
done

# version control the config to see what's happening
cd /dockerdata-nfs/
git init
git config user.email "root@k8s"
git config user.name "root"
git add -A
git commit -m "initial commit"

cat /dockerdata-nfs/onap/dcaegen2/heat/onap_dcae.env

# Run ONAP:
cd ~/oom/kubernetes/oneclick/
./createAll.bash -n onap

# Check ONAP status:
sleep 3
kubectl get pods --all-namespaces