#This is the environment Heat template, compatible with OpenStack Ocata.
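#A minimal launch sketch (assumption: this template is saved as onap_env.yaml, the
#node.yaml/instance.yaml subtemplates sit next to it, and parameter values are
#supplied in a Heat environment file, e.g. params.env):
#  openstack stack create -t onap_env.yaml -e params.env onap-env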
heat_template_version: 2017-02-24
description: "Heat template for deploying onap env"
parameters:
  auth_key:
    label: "Auth public key"
    description: "The public key used to authenticate to instances"
    type: string
  node_flavor_name:
    label: "name of node flavor"
    description: "The name of the flavor used to create kubernetes nodes"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
  infra_flavor_name:
    label: "name of infra flavor"
    description: "The name of the flavor used to create the infra instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
  installer_flavor_name:
    label: "name of installer flavor"
    description: "The name of the flavor used to create the installer instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
  image_name:
    label: "image name"
    description: "Name of the image from which to create all instances; should be a RHEL 7.6 or CentOS image"
    type: string
    constraints:
      - custom_constraint: glance.image
        description: "must specify a valid image name"
  subnet_cidr:
    label: "private subnet cidr"
    description: "CIDR of the private subnet that instances will be connected to"
    type: string
    constraints:
      - custom_constraint: net_cidr
  subnet_range_start:
    label: "subnet dhcp allocation range start"
    description: "Start of the DHCP allocation range on the private subnet"
    type: string
    constraints:
      - custom_constraint: ip_addr
  subnet_range_end:
    label: "end of subnet dhcp allocation range"
    description: "End of the private subnet's DHCP allocation range"
    type: string
    constraints:
      - custom_constraint: ip_addr
  router_addr:
    label: "ip address of router"
    description: "IP address of the router allowing access to other networks, including the company network"
    type: string
    constraints:
      - custom_constraint: ip_addr
  public_network_name:
    label: "name of the public network"
    description: "Name of the public, internet-facing network, which also allows access to company-internal hosts"
    type: string
    constraints:
      - custom_constraint: neutron.network
        description: "must specify a valid network name or id"
  external_subnet_cidr:
    label: "external subnet cidr"
    description: "The CIDR of the external subnet that should remain accessible from instances even when internet access is cut off. Using 0.0.0.0/0 here grants full internet access."
    type: string
    constraints:
      - custom_constraint: net_cidr
  installer_ip:
    label: "floating ip of the installer"
    description: "A pre-allocated floating IP that will be associated with the installer instance"
    type: string
  infra_ip:
    label: "floating ip of the infra"
    description: "A pre-allocated floating IP that will be associated with the infrastructure instance"
    type: string
  node_ip:
    label: "floating ip of the first node"
    description: "A pre-allocated floating IP that will be associated with the first kubernetes node and allows accessing onap"
    type: string
  num_nodes:
    label: "number of nodes"
    description: "The number of kubernetes nodes to create (minimum 1)"
    type: number
    constraints:
      - range: { min: 1 }
        description: "must be a positive number"
resources:
  # Security group used to secure access to instances.
  secgroup:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        # Egress rule allowing access to external_subnet_cidr.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Ingress rule, also allowing inbound access from the external network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Allow outbound communication with the internal subnet.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow inbound communication from the internal network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow outbound access to 169.254.0.0/16, mainly for metadata. No inbound access is needed.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: 169.254.0.0/16
  #A network that our test environment will be connected to.
  privnet:
    type: OS::Neutron::Net
  #Subnet that instances will live in.
  privsubnet:
    type: OS::Neutron::Subnet
    properties:
      network: { get_resource: privnet }
      cidr: { get_param: subnet_cidr }
      allocation_pools:
        - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
      gateway_ip: { get_param: router_addr }
      ip_version: 4
  #A port connected to the private network, taken by the router.
  routerport:
    type: OS::Neutron::Port
    properties:
      network: { get_resource: privnet }
      fixed_ips:
        - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
      security_groups: [{ get_resource: secgroup }]
  #This is a router routing between us and the internet.
  #It has an external gateway to the public network.
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_network_name }
  #This is a router interface connecting the router to our private subnet's router port.
  routercon:
    type: OS::Neutron::RouterInterface
    properties:
      router: { get_resource: router }
      port: { get_resource: routerport }

  #Key used to authenticate to instances as root.
  key:
    type: OS::Nova::KeyPair
    properties:
      name: { get_param: "OS::stack_name" }
      public_key: { get_param: auth_key }
  #Handle used by instances to signal that they have started up.
  instance_wait_handle:
    type: OS::Heat::WaitConditionHandle
  #Monitor waiting for all instances to start.
  instance_wait:
    type: OS::Heat::WaitCondition
    properties:
      handle: { get_resource: instance_wait_handle }
      timeout: 1200
      count:
        yaql:
          data: { num_nodes: { get_param: num_nodes } }
          #This is the number of all nodes + 2 (infra instance and installer).
          expression: "$.data.num_nodes + 2"
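          #For example, num_nodes: 3 makes the wait condition expect 5 signals in total.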
  #Resource group deploying n nodes from the node template, with each node numbered starting from 0.
  nodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_nodes }
      resource_def:
        type: node.yaml
        properties:
          nodenum: "%index%"
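          #Heat's ResourceGroup substitutes %index% with each member's index (0, 1, ...),
          #so the first node receives nodenum "0".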
          key_name: { get_resource: key }
          image_name: { get_param: image_name }
          network: { get_resource: privnet }
          subnet: { get_resource: privsubnet }
          flavor_name: { get_param: node_flavor_name }
          notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
          security_group: { get_resource: secgroup }
    depends_on: [routercon, instance_wait_handle]
  #NFS storage volume for the first node.
  nfs_storage:
    type: OS::Cinder::Volume
    properties:
      name: nfs_storage
      size: 50
  #Attachment of the volume to the first node.
  nfs_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      instance_uuid: { get_attr: [nodes, "resource.0"] }
      volume_id: { get_resource: nfs_storage }
  #Floating IP association for the first node only.
  node_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: node_ip }
      port_id: { get_attr: ["nodes", "resource.0.port_id"] }
  #OpenStack volume used for storing resources.
  resources_storage:
    type: "OS::Cinder::Volume"
    properties:
      name: "resources_storage"
      size: 120
  #Infrastructure instance, created from a subtemplate.
  infra:
    type: "instance.yaml"
    properties:
      instance_name: infra
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      key_name: { get_resource: key }
      flavor_name: { get_param: infra_flavor_name }
      image_name: { get_param: image_name }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
    depends_on: [instance_wait_handle]
  #Volume attachment for the infra node.
  resources_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: resources_storage }
      instance_uuid: { get_resource: infra }
  #Floating IP association for infra.
  infra_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: infra_ip }
      port_id: { get_attr: ["infra", "port_id"] }
  #Small installer VM with access to the other instances, used to install onap.
  installer:
    type: "instance.yaml"
    properties:
      instance_name: installer
      image_name: { get_param: image_name }
      flavor_name: { get_param: installer_flavor_name }
      key_name: { get_resource: key }
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
    depends_on: instance_wait_handle
  #Floating IP association for the installer.
  installer_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: installer_ip }
      port_id: { get_attr: [installer, port_id] }
  #Map of node volumes, built from the nodes' "volumes" output attribute.
  node_volumes:
    type: OS::Heat::Value
    properties:
      type: json
      #A yaql transformation is needed on the volume map.
      value:
        yaql:
          data:
            #This is a map from node number to the value of the "volumes" attribute, which
            #contains a list of volumes written as pairs [volume_id, mountpoint].
            volumes: { get_attr: [nodes, attributes, volumes] }
          #A yaql expression transforms the node numbers into node names of the form "node0"
          #and similar; nothing more complicated is needed.
          expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
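          #Illustrative input/output of the expression above (hypothetical volume id):
          #  {0: [["vol-id-a", "/mnt/a"]]}  ->  {"node0": [["vol-id-a", "/mnt/a"]]}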
  #List of infra specific volumes (not a map as above).
  infra_volumes:
    type: OS::Heat::Value
    properties:
      value:
        - [{ get_resource: resources_storage }, "/opt/onap"]
  #Contains node0 specific volume list.
  node0_volumes:
    type: OS::Heat::Value
    properties:
      value:
        - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
#Output values
outputs:
  installer_ip:
    value: { get_attr: [installer, ip] }
    description: "Internal IP of the installer instance"
  infra_ip:
    value: { get_attr: [infra, ip] }
    description: "Internal IP of the infra instance"
  node_ips:
    value: { get_attr: [nodes, ip] }
    description: "Serialized JSON list of node internal IPs, starting at node0"
  volumes:
    description: "Map of volumes for each instance"
    value:
      #Deep merging can only be done with yaql.
      yaql:
        data:
          node_volumes: { get_attr: [node_volumes, value] }
          infra_volumes: { infra: { get_attr: [infra_volumes, value] } }
          node0_volumes: { node0: { get_attr: [node0_volumes, value] } }
        expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"
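        #Illustrative merged result (hypothetical volume ids, assuming a single node
        #whose own volume list is empty):
        #  {"node0": [["nfs-vol-id", "/dockerdata-nfs"]], "infra": [["res-vol-id", "/opt/onap"]]}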