---
# This is the environment heat template, compatible with OpenStack Ocata.
heat_template_version: 2017-02-24
description: "Heat template for deploying onap env"
parameters:
  auth_key:
    label: "Auth public key"
    description: "The public key used to authenticate to instances"
    type: string
  node_flavor_name:
    label: "name of node flavor"
    description: "The name of the flavor used to create kubernetes nodes"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  infra_flavor_name:
    label: "name of infra flavor"
    description: "flavor used to create infra instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  installer_flavor_name:
    label: "name of installer flavor"
    description: "flavor used to create installer instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  image_name:
    label: "image name"
    description: "name of the image from which to create all instances, should be rhel 7.6 or centos image"
    type: string
    constraints:
      - custom_constraint: glance.image
        description: "must specify a valid image name"
  subnet_cidr:
    label: "private subnet cidr"
    description: "Cidr of a private subnet instances will be connected to"
    type: string
    constraints:
      - custom_constraint: net_cidr
  subnet_range_start:
    label: "subnet dhcp allocation range start"
    description: "Start of range of dhcp allocatable ips on private subnet"
    type: string
    constraints:
      - custom_constraint: ip_addr
  subnet_range_end:
    label: "end of subnet dhcp allocation range"
    description: "End of private subnet's dhcp allocation range"
    type: string
    constraints:
      - custom_constraint: ip_addr
  router_addr:
    label: "ip address of router"
    description: "IP address of the router allowing access to other networks incl. company network"
    type: string
    constraints:
      - custom_constraint: ip_addr
  public_network_name:
    label: "name of the public network"
    description: "Name of the public, internet facing network, also allowing access to company internal hosts"
    type: string
    constraints:
      - custom_constraint: neutron.network
        description: "Must specify a valid network name or id"
  external_subnet_cidr:
    label: "external subnet cidr"
    description: "The CIDR of the external subnet, that should be accessible from instances, even when internet access is cut. Putting 0.0.0.0/0 here means access to internet."
    type: string
    constraints:
      - custom_constraint: net_cidr
  installer_ip:
    label: "floating ip of the installer"
    description: "a pre-allocated floating ip that will be associated with the installer instance"
    type: string
  infra_ip:
    label: "floating ip of the infra"
    description: "a pre-allocated floating ip that will be associated with the infrastructure instance"
    type: string
  node_ip:
    label: "floating ip of the first node"
    description: "a pre-allocated floating ip that will be associated with the first kubernetes node and allow accessing onap"
    type: string
  num_nodes:
    label: "num nodes"
    description: "the number of kubernetes nodes to create, min 1"
    type: number
    constraints:
      - range: { min: 1 }
        description: "must be a positive number"
  use_volume_for_nfs:
    type: boolean
    label: "use volume for nfs storage"
    description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk"
conditions:
  # Condition controlling whether the optional nfs cinder volume is created.
  use_volume_for_nfs: { get_param: use_volume_for_nfs }
resources:
  # Security group used to secure access to instances.
  secgroup:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        # Egress rule allowing access to external_subnet_cidr.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Ingress rule, allowing also inbound access by external network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Allow outbound communication with the internal subnet.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow inbound communication from internal network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow outbound access to 169.254.0.0/16, mainly for metadata. We do not need inbound.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: 169.254.0.0/16
  # A network that our test environment will be connected to.
  privnet:
    type: OS::Neutron::Net
  # Subnet that instances will live in.
  privsubnet:
    type: OS::Neutron::Subnet
    properties:
      network: { get_resource: privnet }
      cidr: { get_param: subnet_cidr }
      allocation_pools:
        - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
      gateway_ip: { get_param: router_addr }
      ip_version: 4
  # A port connected to the private network, taken by router.
  routerport:
    type: OS::Neutron::Port
    properties:
      network: { get_resource: privnet }
      fixed_ips:
        - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
      security_groups: [{ get_resource: secgroup }]
  # This is a router, routing between us and the internet.
  # It has an external gateway to public network.
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_network_name }
  # This is a router interface connecting it to our private subnet's router port.
  routercon:
    type: OS::Neutron::RouterInterface
    properties:
      router: { get_resource: router }
      port: { get_resource: routerport }

  # Key used to authenticate to instances as root.
  key:
    type: OS::Nova::KeyPair
    properties:
      name: { get_param: "OS::stack_name" }
      public_key: { get_param: auth_key }
  # Handle to signal about starting up of instances.
  instance_wait_handle:
    type: OS::Heat::WaitConditionHandle
  # Monitor waiting for all instances to start.
  instance_wait:
    type: OS::Heat::WaitCondition
    properties:
      handle: { get_resource: instance_wait_handle }
      timeout: 1200
      count:
        yaql:
          data: { num_nodes: { get_param: num_nodes } }
          # This is number of all nodes + 2 (infra instance and installer).
          expression: "$.data.num_nodes + 2"
  # Affinity Policy - nodes spread onto as many physical machines as possible (aka. anti-affinity).
  anti_affinity_group:
    type: OS::Nova::ServerGroup
    properties:
      name: "k8s nodes on separate computes"
      policies:
        - anti-affinity
  # Resource group to deploy n nodes using node template for each, each node numbered starting from 0.
  nodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_nodes }
      resource_def:
        type: node.yaml
        properties:
          nodenum: "%index%"
          key_name: { get_resource: key }
          image_name: { get_param: image_name }
          network: { get_resource: privnet }
          subnet: { get_resource: privsubnet }
          flavor_name: { get_param: node_flavor_name }
          notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
          security_group: { get_resource: secgroup }
          scheduler_hints:
            group: { get_resource: anti_affinity_group }
    depends_on: [routercon, instance_wait_handle]
  # Nfs storage volume for first node; only created when use_volume_for_nfs is true.
  nfs_storage:
    type: OS::Cinder::Volume
    condition: use_volume_for_nfs
    properties:
      name: nfs_storage
      size: 50
  # Attachment of volume to first node.
  nfs_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    condition: use_volume_for_nfs
    properties:
      instance_uuid: { get_attr: [nodes, "resource.0"] }
      volume_id: { get_resource: nfs_storage }
  # Floating ip association for node (first only).
  node_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: node_ip }
      port_id: { get_attr: ["nodes", "resource.0.port_id"] }
  # Openstack volume used for storing resources.
  resources_storage:
    type: OS::Cinder::Volume
    properties:
      name: resources_storage
      size: 120
  # Instance representing infrastructure instance, created using subtemplate.
  infra:
    type: instance.yaml
    properties:
      instance_name: infra
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      key_name: { get_resource: key }
      flavor_name: { get_param: infra_flavor_name }
      image_name: { get_param: image_name }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
      scheduler_hints: {}
    depends_on: [instance_wait_handle]
  # Volume attachment for infra node.
  resources_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: resources_storage }
      instance_uuid: { get_resource: infra }
  # Floating ip association for infra.
  infra_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: infra_ip }
      port_id: { get_attr: ["infra", "port_id"] }
  # Small installer vm having access to other instances, used to install onap.
  installer:
    type: instance.yaml
    properties:
      instance_name: installer
      image_name: { get_param: image_name }
      flavor_name: { get_param: installer_flavor_name }
      key_name: { get_resource: key }
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
      scheduler_hints: {}
    depends_on: instance_wait_handle
  # Floating ip for installer.
  installer_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: installer_ip }
      port_id: { get_attr: [installer, port_id] }
  # Map of node volumes, taken from volumes output param.
  node_volumes:
    type: OS::Heat::Value
    properties:
      type: json
      # We need yaql transformation to be done on the volume map.
      value:
        yaql:
          data:
            # This is a map of node number to value of "volumes" attribute, that contains
            # a list of volumes written as pairs [volumeid, mountpoint].
            volumes: { get_attr: [nodes, attributes, volumes] }
          # We need yaql expressions to transform node numbers to node names in the form "node0" and similar.
          # However we don't need anything more complicated.
          expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
  # List of infra specific volumes (not a map as above).
  infra_volumes:
    type: OS::Heat::Value
    properties:
      value:
        - [{ get_resource: resources_storage }, "/opt/onap"]
  # Contains node0 specific volume list.
  node0_volumes:
    type: OS::Heat::Value
    properties:
      # Note that it returns an empty list if nfs volume is disabled.
      value:
        if:
          - use_volume_for_nfs
          - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
          - []
Michal Zegan07479cb2019-08-22 14:43:11 +0200310#Output values
311outputs:
312 installer_ip:
313 value: { get_attr: [installer, ip] }
314 description: "Internal ip of installer instance"
315 infra_ip:
316 value: { get_attr: [infra, ip] }
317 description: "Internal ip of infra instance"
318 node_ips:
319 value: { get_attr: [nodes, ip] }
320 description: "Serialized json list of node internal ips starting at node0"
321 volumes:
322 description: "map of volumes per each instance"
323 value:
Michal Zegan07d99882019-09-05 18:36:47 +0200324 #Can do deep merging only with yaql.
Michal Zegan07479cb2019-08-22 14:43:11 +0200325 yaql:
326 data:
Michal Zegan07d99882019-09-05 18:36:47 +0200327 node_volumes: { get_attr: [node_volumes, value]}
328 infra_volumes: { infra: { get_attr: [infra_volumes, value] }}
329 node0_volumes: {node0: { get_attr: [node0_volumes, value] }}
330 expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"