Place kubernetes nodes on different compute hosts

Modify the heat templates so that kubernetes node instances are not
scheduled onto the same compute host, but each onto a different one.
This placement policy is called anti-affinity.
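In Heat this maps to an OS::Nova::ServerGroup with the anti-affinity
policy, which each node passes to nova through its scheduler_hints.
A minimal sketch of the mechanism (image and flavor values are
illustrative; the real wiring is in the diff below):

    heat_template_version: 2017-02-24
    resources:
      anti_affinity_group:
        type: OS::Nova::ServerGroup
        properties:
          policies:
            - anti-affinity
      node_0:
        type: OS::Nova::Server
        properties:
          image: bionic      # illustrative image name
          flavor: m1.large   # illustrative flavor name
          # nova's ServerGroupAntiAffinityFilter keeps all members of the
          # group on different hypervisors
          scheduler_hints:
            group: { get_resource: anti_affinity_group }

Note that with the strict anti-affinity policy nova refuses to schedule
a member when no free hypervisor is left, so the deployment needs at
least as many compute hosts as kubernetes nodes.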

Change-Id: I16cad7cc2f503130bae80be3820c477890214594
Issue-ID: OOM-2042
Signed-off-by: Mateusz Pilat <m.pilat@partner.samsung.com>
Signed-off-by: Michal Zegan <m.zegan@samsung.com>
diff --git a/tools/cicdansible/heat/installer.yaml b/tools/cicdansible/heat/installer.yaml
index 7dd585d..793386c 100644
--- a/tools/cicdansible/heat/installer.yaml
+++ b/tools/cicdansible/heat/installer.yaml
@@ -171,6 +171,13 @@
           data: { num_nodes: { get_param: num_nodes } }
           #This is number of all nodes + 2 (infra instance and installer)
           expression: "$.data.num_nodes + 2"
+  #Affinity policy - nodes are spread onto as many physical machines as possible (aka anti-affinity).
+  anti_affinity_group:
+    type: OS::Nova::ServerGroup
+    properties:
+      name: k8s nodes on separate computes
+      policies:
+        - anti-affinity
   #Resource group to deploy n nodes using node template for each, each node numbered starting from 0.
   nodes:
     type: OS::Heat::ResourceGroup
@@ -187,6 +194,8 @@
           flavor_name: { get_param: node_flavor_name }
           notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
           security_group: { get_resource: secgroup }
+          scheduler_hints:
+            group: { get_resource: anti_affinity_group }
     depends_on: [routercon, instance_wait_handle]
   #Nfs storage volume for first node.
   nfs_storage:
@@ -224,6 +233,7 @@
       image_name: { get_param: image_name }
       notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
       security_group: { get_resource: secgroup }
+      scheduler_hints: {}
     depends_on: [instance_wait_handle]
   #Volume attachment for infra node.
   resources_storage_attachment:
@@ -249,6 +259,7 @@
       subnet: { get_resource: privsubnet }
       notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
       security_group: { get_resource: secgroup }
+      scheduler_hints: {}
     depends_on: instance_wait_handle
   #Floating ip for installer.
   installer_fip_assoc:
diff --git a/tools/cicdansible/heat/instance.yaml b/tools/cicdansible/heat/instance.yaml
index 2734704..5429eb6 100644
--- a/tools/cicdansible/heat/instance.yaml
+++ b/tools/cicdansible/heat/instance.yaml
@@ -18,6 +18,9 @@
     type: string
   security_group:
     type: string
+  scheduler_hints:
+    type: json
+    default: {}
 #Resources.
 resources:
   #This is the network port to attach instance to.
@@ -49,6 +52,7 @@
         - port: { get_resource: port }
       user_data_format: SOFTWARE_CONFIG
       user_data: { get_resource: config }
+      scheduler_hints: { get_param: scheduler_hints }
 outputs:
   OS::stack_id:
     value: { get_resource: instance }
diff --git a/tools/cicdansible/heat/node.yaml b/tools/cicdansible/heat/node.yaml
index 7f6af35..cd628ee 100644
--- a/tools/cicdansible/heat/node.yaml
+++ b/tools/cicdansible/heat/node.yaml
@@ -20,6 +20,8 @@
     type: string
   security_group:
     type: string
+  scheduler_hints:
+    type: json
 resources:
   #Volume for storing /var/lib/docker for node.
   docker_storage:
@@ -42,6 +44,7 @@
       flavor_name: { get_param: flavor_name }
       notify_command: { get_param: notify_command }
       security_group: { get_param: security_group }
+      scheduler_hints: { get_param: scheduler_hints }
   #Attachment of docker volume to node.
   docker_storage_attachment:
     type: OS::Cinder::VolumeAttachment