#########
| 2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved |
| 3 | # |
| 4 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | # you may not use this file except in compliance with the License. |
| 6 | # You may obtain a copy of the License at |
| 7 | # |
| 8 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | # |
| 10 | # Unless required by applicable law or agreed to in writing, software |
| 11 | # distributed under the License is distributed on an "AS IS" BASIS, |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 15 | |
| 16 | |
| 17 | import os |
| 18 | import time |
| 19 | import copy |
| 20 | import operator |
| 21 | |
| 22 | from novaclient import exceptions as nova_exceptions |
| 23 | |
| 24 | from cloudify import ctx |
| 25 | from cloudify.manager import get_rest_client |
| 26 | from cloudify.decorators import operation |
| 27 | from cloudify.exceptions import NonRecoverableError, RecoverableError |
| 28 | from cinder_plugin import volume |
| 29 | from openstack_plugin_common import ( |
| 30 | provider, |
| 31 | transform_resource_name, |
| 32 | get_resource_id, |
| 33 | get_openstack_ids_of_connected_nodes_by_openstack_type, |
| 34 | with_nova_client, |
| 35 | with_cinder_client, |
| 36 | assign_payload_as_runtime_properties, |
| 37 | get_openstack_id_of_single_connected_node_by_openstack_type, |
| 38 | get_openstack_names_of_connected_nodes_by_openstack_type, |
| 39 | get_single_connected_node_by_openstack_type, |
| 40 | is_external_resource, |
| 41 | is_external_resource_by_properties, |
| 42 | is_external_resource_not_conditionally_created, |
| 43 | is_external_relationship_not_conditionally_created, |
| 44 | use_external_resource, |
| 45 | delete_runtime_properties, |
| 46 | is_external_relationship, |
| 47 | validate_resource, |
| 48 | USE_EXTERNAL_RESOURCE_PROPERTY, |
| 49 | OPENSTACK_AZ_PROPERTY, |
| 50 | OPENSTACK_ID_PROPERTY, |
| 51 | OPENSTACK_TYPE_PROPERTY, |
| 52 | OPENSTACK_NAME_PROPERTY, |
| 53 | COMMON_RUNTIME_PROPERTIES_KEYS, |
| 54 | with_neutron_client) |
| 55 | from nova_plugin.keypair import KEYPAIR_OPENSTACK_TYPE |
| 56 | from nova_plugin import userdata |
| 57 | from openstack_plugin_common.floatingip import (IP_ADDRESS_PROPERTY, |
| 58 | get_server_floating_ip) |
| 59 | from neutron_plugin.network import NETWORK_OPENSTACK_TYPE |
| 60 | from neutron_plugin.port import PORT_OPENSTACK_TYPE |
| 61 | from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE |
| 62 | from openstack_plugin_common.security_group import \ |
| 63 | SECURITY_GROUP_OPENSTACK_TYPE |
| 64 | from glance_plugin.image import handle_image_from_relationship |
| 65 | |
| 66 | SERVER_OPENSTACK_TYPE = 'server' |
| 67 | |
| 68 | # server status constants. Full lists here: http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html # NOQA |
| 69 | SERVER_STATUS_ACTIVE = 'ACTIVE' |
| 70 | SERVER_STATUS_BUILD = 'BUILD' |
| 71 | SERVER_STATUS_SHUTOFF = 'SHUTOFF' |
| 72 | |
| 73 | OS_EXT_STS_TASK_STATE = 'OS-EXT-STS:task_state' |
| 74 | SERVER_TASK_STATE_POWERING_ON = 'powering-on' |
| 75 | |
| 76 | MUST_SPECIFY_NETWORK_EXCEPTION_TEXT = 'More than one possible network found.' |
| 77 | SERVER_DELETE_CHECK_SLEEP = 2 |
| 78 | |
| 79 | # Runtime properties |
| 80 | NETWORKS_PROPERTY = 'networks' # all of the server's ips |
| 81 | IP_PROPERTY = 'ip' # the server's private ip |
| 82 | ADMIN_PASSWORD_PROPERTY = 'password' # the server's password |
| 83 | RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \ |
| 84 | [NETWORKS_PROPERTY, IP_PROPERTY, ADMIN_PASSWORD_PROPERTY] |
| 85 | |
| 86 | |
def _get_management_network_id_and_name(neutron_client, ctx):
    """Resolve the management network's id and name for this deployment.

    The node property ``management_network_name`` takes precedence; when
    absent (or empty) the provider context's internal network is used.
    Returns an (id, name) tuple; both elements may be None when neither
    source is available.
    """
    provider_context = provider(ctx)
    net_id = None
    net_name = None

    configured = ctx.node.properties.get('management_network_name')
    if configured:
        net_name = transform_resource_name(ctx, configured)
        network = neutron_client.cosmo_get_named('network', net_name)
        net_id = network['id']
    else:
        int_network = provider_context.int_network
        if int_network:
            net_id = int_network['id']
            net_name = int_network['name']  # Already transformed.

    return net_id, net_name
| 109 | |
| 110 | |
| 111 | def _merge_nics(management_network_id, *nics_sources): |
| 112 | """Merge nics_sources into a single nics list, insert mgmt network if |
| 113 | needed. |
| 114 | nics_sources are lists of networks received from several sources |
| 115 | (server properties, relationships to networks, relationships to ports). |
| 116 | Merge them into a single list, and if the management network isn't present |
| 117 | there, prepend it as the first network. |
| 118 | """ |
| 119 | merged = [] |
| 120 | for nics in nics_sources: |
| 121 | merged.extend(nics) |
| 122 | if management_network_id is not None and \ |
| 123 | not any(nic['net-id'] == management_network_id for nic in merged): |
| 124 | merged.insert(0, {'net-id': management_network_id}) |
| 125 | return merged |
| 126 | |
| 127 | |
| 128 | def _normalize_nics(nics): |
| 129 | """Transform the NICs passed to the form expected by openstack. |
| 130 | |
| 131 | If both net-id and port-id are provided, remove net-id: it is ignored |
| 132 | by openstack anyway. |
| 133 | """ |
| 134 | def _normalize(nic): |
| 135 | if 'port-id' in nic and 'net-id' in nic: |
| 136 | nic = nic.copy() |
| 137 | del nic['net-id'] |
| 138 | return nic |
| 139 | return [_normalize(nic) for nic in nics] |
| 140 | |
| 141 | |
def _prepare_server_nics(neutron_client, ctx, server):
    """Populate server['nics'] from properties and relationships.

    Combines any pre-declared nics with the networks the server node is
    related to and the networks of its related ports, prepending the
    management network when it is not already included. The management
    network's id and name are also recorded in the server's metadata.

    Raises NonRecoverableError when nics are requested but no management
    network can be determined (known limitation).
    """
    connected_network_ids = \
        get_openstack_ids_of_connected_nodes_by_openstack_type(
            ctx, NETWORK_OPENSTACK_TYPE)
    connected_port_ids = \
        get_openstack_ids_of_connected_nodes_by_openstack_type(
            ctx, PORT_OPENSTACK_TYPE)
    mgmt_net_id, mgmt_net_name = _get_management_network_id_and_name(
        neutron_client, ctx)
    if mgmt_net_id is None and (connected_network_ids or connected_port_ids):
        # Known limitation
        raise NonRecoverableError(
            "Nova server with NICs requires "
            "'management_network_name' in properties or id "
            "from provider context, which was not supplied")

    server['nics'] = _normalize_nics(_merge_nics(
        mgmt_net_id,
        server.get('nics', []),
        [{'net-id': network_id} for network_id in connected_network_ids],
        get_port_networks(neutron_client, connected_port_ids)))

    if mgmt_net_id is not None:
        server['meta']['cloudify_management_network_id'] = mgmt_net_id
    if mgmt_net_name is not None:
        server['meta']['cloudify_management_network_name'] = mgmt_net_name
| 182 | |
| 183 | |
def _get_boot_volume_relationships(type_name, ctx):
    """Return the single related boot-volume instance, or None.

    Scans the node instance's relationships for targets of the given
    openstack type whose node declares ``boot: True``. At most one such
    target is allowed.
    """
    ctx.logger.debug('Instance relationship target instances: {0}'.format(str([
        rel.target.instance.runtime_properties
        for rel in ctx.instance.relationships])))

    boot_targets = []
    for rel in ctx.instance.relationships:
        target = rel.target
        is_boot = target.node.properties.get('boot', False)
        if (target.instance.runtime_properties.get(
                OPENSTACK_TYPE_PROPERTY) == type_name and is_boot):
            boot_targets.append(target.instance)

    if not boot_targets:
        return None
    if len(boot_targets) > 1:
        raise NonRecoverableError("2 boot volumes not supported")
    return boot_targets[0]
| 200 | |
| 201 | |
def _handle_boot_volume(server, ctx):
    """Wire a related boot volume into the server create request, if any.

    Adds a 'vda' entry to the request's block_device_mapping and, when
    the request has no explicit availability zone, adopts the volume's.
    """
    volume_instance = _get_boot_volume_relationships(
        VOLUME_OPENSTACK_TYPE, ctx)
    if not volume_instance:
        return

    volume_id = volume_instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    ctx.logger.info('boot_volume_id: {0}'.format(volume_id))
    volume_az = volume_instance.runtime_properties[OPENSTACK_AZ_PROPERTY]
    # If a block device mapping already exists we shouldn't overwrite it
    # completely
    mapping = server.setdefault('block_device_mapping', {})
    mapping['vda'] = '{0}:::0'.format(volume_id)
    # Some nova configurations allow cross-az server-volume connections, so
    # we can't treat that as an error.
    if not server.get('availability_zone'):
        server['availability_zone'] = volume_az
| 216 | |
| 217 | |
@operation
@with_nova_client
@with_neutron_client
def create(nova_client, neutron_client, args, **kwargs):
    """
    Creates a server. Exposes the parameters mentioned in
    http://docs.openstack.org/developer/python-novaclient/api/novaclient.v1_1
    .servers.html#novaclient.v1_1.servers.ServerManager.create

    When the node uses an external (pre-existing) server, only validates
    it and records its networks/ips; otherwise builds the create request
    from node properties, relationships and operation args, then submits
    it to nova and stores the new server's id/type/name in runtime
    properties.
    """

    # Adopt an existing server when 'use_external_resource' is set.
    external_server = use_external_resource(ctx, nova_client,
                                            SERVER_OPENSTACK_TYPE)

    if external_server:
        _set_network_and_ip_runtime_properties(external_server)
        # In local workflows there are no deployment capabilities to
        # validate against, so stop here.
        if ctx._local:
            return
        else:
            # Validate that no new (non-external) networks/ports/keypair
            # nodes are being connected to the externally managed server.
            network_ids = \
                get_openstack_ids_of_connected_nodes_by_openstack_type(
                    ctx, NETWORK_OPENSTACK_TYPE)
            port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
                ctx, PORT_OPENSTACK_TYPE)
            try:
                _validate_external_server_nics(
                    neutron_client,
                    network_ids,
                    port_ids
                )
                _validate_external_server_keypair(nova_client)
                return
            except Exception:
                # Roll back the runtime properties set above before
                # propagating the validation failure.
                delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
                raise

    provider_context = provider(ctx)

    def rename(name):
        # Apply the deployment's resource-name transformation (prefixing).
        return transform_resource_name(ctx, name)

    # Build the create request: generated name, then the node's 'server'
    # property, then operation args (later sources win).
    server = {
        'name': get_resource_id(ctx, SERVER_OPENSTACK_TYPE),
    }
    server.update(copy.deepcopy(ctx.node.properties['server']))
    server.update(copy.deepcopy(args))

    _handle_boot_volume(server, ctx)
    handle_image_from_relationship(server, 'image', ctx)

    if 'meta' not in server:
        server['meta'] = dict()

    transform_resource_name(ctx, server)

    ctx.logger.debug(
        "server.create() server before transformations: {0}".format(server))

    for key in 'block_device_mapping', 'block_device_mapping_v2':
        if key in server:
            # if there is a connected boot volume, don't require the `image`
            # property.
            # However, python-novaclient requires an `image` input anyway, and
            # checks it for truthiness when deciding whether to pass it along
            # to the API
            if 'image' not in server:
                server['image'] = ctx.node.properties.get('image')
            break
    else:
        # No block device mapping: a regular image boot, so the image
        # reference must be resolved.
        _handle_image_or_flavor(server, nova_client, 'image')
    _handle_image_or_flavor(server, nova_client, 'flavor')

    # Ensure the manager's agents security group is attached when the
    # provider context declares one.
    if provider_context.agents_security_group:
        security_groups = server.get('security_groups', [])
        asg = provider_context.agents_security_group['name']
        if asg not in security_groups:
            security_groups.append(asg)
        server['security_groups'] = security_groups
    elif not server.get('security_groups', []):
        # Make sure that if the server is connected to a security group
        # from CREATE time so that there the user can control
        # that there is never a time that a running server is not protected.
        security_group_names = \
            get_openstack_names_of_connected_nodes_by_openstack_type(
                ctx,
                SECURITY_GROUP_OPENSTACK_TYPE)
        server['security_groups'] = security_group_names

    # server keypair handling
    keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type(
        ctx, KEYPAIR_OPENSTACK_TYPE, True)

    if 'key_name' in server:
        if keypair_id:
            raise NonRecoverableError("server can't both have the "
                                      '"key_name" nested property and be '
                                      'connected to a keypair via a '
                                      'relationship at the same time')
        server['key_name'] = rename(server['key_name'])
    elif keypair_id:
        server['key_name'] = _get_keypair_name_by_id(nova_client, keypair_id)
    elif provider_context.agents_keypair:
        # Fall back to the manager's agents keypair.
        server['key_name'] = provider_context.agents_keypair['name']
    else:
        server['key_name'] = None
        ctx.logger.info(
            'server must have a keypair, yet no keypair was connected to the '
            'server node, the "key_name" nested property '
            "wasn't used, and there is no agent keypair in the provider "
            "context. Agent installation can have issues.")

    _fail_on_missing_required_parameters(
        server,
        ('name', 'flavor'),
        'server')

    _prepare_server_nics(neutron_client, ctx, server)

    ctx.logger.debug(
        "server.create() server after transformations: {0}".format(server))

    userdata.handle_userdata(server)

    ctx.logger.info("Creating VM with parameters: {0}".format(str(server)))
    # Store the server dictionary contents in runtime properties
    assign_payload_as_runtime_properties(ctx, SERVER_OPENSTACK_TYPE, server)
    ctx.logger.debug(
        "Asking Nova to create server. All possible parameters are: {0})"
        .format(','.join(server.keys())))

    try:
        s = nova_client.servers.create(**server)
    except nova_exceptions.BadRequest as e:
        # The boot volume may still be building; retry rather than fail.
        if 'Block Device Mapping is Invalid' in str(e):
            return ctx.operation.retry(
                message='Block Device Mapping is not created yet',
                retry_after=30)
        if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT):
            raise NonRecoverableError(
                "Can not provision server: management_network_name or id"
                " is not specified but there are several networks that the "
                "server can be connected to.")
        raise
    # Record the new server's identity for subsequent operations.
    ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s.id
    ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
        SERVER_OPENSTACK_TYPE
    ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = server['name']
| 364 | |
| 365 | |
def get_port_networks(neutron_client, port_ids):
    """Map each port id to its {'net-id', 'port-id'} nic descriptor."""

    def _to_nic(port_id):
        port_info = neutron_client.show_port(port_id)['port']
        return {
            'net-id': port_info['network_id'],
            'port-id': port_info['id'],
        }

    return map(_to_nic, port_ids)
| 376 | |
| 377 | |
@operation
@with_nova_client
def start(nova_client, start_retry_interval, private_key_path, **kwargs):
    """Start the server, retrying until it reaches ACTIVE status.

    For externally managed servers only validates that they are ACTIVE.
    When the node sets 'use_password', also waits for and stores the
    generated admin password before recording the server's networks/ips.
    """
    server = get_server_by_context(nova_client)

    if is_external_resource_not_conditionally_created(ctx):
        ctx.logger.info('Validating external server is started')
        if server.status != SERVER_STATUS_ACTIVE:
            raise NonRecoverableError(
                'Expected external resource server {0} to be in '
                '"{1}" status'.format(server.id, SERVER_STATUS_ACTIVE))
        return

    if server.status == SERVER_STATUS_ACTIVE:
        ctx.logger.info('Server is {0}'.format(server.status))
        if ctx.node.properties['use_password']:
            key = _get_private_key(private_key_path)
            ctx.logger.debug('retrieving password for server')
            password = server.get_password(key)
            if not password:
                # The password is posted asynchronously after boot.
                return ctx.operation.retry(
                    message='Waiting for server to post generated password',
                    retry_after=start_retry_interval)
            ctx.instance.runtime_properties[ADMIN_PASSWORD_PROPERTY] = password
            ctx.logger.info('Server has been set with a password')
        _set_network_and_ip_runtime_properties(server)
        return

    task_state = getattr(server, OS_EXT_STS_TASK_STATE)

    if (server.status == SERVER_STATUS_SHUTOFF and
            task_state != SERVER_TASK_STATE_POWERING_ON):
        ctx.logger.info('Server is in {0} status - starting server...'.format(
            SERVER_STATUS_SHUTOFF))
        server.start()
        task_state = SERVER_TASK_STATE_POWERING_ON

    if (server.status == SERVER_STATUS_BUILD or
            task_state == SERVER_TASK_STATE_POWERING_ON):
        return ctx.operation.retry(
            message='Waiting for server to be in {0} state but is in {1}:{2} '
                    'state. Retrying...'.format(SERVER_STATUS_ACTIVE,
                                                server.status,
                                                task_state),
            retry_after=start_retry_interval)

    raise NonRecoverableError(
        'Unexpected server state {0}:{1}'.format(server.status, task_state))
| 431 | |
| 432 | |
@operation
@with_nova_client
def stop(nova_client, **kwargs):
    """
    Stop server.

    Depends on OpenStack implementation, server.stop() might not be supported.
    """
    if is_external_resource(ctx):
        ctx.logger.info('Not stopping server since an external server is '
                        'being used')
        return

    server = get_server_by_context(nova_client)
    if server.status == SERVER_STATUS_SHUTOFF:
        ctx.logger.info('Server is already stopped')
    else:
        nova_client.servers.stop(server)
| 452 | |
| 453 | |
@operation
@with_nova_client
def delete(nova_client, **kwargs):
    """Delete the server (unless external) and clear its runtime properties."""
    if is_external_resource(ctx):
        ctx.logger.info('not deleting server since an external server is '
                        'being used')
    else:
        ctx.logger.info('deleting server')
        server = get_server_by_context(nova_client)
        nova_client.servers.delete(server)
        _wait_for_server_to_be_deleted(nova_client, server)

    delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
| 467 | |
| 468 | |
def _wait_for_server_to_be_deleted(nova_client,
                                   server,
                                   timeout=120,
                                   sleep_interval=5):
    """Poll nova until the server is gone or the timeout elapses.

    :param nova_client: nova client used for polling.
    :param server: the server object (or id) to wait on.
    :param timeout: maximum number of seconds to wait.
    :param sleep_interval: seconds to sleep between polls.
    :raises RuntimeError: if the server still exists after `timeout`
        seconds (treated as recoverable by the workflow engine).
    """
    # Keep the deadline separate from `timeout`: the previous code
    # overwrote `timeout` with the deadline epoch, so the error message
    # reported an absolute timestamp instead of the seconds waited.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            server = nova_client.servers.get(server)
        except nova_exceptions.NotFound:
            return
        ctx.logger.debug('Waiting for server "{}" to be deleted. current'
                         ' status: {}'.format(server.id, server.status))
        time.sleep(sleep_interval)
    # recoverable error
    raise RuntimeError('Server {} has not been deleted. waited for {} seconds'
                       .format(server.id, timeout))
| 485 | |
| 486 | |
def get_server_by_context(nova_client):
    """Fetch the nova server whose id is stored on this node instance."""
    server_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    return nova_client.servers.get(server_id)
| 490 | |
| 491 | |
def _set_network_and_ip_runtime_properties(server):
    """Record the server's networks and its management ip.

    Stores the full network->addresses mapping under NETWORKS_PROPERTY
    and the first address of the management network (or, failing that,
    the first address seen) under IP_PROPERTY.
    """
    if not server.networks:
        raise NonRecoverableError(
            'The server was created but not attached to a network. '
            'Cloudify requires that a server is connected to '
            'at least one port.'
        )

    mgmt_network_name = server.metadata.get(
        'cloudify_management_network_name')
    mgmt_ip = None
    all_ips = {}

    for net_name, addresses in server.networks.items():
        is_mgmt = bool(mgmt_network_name) and net_name == mgmt_network_name
        if is_mgmt or not mgmt_ip:
            mgmt_ip = next(iter(addresses or []), None)
        all_ips[net_name] = addresses

    ctx.instance.runtime_properties[NETWORKS_PROPERTY] = all_ips
    # The ip of this instance in the management network
    ctx.instance.runtime_properties[IP_PROPERTY] = mgmt_ip
| 516 | |
| 517 | |
@operation
@with_nova_client
def connect_floatingip(nova_client, fixed_ip, **kwargs):
    """Associate the target floating ip with the source server.

    For external resources only validates the association already exists.
    Otherwise associates the ip and retries the operation if it does not
    show up on the server's networks afterwards.
    """
    server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    floating_ip_id = ctx.target.instance.runtime_properties[
        OPENSTACK_ID_PROPERTY]

    if is_external_relationship_not_conditionally_created(ctx):
        ctx.logger.info('Validating external floatingip and server '
                        'are associated')
        if nova_client.floating_ips.get(floating_ip_id).instance_id ==\
                server_id:
            return
        raise NonRecoverableError(
            'Expected external resources server {0} and floating-ip {1} to be '
            'connected'.format(server_id, floating_ip_id))

    floating_ip_address = ctx.target.instance.runtime_properties[
        IP_ADDRESS_PROPERTY]
    server = nova_client.servers.get(server_id)
    server.add_floating_ip(floating_ip_address, fixed_ip or None)

    # Re-fetch to see the updated address list.
    server = nova_client.servers.get(server_id)
    # Flatten all addresses with a comprehension: `reduce` is a
    # python2-only builtin and raised TypeError when networks was empty.
    all_server_ips = [ip
                      for addresses in server.networks.values()
                      for ip in addresses]
    if floating_ip_address not in all_server_ips:
        return ctx.operation.retry(message='Failed to assign floating ip {0}'
                                           ' to machine {1}.'
                                   .format(floating_ip_address, server_id))
| 546 | |
| 547 | |
@operation
@with_nova_client
@with_neutron_client
def disconnect_floatingip(nova_client, neutron_client, **kwargs):
    """Detach the server's floating ip, skipping external resources."""
    if is_external_relationship(ctx):
        ctx.logger.info('Not disassociating floatingip and server since '
                        'external floatingip and server are being used')
        return

    server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    ctx.logger.info("Remove floating ip {0}".format(
        ctx.target.instance.runtime_properties[IP_ADDRESS_PROPERTY]))

    floating_ip = get_server_floating_ip(neutron_client, server_id)
    if not floating_ip:
        return
    address = floating_ip['floating_ip_address']
    nova_client.servers.get(server_id).remove_floating_ip(address)
    ctx.logger.info("Floating ip {0} detached from server".format(address))
| 566 | |
| 567 | |
@operation
@with_nova_client
def connect_security_group(nova_client, **kwargs):
    """Attach the target security group to the source server.

    For external resources only validates the association. Otherwise the
    group is attached by name (attaching nova security groups by id is
    unreliable) unless it is already present, and the connection is then
    verified.
    """
    server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    security_group_id = ctx.target.instance.runtime_properties[
        OPENSTACK_ID_PROPERTY]
    security_group_name = ctx.target.instance.runtime_properties[
        OPENSTACK_NAME_PROPERTY]

    if is_external_relationship_not_conditionally_created(ctx):
        ctx.logger.info('Validating external security group and server '
                        'are associated')
        server = nova_client.servers.get(server_id)
        if any(sg.id == security_group_id
               for sg in server.list_security_group()):
            return
        raise NonRecoverableError(
            'Expected external resources server {0} and security-group {1} to '
            'be connected'.format(server_id, security_group_id))

    server = nova_client.servers.get(server_id)
    # Some security groups may already be attached from create time;
    # attach the group only when it isn't present yet. The previous loop
    # called add_security_group() once per unrelated attached group
    # (duplicate attach calls) and never attached anything when the
    # server had no security groups at all.
    already_attached = any(
        security_group_id == sg.id or security_group_name == sg.name
        for sg in server.list_security_group())
    if not already_attached:
        # to support nova security groups as well,
        # we connect the security group by name
        # (as connecting by id
        # doesn't seem to work well for nova SGs)
        server.add_security_group(security_group_name)

    _validate_security_group_and_server_connection_status(nova_client,
                                                          server_id,
                                                          security_group_id,
                                                          security_group_name,
                                                          is_connected=True)
| 605 | |
| 606 | |
@operation
@with_nova_client
def disconnect_security_group(nova_client, **kwargs):
    """Detach the target security group from the source server.

    Skipped entirely for external resources.
    """
    if is_external_relationship(ctx):
        ctx.logger.info('Not disconnecting security group and server since '
                        'external security group and server are being used')
        return

    source_props = ctx.source.instance.runtime_properties
    target_props = ctx.target.instance.runtime_properties
    server_id = source_props[OPENSTACK_ID_PROPERTY]
    security_group_id = target_props[OPENSTACK_ID_PROPERTY]
    security_group_name = target_props[OPENSTACK_NAME_PROPERTY]

    # to support nova security groups as well, we disconnect the security group
    # by name (as disconnecting by id doesn't seem to work well for nova SGs)
    nova_client.servers.get(server_id).remove_security_group(
        security_group_name)

    _validate_security_group_and_server_connection_status(nova_client,
                                                          server_id,
                                                          security_group_id,
                                                          security_group_name,
                                                          is_connected=False)
| 630 | |
| 631 | |
@operation
@with_nova_client
@with_cinder_client
def attach_volume(nova_client, cinder_client, status_attempts,
                  status_timeout, **kwargs):
    """Attach the source volume to the target server.

    For external resources only validates the attachment exists.
    Otherwise creates the attachment, waits for the volume to reach
    'in-use' status, and records the (possibly auto-assigned) device
    name on the volume's runtime properties. On failure the attachment
    is rolled back so the operation can be safely retried.
    """
    server_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    volume_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]

    if is_external_relationship_not_conditionally_created(ctx):
        ctx.logger.info('Validating external volume and server '
                        'are connected')
        attachment = volume.get_attachment(cinder_client=cinder_client,
                                           volume_id=volume_id,
                                           server_id=server_id)
        if attachment:
            return
        else:
            raise NonRecoverableError(
                'Expected external resources server {0} and volume {1} to be '
                'connected'.format(server_id, volume_id))

    # Note: The 'device_name' property should actually be a property of the
    # relationship between a server and a volume; It'll move to that
    # relationship type once relationship properties are better supported.
    device = ctx.source.node.properties[volume.DEVICE_NAME_PROPERTY]
    nova_client.volumes.create_server_volume(
        server_id,
        volume_id,
        device if device != 'auto' else None)
    try:
        vol, wait_succeeded = volume.wait_until_status(
            cinder_client=cinder_client,
            volume_id=volume_id,
            status=volume.VOLUME_STATUS_IN_USE,
            num_tries=status_attempts,
            timeout=status_timeout
        )
        if not wait_succeeded:
            raise RecoverableError(
                'Waiting for volume status {0} failed - detaching volume and '
                'retrying..'.format(volume.VOLUME_STATUS_IN_USE))
        if device == 'auto':
            # The device name was assigned automatically so we
            # query the actual device name
            attachment = volume.get_attachment(
                cinder_client=cinder_client,
                volume_id=volume_id,
                server_id=server_id
            )
            device_name = attachment['device']
            ctx.logger.info('Detected device name for attachment of volume '
                            '{0} to server {1}: {2}'
                            .format(volume_id, server_id, device_name))
            ctx.source.instance.runtime_properties[
                volume.DEVICE_NAME_PROPERTY] = device_name
    # `except Exception, e` is python2-only syntax; `as` works on py2.6+
    # and py3.
    except Exception as e:
        if not isinstance(e, NonRecoverableError):
            # Roll back the partial attachment so a retry starts clean.
            _prepare_attach_volume_to_be_repeated(
                nova_client, cinder_client, server_id, volume_id,
                status_attempts, status_timeout)
        raise
| 693 | |
| 694 | |
def _prepare_attach_volume_to_be_repeated(
        nova_client, cinder_client, server_id, volume_id,
        status_attempts, status_timeout):
    """Best-effort cleanup (detach) after a failed attach_volume() call.

    A failure during the cleanup itself is escalated to a
    NonRecoverableError, since the deployment is then in an unknown state.
    """
    ctx.logger.info('Cleaning after a failed attach_volume() call')
    try:
        _detach_volume(nova_client, cinder_client, server_id, volume_id,
                       status_attempts, status_timeout)
    # `except Exception, e` is python2-only syntax; `as` works on py2.6+
    # and py3.
    except Exception as e:
        ctx.logger.error('Cleaning after a failed attach_volume() call failed '
                         'raising a \'{0}\' exception.'.format(e))
        raise NonRecoverableError(e)
| 707 | |
| 708 | |
def _detach_volume(nova_client, cinder_client, server_id, volume_id,
                   status_attempts, status_timeout):
    """Detach the volume from the server (if attached) and wait until the
    volume becomes available again. No-op when no attachment exists."""
    attachment = volume.get_attachment(cinder_client=cinder_client,
                                       volume_id=volume_id,
                                       server_id=server_id)
    if not attachment:
        return
    nova_client.volumes.delete_server_volume(server_id, attachment['id'])
    volume.wait_until_status(cinder_client=cinder_client,
                             volume_id=volume_id,
                             status=volume.VOLUME_STATUS_AVAILABLE,
                             num_tries=status_attempts,
                             timeout=status_timeout)
| 721 | |
| 722 | |
@operation
@with_nova_client
@with_cinder_client
def detach_volume(nova_client, cinder_client, status_attempts,
                  status_timeout, **kwargs):
    """Detach the source volume from the target server (skips external)."""
    if is_external_relationship(ctx):
        ctx.logger.info('Not detaching volume from server since '
                        'external volume and server are being used')
        return

    target_props = ctx.target.instance.runtime_properties
    source_props = ctx.source.instance.runtime_properties
    _detach_volume(nova_client, cinder_client,
                   target_props[OPENSTACK_ID_PROPERTY],
                   source_props[OPENSTACK_ID_PROPERTY],
                   status_attempts, status_timeout)
| 738 | |
| 739 | |
| 740 | def _fail_on_missing_required_parameters(obj, required_parameters, hint_where): |
| 741 | for k in required_parameters: |
| 742 | if k not in obj: |
| 743 | raise NonRecoverableError( |
| 744 | "Required parameter '{0}' is missing (under host's " |
| 745 | "properties.{1}). Required parameters are: {2}" |
| 746 | .format(k, hint_where, required_parameters)) |
| 747 | |
| 748 | |
def _validate_external_server_keypair(nova_client):
    """Validate an external server against its related keypair node.

    No-op when the server node has no connected keypair. Otherwise the
    keypair node must itself be external, and its keypair must match
    the server's key_name.

    :raises NonRecoverableError: when a non-external keypair node is
        connected, or the server/keypair do not match.
    """
    keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type(
        ctx, KEYPAIR_OPENSTACK_TYPE, True)
    if not keypair_id:
        return

    # .items() (rather than the python2-only .iteritems()) keeps this
    # py2/py3 compatible with identical behavior.
    keypair_instance_id = \
        [node_instance_id for node_instance_id, runtime_props in
         ctx.capabilities.get_all().items() if
         runtime_props.get(OPENSTACK_ID_PROPERTY) == keypair_id][0]
    keypair_node_properties = _get_properties_by_node_instance_id(
        keypair_instance_id)
    if not is_external_resource_by_properties(keypair_node_properties):
        raise NonRecoverableError(
            "Can't connect a new keypair node to a server node "
            "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY))

    server = get_server_by_context(nova_client)
    if keypair_id == _get_keypair_name_by_id(nova_client, server.key_name):
        return
    raise NonRecoverableError(
        "Expected external resources server {0} and keypair {1} to be "
        "connected".format(server.id, keypair_id))
| 772 | |
| 773 | |
def _get_keypair_name_by_id(nova_client, key_name):
    """Resolve the id of the keypair whose nova name is *key_name*."""
    return nova_client.cosmo_get_named(KEYPAIR_OPENSTACK_TYPE, key_name).id
| 777 | |
| 778 | |
def _validate_external_server_nics(neutron_client, network_ids, port_ids):
    # validate no new nics are being assigned to an existing server (which
    # isn't possible on Openstack)
    nic_types = (PORT_OPENSTACK_TYPE, NETWORK_OPENSTACK_TYPE)
    for instance_id, runtime_props in ctx.capabilities.get_all().iteritems():
        if runtime_props.get(OPENSTACK_TYPE_PROPERTY) not in nic_types:
            continue
        node_properties = _get_properties_by_node_instance_id(instance_id)
        if not is_external_resource_by_properties(node_properties):
            raise NonRecoverableError(
                "Can't connect new port and/or network nodes to a server "
                "node with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY))

    # validate all expected connected networks and ports are indeed already
    # connected to the server. note that additional networks (e.g. the
    # management network) may be connected as well with no error raised
    if not network_ids and not port_ids:
        return

    server_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
    connected_ports = neutron_client.list_ports(device_id=server_id)['ports']

    attached_port_ids = set()
    attached_network_ids = set()
    for port in connected_ports:
        attached_port_ids.add(port['id'])
        # not counting networks connected by a connected port since
        # allegedly the connection should be on a separate port
        if port['id'] not in port_ids:
            attached_network_ids.add(port['network_id'])

    disconnected_networks = [net_id for net_id in network_ids
                             if net_id not in attached_network_ids]
    disconnected_ports = [p_id for p_id in port_ids
                          if p_id not in attached_port_ids]
    if disconnected_networks or disconnected_ports:
        raise NonRecoverableError(
            'Expected external resources to be connected to external server {'
            '0}: Networks - {1}; Ports - {2}'.format(server_id,
                                                     disconnected_networks,
                                                     disconnected_ports))
| 819 | |
| 820 | |
def _get_properties_by_node_instance_id(node_instance_id):
    """Fetch, via the manager REST API, the properties of the node that
    backs the given node instance."""
    rest = get_rest_client()
    instance = rest.node_instances.get(node_instance_id)
    node = rest.nodes.get(ctx.deployment.id, instance.node_id)
    return node.properties
| 826 | |
| 827 | |
@operation
@with_nova_client
def creation_validation(nova_client, args, **kwargs):
    """Validate that this server node can be created.

    Runs the generic resource validation and then checks that the
    configured flavor actually exists on the Openstack endpoint.

    :param nova_client: injected by the ``with_nova_client`` decorator.
    :param args: operation inputs, merged over the node's ``server``
        property (operation inputs win).
    :raises NonRecoverableError: when the flavor cannot be resolved or
        does not exist.
    """

    def validate_server_property_value_exists(server_props, property_name):
        ctx.logger.debug(
            'checking whether {0} exists...'.format(property_name))

        # work on a copy - _handle_image_or_flavor mutates the dict
        serv_props_copy = server_props.copy()
        try:
            handle_image_from_relationship(serv_props_copy, 'image', ctx)
            _handle_image_or_flavor(serv_props_copy, nova_client,
                                    property_name)
        except (NonRecoverableError, nova_exceptions.NotFound) as e:
            # temporary error - once image/flavor_name get removed, these
            # errors won't be relevant anymore
            err = str(e)
            ctx.logger.error('VALIDATION ERROR: ' + err)
            raise NonRecoverableError(err)

        prop_value_id = str(serv_props_copy[property_name])
        prop_values = list(nova_client.cosmo_list(property_name))
        for f in prop_values:
            # normalize the listed resource id to str as well: some
            # client/cloud combinations return numeric ids (e.g. flavors
            # created with integer ids), which would otherwise never
            # compare equal to the stringified property value
            if prop_value_id == str(f.id):
                ctx.logger.debug('OK: {0} exists'.format(property_name))
                return
        err = '{0} {1} does not exist'.format(property_name, prop_value_id)
        ctx.logger.error('VALIDATION ERROR: ' + err)
        if prop_values:
            ctx.logger.info('list of available {0}s:'.format(property_name))
            for f in prop_values:
                ctx.logger.info('  {0:>10} - {1}'.format(f.id, f.name))
        else:
            ctx.logger.info('there are no available {0}s'.format(
                property_name))
        raise NonRecoverableError(err)

    validate_resource(ctx, nova_client, SERVER_OPENSTACK_TYPE)

    server_props = dict(ctx.node.properties['server'], **args)
    validate_server_property_value_exists(server_props, 'flavor')
| 869 | |
| 870 | |
def _get_private_key(private_key_path):
    """Locate the private key file for this server.

    Resolution order: the explicit ``private_key_path`` input, then a
    connected keypair node's ``private_key_path`` property, then the
    bootstrap context's agent key path. Raises NonRecoverableError when
    no readable key file is found, or when both an explicit path and a
    keypair relationship are supplied.
    """
    keypair_node = get_single_connected_node_by_openstack_type(
        ctx, KEYPAIR_OPENSTACK_TYPE, True)

    if private_key_path:
        # an explicit path and a keypair relationship are mutually
        # exclusive - refuse ambiguous configuration
        if keypair_node:
            raise NonRecoverableError("server can't both have a "
                                      '"private_key_path" input and be '
                                      'connected to a keypair via a '
                                      'relationship at the same time')
        key_path = private_key_path
    elif keypair_node and keypair_node.properties['private_key_path']:
        key_path = keypair_node.properties['private_key_path']
    else:
        key_path = ctx.bootstrap_context.cloudify_agent.agent_key_path

    if key_path:
        # expand ~ before checking; the expanded path is also what the
        # error message below reports
        key_path = os.path.expanduser(key_path)
        if os.path.isfile(key_path):
            return key_path

    err_message = 'Cannot find private key file'
    if key_path:
        err_message += '; expected file path was {0}'.format(key_path)
    raise NonRecoverableError(err_message)
| 898 | |
| 899 | |
def _validate_security_group_and_server_connection_status(
        nova_client, server_id, sg_id, sg_name, is_connected):

    # verifying the security group got connected or disconnected
    # successfully - this is due to Openstack concurrency issues that may
    # take place when attempting to connect/disconnect multiple SGs to the
    # same server at the same time
    server = nova_client.servers.get(server_id)
    actually_connected = any(sg for sg in server.list_security_group()
                             if sg.id == sg_id)

    if bool(actually_connected) != bool(is_connected):
        raise RecoverableError(
            message='Security group {0} did not get {2} server {1} '
                    'properly'
            .format(
                sg_name,
                server.name,
                'connected to' if is_connected else 'disconnected from'))
| 918 | |
| 919 | |
| 920 | def _handle_image_or_flavor(server, nova_client, prop_name): |
| 921 | if prop_name not in server and '{0}_name'.format(prop_name) not in server: |
| 922 | # setting image or flavor - looking it up by name; if not found, then |
| 923 | # the value is assumed to be the id |
| 924 | server[prop_name] = ctx.node.properties[prop_name] |
| 925 | |
| 926 | # temporary error message: once the 'image' and 'flavor' properties |
| 927 | # become mandatory, this will become less relevant |
| 928 | if not server[prop_name]: |
| 929 | raise NonRecoverableError( |
| 930 | 'must set {0} by either setting a "{0}" property or by setting' |
| 931 | ' a "{0}" or "{0}_name" (deprecated) field under the "server" ' |
| 932 | 'property'.format(prop_name)) |
| 933 | |
| 934 | image_or_flavor = \ |
| 935 | nova_client.cosmo_get_if_exists(prop_name, name=server[prop_name]) |
| 936 | if image_or_flavor: |
| 937 | server[prop_name] = image_or_flavor.id |
| 938 | else: # Deprecated sugar |
| 939 | if '{0}_name'.format(prop_name) in server: |
| 940 | prop_name_plural = nova_client.cosmo_plural(prop_name) |
| 941 | server[prop_name] = \ |
| 942 | getattr(nova_client, prop_name_plural).find( |
| 943 | name=server['{0}_name'.format(prop_name)]).id |
| 944 | del server['{0}_name'.format(prop_name)] |