John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 1 | # Copyright (c) 2016 Cisco and/or its affiliates. |
| 2 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 3 | # you may not use this file except in compliance with the License. |
| 4 | # You may obtain a copy of the License at: |
| 5 | # |
| 6 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 7 | # |
| 8 | # Unless required by applicable law or agreed to in writing, software |
| 9 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 11 | # See the License for the specific language governing permissions and |
| 12 | # limitations under the License. |
| 13 | |
| 14 | """QEMU utilities library.""" |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 15 | from __future__ import absolute_import, division |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 16 | |
| 17 | from time import time, sleep |
| 18 | import json |
| 19 | import logging |
| 20 | |
| 21 | from vpplib.VPPUtil import VPPUtil |
| 22 | from vpplib.constants import Constants |
| 23 | |
| 24 | |
class NodeType(object):
    """Enumeration of node roles used in topology dictionaries."""

    # Device Under Test -- the node that runs VPP.
    DUT = 'DUT'
    # Traffic Generator -- the node that drives test traffic.
    TG = 'TG'
    # Virtual Machine -- a guest hosted on a DUT node.
    VM = 'VM'
| 33 | |
| 34 | |
| 35 | class QemuUtils(object): |
| 36 | """QEMU utilities.""" |
| 37 | |
John DeNisco | a7da67f | 2018-01-26 14:55:33 -0500 | [diff] [blame] | 38 | # noinspection PyDictCreation |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 39 | def __init__(self, qemu_id=1): |
| 40 | self._qemu_id = qemu_id |
| 41 | # Path to QEMU binary |
| 42 | self._qemu_bin = '/usr/bin/qemu-system-x86_64' |
| 43 | # QEMU Machine Protocol socket |
| 44 | self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id) |
| 45 | # QEMU Guest Agent socket |
| 46 | self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id) |
| 47 | # QEMU PID file |
| 48 | self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id) |
| 49 | self._qemu_opt = {} |
| 50 | # Default 1 CPU. |
| 51 | self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1' |
| 52 | # Daemonize the QEMU process after initialization. Default one |
| 53 | # management interface. |
| 54 | self._qemu_opt['options'] = '-cpu host -daemonize -enable-kvm ' \ |
| 55 | '-machine pc,accel=kvm,usb=off,mem-merge=off ' \ |
| 56 | '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'\ |
| 57 | .format(self._qemu_id) |
| 58 | self._qemu_opt['ssh_fwd_port'] = 10021 + qemu_id |
| 59 | # Default serial console port |
| 60 | self._qemu_opt['serial_port'] = 4555 + qemu_id |
| 61 | # Default 512MB virtual RAM |
| 62 | self._qemu_opt['mem_size'] = 512 |
| 63 | # Default huge page mount point, required for Vhost-user interfaces. |
| 64 | self._qemu_opt['huge_mnt'] = '/mnt/huge' |
| 65 | # Default do not allocate huge pages. |
| 66 | self._qemu_opt['huge_allocate'] = False |
| 67 | # Default image for CSIT virl setup |
| 68 | self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img' |
| 69 | # VM node info dict |
| 70 | self._vm_info = { |
| 71 | 'type': NodeType.VM, |
| 72 | 'port': self._qemu_opt['ssh_fwd_port'], |
| 73 | 'username': 'cisco', |
| 74 | 'password': 'cisco', |
| 75 | 'interfaces': {}, |
| 76 | } |
| 77 | # Virtio queue count |
| 78 | self._qemu_opt['queues'] = 1 |
| 79 | self._vhost_id = 0 |
| 80 | self._ssh = None |
| 81 | self._node = None |
| 82 | self._socks = [self._qmp_sock, self._qga_sock] |
| 83 | |
| 84 | def qemu_set_bin(self, path): |
| 85 | """Set binary path for QEMU. |
| 86 | |
| 87 | :param path: Absolute path in filesystem. |
| 88 | :type path: str |
| 89 | """ |
| 90 | self._qemu_bin = path |
| 91 | |
| 92 | def qemu_set_smp(self, cpus, cores, threads, sockets): |
| 93 | """Set SMP option for QEMU. |
| 94 | |
| 95 | :param cpus: Number of CPUs. |
| 96 | :param cores: Number of CPU cores on one socket. |
| 97 | :param threads: Number of threads on one CPU core. |
| 98 | :param sockets: Number of discrete sockets in the system. |
| 99 | :type cpus: int |
| 100 | :type cores: int |
| 101 | :type threads: int |
| 102 | :type sockets: int |
| 103 | """ |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 104 | self._qemu_opt['smp'] = \ |
| 105 | '-smp {},cores={},threads={},sockets={}'.format( |
| 106 | cpus, cores, threads, sockets) |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 107 | |
| 108 | def qemu_set_ssh_fwd_port(self, fwd_port): |
| 109 | """Set host port for guest SSH forwarding. |
| 110 | |
| 111 | :param fwd_port: Port number on host for guest SSH forwarding. |
| 112 | :type fwd_port: int |
| 113 | """ |
| 114 | self._qemu_opt['ssh_fwd_port'] = fwd_port |
| 115 | self._vm_info['port'] = fwd_port |
| 116 | |
| 117 | def qemu_set_serial_port(self, port): |
| 118 | """Set serial console port. |
| 119 | |
| 120 | :param port: Serial console port. |
| 121 | :type port: int |
| 122 | """ |
| 123 | self._qemu_opt['serial_port'] = port |
| 124 | |
| 125 | def qemu_set_mem_size(self, mem_size): |
| 126 | """Set virtual RAM size. |
| 127 | |
| 128 | :param mem_size: RAM size in Mega Bytes. |
| 129 | :type mem_size: int |
| 130 | """ |
| 131 | self._qemu_opt['mem_size'] = int(mem_size) |
| 132 | |
| 133 | def qemu_set_huge_mnt(self, huge_mnt): |
| 134 | """Set hugefile mount point. |
| 135 | |
| 136 | :param huge_mnt: System hugefile mount point. |
| 137 | :type huge_mnt: int |
| 138 | """ |
| 139 | self._qemu_opt['huge_mnt'] = huge_mnt |
| 140 | |
| 141 | def qemu_set_huge_allocate(self): |
| 142 | """Set flag to allocate more huge pages if needed.""" |
| 143 | self._qemu_opt['huge_allocate'] = True |
| 144 | |
| 145 | def qemu_set_disk_image(self, disk_image): |
| 146 | """Set disk image. |
| 147 | |
| 148 | :param disk_image: Path of the disk image. |
| 149 | :type disk_image: str |
| 150 | """ |
| 151 | self._qemu_opt['disk_image'] = disk_image |
| 152 | |
    def qemu_set_affinity(self, *host_cpus):
        """Set qemu affinity by getting thread PIDs via QMP and taskset to list
        of CPU cores.

        :param host_cpus: List of CPU cores.
        :type host_cpus: list
        :raises ValueError: If the host CPU count does not match the number
            of QEMU vCPU threads.
        :raises RuntimeError: If pinning a thread with taskset fails.
        """
        # 'query-cpus' returns one entry per vCPU; each carries the host
        # PID of that vCPU thread in 'thread_id'.
        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']

        # Pinning is strictly one host CPU per vCPU thread.
        if len(qemu_cpus) != len(host_cpus):
            logging.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
                len(host_cpus), len(qemu_cpus)))
            raise ValueError('Host CPU count must match Qemu Thread count')

        # Pin each vCPU thread to its corresponding host CPU.
        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
            cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id'])
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Set affinity failed {0}'.format(stderr))
                raise RuntimeError('Set affinity failed on {0}'.format(
                    self._node['host']))
| 174 | |
    def qemu_set_scheduler_policy(self):
        """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
        processes.

        :raises RuntimeError: Set scheduler policy failed.
        """
        # 'query-cpus' returns one entry per vCPU with its host thread PID.
        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']

        for qemu_cpu in qemu_cpus:
            # chrt -r -p 1: switch the thread to the SCHED_RR (round-robin
            # realtime) policy with priority 1.
            cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id'])
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Set SCHED_RR failed {0}'.format(stderr))
                raise RuntimeError('Set SCHED_RR failed on {0}'.format(
                    self._node['host']))
| 190 | |
| 191 | def qemu_set_node(self, node): |
| 192 | """Set node to run QEMU on. |
| 193 | |
| 194 | :param node: Node to run QEMU on. |
| 195 | :type node: dict |
| 196 | """ |
| 197 | self._node = node |
| 198 | self._vm_info['host'] = node['host'] |
| 199 | |
| 200 | def qemu_add_vhost_user_if(self, socket, server=True, mac=None): |
| 201 | """Add Vhost-user interface. |
| 202 | |
| 203 | :param socket: Path of the unix socket. |
| 204 | :param server: If True the socket shall be a listening socket. |
| 205 | :param mac: Vhost-user interface MAC address (optional, otherwise is |
| 206 | used auto-generated MAC 52:54:00:00:xx:yy). |
| 207 | :type socket: str |
| 208 | :type server: bool |
| 209 | :type mac: str |
| 210 | """ |
| 211 | self._vhost_id += 1 |
| 212 | # Create unix socket character device. |
| 213 | chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id, |
| 214 | socket) |
| 215 | if server is True: |
| 216 | chardev += ',server' |
| 217 | self._qemu_opt['options'] += chardev |
| 218 | # Create Vhost-user network backend. |
| 219 | netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}' |
| 220 | .format(self._vhost_id, self._qemu_opt['queues'])) |
| 221 | self._qemu_opt['options'] += netdev |
| 222 | # If MAC is not specified use auto-generated MAC address based on |
| 223 | # template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. vhost1 MAC of QEMU |
| 224 | # with ID 1 is 52:54:00:00:01:01 |
| 225 | if mac is None: |
| 226 | mac = '52:54:00:00:{0:02x}:{1:02x}'.\ |
| 227 | format(self._qemu_id, self._vhost_id) |
| 228 | extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\ |
| 229 | 'guest_tso6=off,guest_ecn=off,mrg_rxbuf=off' |
| 230 | # Create Virtio network device. |
| 231 | device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format( |
| 232 | self._vhost_id, mac, extend_options) |
| 233 | self._qemu_opt['options'] += device |
| 234 | # Add interface MAC and socket to the node dict |
| 235 | if_data = {'mac_address': mac, 'socket': socket} |
| 236 | if_name = 'vhost{}'.format(self._vhost_id) |
| 237 | self._vm_info['interfaces'][if_name] = if_data |
| 238 | # Add socket to the socket list |
| 239 | self._socks.append(socket) |
| 240 | |
    def _qemu_qmp_exec(self, cmd):
        """Execute QMP command.

        QMP is JSON based protocol which allows to control QEMU instance.

        :param cmd: QMP command to execute.
        :type cmd: str
        :return: Command output in python representation of JSON format. The
            { "return": {} } response is QMP's success response. An error
            response will contain the "error" keyword instead of "return".
        :raises RuntimeError: If the socat invocation fails or the reply
            cannot be found in the output.
        """
        # To enter command mode, the qmp_capabilities command must be issued
        # first; both JSON requests are piped into the QMP unix socket in one
        # shell pipeline via socat.
        qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \
            '{ \\"execute\\": \\"' + cmd + \
            '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock

        (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
        if int(ret_code) != 0:
            logging.debug('QMP execute failed {0}'.format(stderr))
            raise RuntimeError('QMP execute "{0}"'
                               ' failed on {1}'.format(
                                   cmd, self._node['host']))
        logging.debug(stdout)
        # Skip capabilities negotiation messages: line 0 is the server
        # greeting, line 1 acknowledges qmp_capabilities, line 2 is the
        # reply to the actual command.
        out_list = stdout.splitlines()
        if len(out_list) < 3:
            raise RuntimeError('Invalid QMP output on {0}'.format(
                self._node['host']))
        return json.loads(out_list[2])
| 270 | |
    def _qemu_qga_flush(self):
        """Flush the QGA parser state.

        Writes a single 0xFF byte to the guest-agent socket; per the QEMU
        guest-agent protocol this byte makes the agent reset its JSON
        parser, discarding any partially received request.

        :returns: Parsed JSON reply if the agent emitted one, empty dict
            otherwise.
        :rtype: dict
        :raises RuntimeError: If the socat invocation fails.
        """
        # The 'sleep 1' keeps the connection open long enough for the agent
        # to respond before socat disconnects.
        qga_cmd = '(printf "\xFF"; sleep 1) | ' \
            'sudo -S socat - UNIX-CONNECT:' + \
            self._qga_sock
        # TODO: probably need something else
        (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
        if int(ret_code) != 0:
            logging.debug('QGA execute failed {0}'.format(stderr))
            raise RuntimeError('QGA execute "{0}" '
                               'failed on {1}'.format(qga_cmd,
                                                      self._node['host']))
        logging.debug(stdout)
        # No output: the agent had nothing buffered to report.
        if not stdout:
            return {}
        # Only the first output line carries the JSON reply.
        return json.loads(stdout.split('\n', 1)[0])
| 288 | |
    def _qemu_qga_exec(self, cmd):
        """Execute QGA command.

        QGA provide access to a system-level agent via standard QMP commands.

        :param cmd: QGA command to execute.
        :type cmd: str
        :returns: Parsed JSON reply if the agent emitted one, empty dict
            otherwise.
        :rtype: dict
        :raises RuntimeError: If the socat invocation fails.
        """
        # Pipe the JSON request into the QGA unix socket; the trailing
        # 'sleep 1' keeps the connection open long enough to read the reply.
        qga_cmd = '(echo "{ \\"execute\\": \\"' + \
            cmd + \
            '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
            self._qga_sock
        (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
        if int(ret_code) != 0:
            logging.debug('QGA execute failed {0}'.format(stderr))
            raise RuntimeError('QGA execute "{0}"'
                               ' failed on {1}'.format(
                                   cmd, self._node['host']))
        logging.debug(stdout)
        # Empty output: the agent did not answer (e.g. VM still booting).
        if not stdout:
            return {}
        # Only the first output line carries the JSON reply.
        return json.loads(stdout.split('\n', 1)[0])
| 311 | |
    def _wait_until_vm_boot(self, timeout=60):
        """Wait until QEMU VM is booted.

        Ping QEMU guest agent each 5s until VM booted or timeout.

        :param timeout: Waiting timeout in seconds (optional, default 60s).
        :type timeout: int
        :raises RuntimeError: If the VM does not answer the guest-agent
            ping within the timeout.
        """
        start = time()
        while True:
            if time() - start > timeout:
                raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
                    self._qemu_opt['disk_image'], self._node['host']))
            out = None
            try:
                # Reset the agent's JSON parser, then ping it.
                self._qemu_qga_flush()
                out = self._qemu_qga_exec('guest-ping')
            except ValueError:
                # json.loads choked on partial/garbled agent output;
                # treat the agent as not ready yet.
                logging.debug(
                    'QGA guest-ping unexpected output {}'.format(out))
            # Empty output - VM not booted yet
            if not out:
                sleep(5)
            # Non-error return - VM booted
            elif out.get('return') is not None:
                break
            # Skip error and wait
            elif out.get('error') is not None:
                sleep(5)
            else:
                # If there is an unexpected output from QGA guest-info, try
                # again until timeout.
                logging.debug(
                    'QGA guest-ping unexpected output {}'.format(out))

        logging.debug(
            'VM {0} booted on {1}'.format(self._qemu_opt['disk_image'],
                                          self._node['host']))
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 350 | |
| 351 | def _update_vm_interfaces(self): |
| 352 | """Update interface names in VM node dict.""" |
| 353 | # Send guest-network-get-interfaces command via QGA, output example: |
| 354 | # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"}, |
| 355 | # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]} |
| 356 | out = self._qemu_qga_exec('guest-network-get-interfaces') |
| 357 | interfaces = out.get('return') |
| 358 | mac_name = {} |
| 359 | if not interfaces: |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 360 | raise RuntimeError( |
| 361 | 'Get VM {0} interface list failed on {1}'.format( |
| 362 | self._qemu_opt['disk_image'], self._node['host'])) |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 363 | # Create MAC-name dict |
| 364 | for interface in interfaces: |
| 365 | if 'hardware-address' not in interface: |
| 366 | continue |
| 367 | mac_name[interface['hardware-address']] = interface['name'] |
| 368 | # Match interface by MAC and save interface name |
| 369 | for interface in self._vm_info['interfaces'].values(): |
| 370 | mac = interface.get('mac_address') |
| 371 | if_name = mac_name.get(mac) |
| 372 | if if_name is None: |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 373 | logging.debug( |
| 374 | 'Interface name for MAC {} not found'.format(mac)) |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 375 | else: |
| 376 | interface['name'] = if_name |
| 377 | |
    def _huge_page_check(self, allocate=False):
        """Check, and optionally grow, the host huge-page pool for this VM.

        Verifies that enough free huge pages exist to back the guest RAM
        and that the configured hugetlbfs mount point is mounted, creating
        the mount if necessary.

        :param allocate: If True, try to allocate additional huge pages
            when there are not enough free ones; if False, fail instead.
        :type allocate: bool
        :raises RuntimeError: If pages are insufficient (and allocation is
            disabled or fails), or if mounting hugetlbfs fails.
        """
        huge_mnt = self._qemu_opt.get('huge_mnt')
        mem_size = self._qemu_opt.get('mem_size')

        # Get huge pages information: page size (kB), free and total counts.
        huge_size = self._get_huge_page_size()
        huge_free = self._get_huge_page_free(huge_size)
        huge_total = self._get_huge_page_total(huge_size)

        # Check if memory requested by qemu is available on host
        # (mem_size is MB, huge_size is kB -- hence the * 1024).
        if (mem_size * 1024) > (huge_free * huge_size):
            # If we want to allocate hugepage dynamically
            if allocate:
                mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
                # Allocate twice the shortfall on top of the current total.
                huge_to_allocate = ((mem_needed // huge_size) * 2) + huge_total
                max_map_count = huge_to_allocate*4
                # Increase maximum number of memory map areas a
                # process may have
                cmd = \
                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
                        max_map_count)
                # NOTE(review): the ret_code of this first write is not
                # checked before being overwritten below -- confirm intended.
                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
                # Increase hugepage count
                cmd = \
                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
                        huge_to_allocate)
                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
                if int(ret_code) != 0:
                    # NOTE(review): message says "Mount" but this step
                    # allocates pages, not mounts.
                    logging.debug(
                        'Mount huge pages failed {0}'.format(stderr))
                    raise RuntimeError(
                        'Mount huge pages failed on {0}'.format(
                            self._node['host']))
            # If we do not want to allocate dynamically end with error
            else:
                raise RuntimeError(
                    'Not enough free huge pages: {0}, '
                    '{1} MB'.format(huge_free, huge_free * huge_size)
                )
        # Check if huge pages mount point exist
        has_huge_mnt = False
        (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
        for line in output.splitlines():
            # Try to find something like:
            # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
            mount = line.split()
            if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
                has_huge_mnt = True
                break
        # If huge page mount point not exist create one
        if not has_huge_mnt:
            cmd = 'mkdir -p {0}'.format(huge_mnt)
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Create mount dir failed: {0}'.format(stderr))
                raise RuntimeError('Create mount dir failed on {0}'.format(
                    self._node['host']))
            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
                huge_mnt)
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Mount huge pages failed {0}'.format(stderr))
                raise RuntimeError('Mount huge pages failed on {0}'.format(
                    self._node['host']))
| 443 | |
| 444 | def _get_huge_page_size(self): |
| 445 | """Get default size of huge pages in system. |
| 446 | |
| 447 | :returns: Default size of free huge pages in system. |
| 448 | :rtype: int |
| 449 | :raises: RuntimeError if reading failed for three times. |
| 450 | """ |
| 451 | # TODO: remove to dedicated library |
| 452 | cmd_huge_size = "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'" |
| 453 | for _ in range(3): |
| 454 | (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_size) |
| 455 | if ret == 0: |
| 456 | try: |
| 457 | huge_size = int(out) |
| 458 | except ValueError: |
| 459 | logging.debug('Reading huge page size information failed') |
| 460 | else: |
| 461 | break |
| 462 | else: |
| 463 | raise RuntimeError('Getting huge page size information failed.') |
| 464 | return huge_size |
| 465 | |
| 466 | def _get_huge_page_free(self, huge_size): |
| 467 | """Get total number of huge pages in system. |
| 468 | |
| 469 | :param huge_size: Size of hugepages. |
| 470 | :type huge_size: int |
| 471 | :returns: Number of free huge pages in system. |
| 472 | :rtype: int |
| 473 | :raises: RuntimeError if reading failed for three times. |
| 474 | """ |
| 475 | # TODO: add numa aware option |
| 476 | # TODO: remove to dedicated library |
| 477 | cmd_huge_free = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\ |
| 478 | 'free_hugepages'.format(huge_size) |
| 479 | for _ in range(3): |
| 480 | (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_free) |
| 481 | if ret == 0: |
| 482 | try: |
| 483 | huge_free = int(out) |
| 484 | except ValueError: |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 485 | logging.debug( |
| 486 | 'Reading free huge pages information failed') |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 487 | else: |
| 488 | break |
| 489 | else: |
| 490 | raise RuntimeError('Getting free huge pages information failed.') |
| 491 | return huge_free |
| 492 | |
| 493 | def _get_huge_page_total(self, huge_size): |
| 494 | """Get total number of huge pages in system. |
| 495 | |
| 496 | :param huge_size: Size of hugepages. |
| 497 | :type huge_size: int |
| 498 | :returns: Total number of huge pages in system. |
| 499 | :rtype: int |
| 500 | :raises: RuntimeError if reading failed for three times. |
| 501 | """ |
| 502 | # TODO: add numa aware option |
| 503 | # TODO: remove to dedicated library |
| 504 | cmd_huge_total = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\ |
| 505 | 'nr_hugepages'.format(huge_size) |
| 506 | for _ in range(3): |
| 507 | (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_total) |
| 508 | if ret == 0: |
| 509 | try: |
| 510 | huge_total = int(out) |
| 511 | except ValueError: |
Paul Vinciguerra | 339bc6b | 2018-12-19 02:05:25 -0800 | [diff] [blame] | 512 | logging.debug( |
| 513 | 'Reading total huge pages information failed') |
John DeNisco | 68b0ee3 | 2017-09-27 16:35:23 -0400 | [diff] [blame] | 514 | else: |
| 515 | break |
| 516 | else: |
| 517 | raise RuntimeError('Getting total huge pages information failed.') |
| 518 | return huge_total |
| 519 | |
    def qemu_start(self):
        """Start QEMU and wait until VM boot.

        Assembles the full QEMU command line from the accumulated options,
        launches the (daemonized) process over SSH, waits for the guest
        agent to answer, then resolves guest interface names.

        :return: VM node info.
        :rtype: dict
        :raises RuntimeError: If QEMU fails to start or the VM does not
            boot in time.
        .. note:: First set at least node to run QEMU on.
        .. warning:: Starts only one VM on the node.
        """
        # SSH forwarding
        ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format(
            self._qemu_opt.get('ssh_fwd_port'))
        # Memory and huge pages: guest RAM is backed by a file on the
        # hugetlbfs mount, shared so vhost-user backends can map it.
        mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
            'share=on -m {0} -numa node,memdev=mem'.format(
                self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))

        # By default check only if hugepages are available.
        # If 'huge_allocate' is set to true try to allocate as well.
        self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))

        # Disk option
        drive = '-drive file={0},format=raw,cache=none,if=virtio'.format(
            self._qemu_opt.get('disk_image'))
        # Setup QMP via unix socket
        qmp = '-qmp unix:{0},server,nowait'.format(self._qmp_sock)
        # Setup serial console
        serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \
            'nowait -device isa-serial,chardev=gnc0'.format(
                self._qemu_opt.get('serial_port'))
        # Setup QGA via chardev (unix socket) and isa-serial channel
        qga = '-chardev socket,path={0},server,nowait,id=qga0 ' \
            '-device isa-serial,chardev=qga0'.format(self._qga_sock)
        # Graphic setup
        graphic = '-monitor none -display none -vga none'
        # PID file
        pid = '-pidfile {}'.format(self._pid_file)

        # Run QEMU
        cmd = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'.format(
            self._qemu_bin, self._qemu_opt.get('smp'), mem, ssh_fwd,
            self._qemu_opt.get('options'),
            drive, qmp, serial, qga, graphic, pid)
        (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd, timeout=300)
        if int(ret_code) != 0:
            logging.debug('QEMU start failed {0}'.format(stderr))
            raise RuntimeError('QEMU start failed on {0}'.format(
                self._node['host']))
        logging.debug('QEMU running')
        # Wait until VM boot
        try:
            self._wait_until_vm_boot()
        except RuntimeError:
            # Boot failed: tear down the daemonized QEMU and its sockets
            # before propagating the error.
            self.qemu_kill_all()
            self.qemu_clear_socks()
            raise
        # Update interface names in VM node dict
        self._update_vm_interfaces()
        # Return VM node dict
        return self._vm_info
| 579 | |
| 580 | def qemu_quit(self): |
| 581 | """Quit the QEMU emulator.""" |
| 582 | out = self._qemu_qmp_exec('quit') |
| 583 | err = out.get('error') |
| 584 | if err is not None: |
| 585 | raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format( |
| 586 | self._node['host'], json.dumps(err))) |
| 587 | |
| 588 | def qemu_system_powerdown(self): |
| 589 | """Power down the system (if supported).""" |
| 590 | out = self._qemu_qmp_exec('system_powerdown') |
| 591 | err = out.get('error') |
| 592 | if err is not None: |
| 593 | raise RuntimeError( |
| 594 | 'QEMU system powerdown failed on {0}, ' |
| 595 | 'error: {1}'.format(self._node['host'], json.dumps(err)) |
| 596 | ) |
| 597 | |
| 598 | def qemu_system_reset(self): |
| 599 | """Reset the system.""" |
| 600 | out = self._qemu_qmp_exec('system_reset') |
| 601 | err = out.get('error') |
| 602 | if err is not None: |
| 603 | raise RuntimeError( |
| 604 | 'QEMU system reset failed on {0}, ' |
| 605 | 'error: {1}'.format(self._node['host'], json.dumps(err))) |
| 606 | |
| 607 | def qemu_kill(self): |
| 608 | """Kill qemu process.""" |
| 609 | # Note: in QEMU start phase there are 3 QEMU processes because we |
| 610 | # daemonize QEMU |
| 611 | self._ssh.exec_command_sudo('chmod +r {}'.format(self._pid_file)) |
| 612 | self._ssh.exec_command_sudo('kill -SIGKILL $(cat {})' |
| 613 | .format(self._pid_file)) |
| 614 | # Delete PID file |
| 615 | cmd = 'rm -f {}'.format(self._pid_file) |
| 616 | self._ssh.exec_command_sudo(cmd) |
| 617 | |
| 618 | def qemu_kill_all(self, node=None): |
| 619 | """Kill all qemu processes on DUT node if specified. |
| 620 | |
| 621 | :param node: Node to kill all QEMU processes on. |
| 622 | :type node: dict |
| 623 | """ |
| 624 | if node: |
| 625 | self.qemu_set_node(node) |
| 626 | self._ssh.exec_command_sudo('pkill -SIGKILL qemu') |
| 627 | |
| 628 | def qemu_clear_socks(self): |
| 629 | """Remove all sockets created by QEMU.""" |
| 630 | # If serial console port still open kill process |
| 631 | cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port')) |
| 632 | self._ssh.exec_command_sudo(cmd) |
| 633 | # Delete all created sockets |
| 634 | for sock in self._socks: |
| 635 | cmd = 'rm -f {}'.format(sock) |
| 636 | self._ssh.exec_command_sudo(cmd) |
| 637 | |
| 638 | def qemu_system_status(self): |
| 639 | """Return current VM status. |
| 640 | |
| 641 | VM should be in following status: |
| 642 | |
| 643 | - debug: QEMU running on a debugger |
| 644 | - finish-migrate: paused to finish the migration process |
| 645 | - inmigrate: waiting for an incoming migration |
| 646 | - internal-error: internal error has occurred |
| 647 | - io-error: the last IOP has failed |
| 648 | - paused: paused |
| 649 | - postmigrate: paused following a successful migrate |
| 650 | - prelaunch: QEMU was started with -S and guest has not started |
| 651 | - restore-vm: paused to restore VM state |
| 652 | - running: actively running |
| 653 | - save-vm: paused to save the VM state |
| 654 | - shutdown: shut down (and -no-shutdown is in use) |
| 655 | - suspended: suspended (ACPI S3) |
| 656 | - watchdog: watchdog action has been triggered |
| 657 | - guest-panicked: panicked as a result of guest OS panic |
| 658 | |
| 659 | :return: VM status. |
| 660 | :rtype: str |
| 661 | """ |
| 662 | out = self._qemu_qmp_exec('query-status') |
| 663 | ret = out.get('return') |
| 664 | if ret is not None: |
| 665 | return ret.get('status') |
| 666 | else: |
| 667 | err = out.get('error') |
| 668 | raise RuntimeError( |
| 669 | 'QEMU query-status failed on {0}, ' |
| 670 | 'error: {1}'.format(self._node['host'], json.dumps(err))) |
| 671 | |
    @staticmethod
    def build_qemu(node, force_install=False, apply_patch=False):
        """Build QEMU from sources.

        Runs the repository's qemu_build.sh helper script on the node via
        VPPUtil.exec_command with a 1000 second timeout.

        :param node: Node to build QEMU on.
        :param force_install: If True, then remove previous build.
        :param apply_patch: If True, then apply patches from qemu_patches dir.
        :type node: dict
        :type force_install: bool
        :type apply_patch: bool
        :raises: RuntimeError if building QEMU failed.
        """

        directory = ' --directory={0}'.format(Constants.QEMU_INSTALL_DIR)
        version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
        force = ' --force' if force_install else ''
        patch = ' --patch' if apply_patch else ''

        # NOTE(review): arguments are interpolated as version then directory,
        # i.e. opposite to the definition order above -- presumably the build
        # script parses named flags in any order; confirm against
        # qemu_build.sh.
        (ret_code, stdout, stderr) = VPPUtil. \
            exec_command(
                "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}'".
                format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
                       version, directory, force, patch), 1000)

        if int(ret_code) != 0:
            logging.debug('QEMU build failed {0}'.format(stdout + stderr))
            raise RuntimeError('QEMU build failed on {0}'.format(node['host']))