#!/usr/bin/env python3
2
3from __future__ import print_function
4import logging
5import sys
6import os
7import select
8import signal
9import subprocess
10import unittest
11import re
12import time
13import faulthandler
14import random
15import copy
16import platform
17import shutil
18from collections import deque
19from threading import Thread, Event
20from inspect import getdoc, isclass
21from traceback import format_exception
22from logging import FileHandler, DEBUG, Formatter
23from enum import Enum
24from abc import ABC, abstractmethod
25from struct import pack, unpack
26
27from config import config, available_cpus, num_cpus, max_vpp_cpus
28import hook as hookmodule
29from vpp_pg_interface import VppPGInterface
30from vpp_sub_interface import VppSubInterface
31from vpp_lo_interface import VppLoInterface
32from vpp_bvi_interface import VppBviInterface
33from vpp_papi_provider import VppPapiProvider
34from vpp_papi import VppEnum
35import vpp_papi
36from vpp_papi.vpp_stats import VPPStats
37from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
38from log import (
39 RED,
40 GREEN,
41 YELLOW,
42 double_line_delim,
43 single_line_delim,
44 get_logger,
45 colorize,
46)
47from vpp_object import VppObjectRegistry
48from util import ppp, is_core_present
49
50logger = logging.getLogger(__name__)
51
52# Set up an empty logger for the testcase that can be overridden as necessary
53null_logger = logging.getLogger("VppTestCase")
54null_logger.addHandler(logging.NullHandler())
55
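# numeric result codes sent back to the parent process over the result pipe
# (see VppTestResult.send_result_through_pipe)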
56PASS = 0
57FAIL = 1
58ERROR = 2
59SKIP = 3
60TEST_RUN = 4
61SKIP_CPU_SHORTAGE = 5
62
63
64if config.debug_framework:
65 import debug_internal
66
67"""
68 Test framework module.
69
70 The module provides a set of tools for constructing and running tests and
71 representing the results.
72"""
73
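# Typical use of this framework (an illustrative sketch only): tests subclass
# VppTestCase, create packet-generator interfaces in setUpClass() and drive
# traffic with the pg_send()/send_and_expect_*() helpers below.  The interface
# helpers admin_up()/config_ip4()/resolve_arp() and the pg0/pg1 attribute
# names come from the VppInterface/VppPGInterface classes and are assumptions
# here, not defined in this module:
#
#   class TestExample(VppTestCase):
#       """Example test case"""
#
#       @classmethod
#       def setUpClass(cls):
#           super(TestExample, cls).setUpClass()
#           cls.create_pg_interfaces(range(2))
#           for i in cls.pg_interfaces:
#               i.admin_up()
#               i.config_ip4()
#               i.resolve_arp()
#
#       def test_forwarding(self):
#           pkts = [...]  # scapy packets built by the test
#           rx = self.send_and_expect_only(self.pg0, pkts, self.pg1)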
74
75class VppDiedError(Exception):
76 """exception for reporting that the subprocess has died."""
77
78 signals_by_value = {
79 v: k
80 for k, v in signal.__dict__.items()
81 if k.startswith("SIG") and not k.startswith("SIG_")
82 }
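    # e.g. a VPP child killed by SIGSEGV has returncode -11, so the lookup
    # signals_by_value[-rv] in __init__ recovers the signal name "SIGSEGV"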
83
84 def __init__(self, rv=None, testcase=None, method_name=None):
85 self.rv = rv
86 self.signal_name = None
87 self.testcase = testcase
88 self.method_name = method_name
89
90 try:
91 self.signal_name = VppDiedError.signals_by_value[-rv]
92 except (KeyError, TypeError):
93 pass
94
95 if testcase is None and method_name is None:
96 in_msg = ""
97 else:
98 in_msg = " while running %s.%s" % (testcase, method_name)
99
100 if self.rv:
101 msg = "VPP subprocess died unexpectedly%s with return code: %d%s." % (
102 in_msg,
103 self.rv,
104 " [%s]" % (self.signal_name if self.signal_name is not None else ""),
105 )
106 else:
107 msg = "VPP subprocess died unexpectedly%s." % in_msg
108
109 super(VppDiedError, self).__init__(msg)
110
111
112class _PacketInfo(object):
    """Private class used to create packet info objects.

    Helps to process information about the next packet.
    Sets variables to default values.
    """
118
119 #: Store the index of the packet.
120 index = -1
121 #: Store the index of the source packet generator interface of the packet.
122 src = -1
123 #: Store the index of the destination packet generator interface
124 #: of the packet.
125 dst = -1
126 #: Store expected ip version
127 ip = -1
128 #: Store expected upper protocol
129 proto = -1
130 #: Store the copy of the former packet.
131 data = None
132
133 def __eq__(self, other):
134 index = self.index == other.index
135 src = self.src == other.src
136 dst = self.dst == other.dst
137 data = self.data == other.data
138 return index and src and dst and data
139
140
141def pump_output(testclass):
142 """pump output from vpp stdout/stderr to proper queues"""
143 stdout_fragment = ""
144 stderr_fragment = ""
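    # a single os.read() may end in the middle of a line; the trailing partial
    # line is kept in stdout_fragment/stderr_fragment and prepended to the
    # next chunk, so only whole lines are queued and logged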
145 while not testclass.pump_thread_stop_flag.is_set():
146 readable = select.select(
147 [
148 testclass.vpp.stdout.fileno(),
149 testclass.vpp.stderr.fileno(),
150 testclass.pump_thread_wakeup_pipe[0],
151 ],
152 [],
153 [],
154 )[0]
155 if testclass.vpp.stdout.fileno() in readable:
156 read = os.read(testclass.vpp.stdout.fileno(), 102400)
157 if len(read) > 0:
158 split = read.decode("ascii", errors="backslashreplace").splitlines(True)
159 if len(stdout_fragment) > 0:
160 split[0] = "%s%s" % (stdout_fragment, split[0])
161 if len(split) > 0 and split[-1].endswith("\n"):
162 limit = None
163 else:
164 limit = -1
165 stdout_fragment = split[-1]
166 testclass.vpp_stdout_deque.extend(split[:limit])
167 if not config.cache_vpp_output:
168 for line in split[:limit]:
169 testclass.logger.info("VPP STDOUT: %s" % line.rstrip("\n"))
170 if testclass.vpp.stderr.fileno() in readable:
171 read = os.read(testclass.vpp.stderr.fileno(), 102400)
172 if len(read) > 0:
173 split = read.decode("ascii", errors="backslashreplace").splitlines(True)
174 if len(stderr_fragment) > 0:
175 split[0] = "%s%s" % (stderr_fragment, split[0])
176 if len(split) > 0 and split[-1].endswith("\n"):
177 limit = None
178 else:
179 limit = -1
180 stderr_fragment = split[-1]
181
182 testclass.vpp_stderr_deque.extend(split[:limit])
183 if not config.cache_vpp_output:
184 for line in split[:limit]:
185 testclass.logger.error("VPP STDERR: %s" % line.rstrip("\n"))
186 # ignoring the dummy pipe here intentionally - the
187 # flag will take care of properly terminating the loop
188
189
190def _is_platform_aarch64():
191 return platform.machine() == "aarch64"
192
193
194is_platform_aarch64 = _is_platform_aarch64()
195
196
197class KeepAliveReporter(object):
198 """
199 Singleton object which reports test start to parent process
200 """
201
202 _shared_state = {}
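    # Borg pattern: every instance shares the same __dict__ (and thus the same
    # pipe), which is what makes this behave like a singleton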
203
204 def __init__(self):
205 self.__dict__ = self._shared_state
206 self._pipe = None
207
208 @property
209 def pipe(self):
210 return self._pipe
211
212 @pipe.setter
213 def pipe(self, pipe):
214 if self._pipe is not None:
215 raise Exception("Internal error - pipe should only be set once.")
216 self._pipe = pipe
217
218 def send_keep_alive(self, test, desc=None):
219 """
220 Write current test tmpdir & desc to keep-alive pipe to signal liveness
221 """
222 if self.pipe is None:
223 # if not running forked..
224 return
225
226 if isclass(test):
227 desc = "%s (%s)" % (desc, unittest.util.strclass(test))
228 else:
229 desc = test.id()
230
231 self.pipe.send((desc, config.vpp, test.tempdir, test.vpp.pid))
232
233
234class TestCaseTag(Enum):
235 # marks the suites that must run at the end
236 # using only a single test runner
237 RUN_SOLO = 1
238 # marks the suites broken on VPP multi-worker
239 FIXME_VPP_WORKERS = 2
240 # marks the suites broken when ASan is enabled
241 FIXME_ASAN = 3
242
243
244def create_tag_decorator(e):
245 def decorator(cls):
246 try:
247 cls.test_tags.append(e)
248 except AttributeError:
249 cls.test_tags = [e]
250 return cls
251
252 return decorator
253
254
255tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
256tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
257tag_fixme_asan = create_tag_decorator(TestCaseTag.FIXME_ASAN)
258
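# usage sketch - the decorators above are applied to test suite classes, e.g.:
#
#   @tag_fixme_vpp_workers
#   class TestSomething(VppTestCase):
#       """Suite that currently passes only with a single VPP thread"""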
259
260class DummyVpp:
261 returncode = None
262 pid = 0xCAFEBAFE
263
264 def poll(self):
265 pass
266
267 def terminate(self):
268 pass
269
270
271class CPUInterface(ABC):
272 cpus = []
273 skipped_due_to_cpu_lack = False
274
275 @classmethod
276 @abstractmethod
277 def get_cpus_required(cls):
278 pass
279
280 @classmethod
281 def assign_cpus(cls, cpus):
282 cls.cpus = cpus
283
284
285class VppTestCase(CPUInterface, unittest.TestCase):
    """Base class for VPP test cases that are implemented as classes.
    It provides methods to create and run test cases.
    """
289
290 extra_vpp_statseg_config = ""
291 extra_vpp_punt_config = []
292 extra_vpp_plugin_config = []
293 logger = null_logger
294 vapi_response_timeout = 5
295 remove_configured_vpp_objects_on_tear_down = True
296
297 @property
298 def packet_infos(self):
299 """List of packet infos"""
300 return self._packet_infos
301
302 @classmethod
303 def get_packet_count_for_if_idx(cls, dst_if_index):
        """Get the number of packet infos for the specified destination
        interface index"""
305 if dst_if_index in cls._packet_count_for_dst_if_idx:
306 return cls._packet_count_for_dst_if_idx[dst_if_index]
307 else:
308 return 0
309
310 @classmethod
311 def has_tag(cls, tag):
312 """if the test case has a given tag - return true"""
313 try:
314 return tag in cls.test_tags
315 except AttributeError:
316 pass
317 return False
318
319 @classmethod
320 def is_tagged_run_solo(cls):
321 """if the test case class is timing-sensitive - return true"""
322 return cls.has_tag(TestCaseTag.RUN_SOLO)
323
324 @classmethod
325 def skip_fixme_asan(cls):
326 """if @tag_fixme_asan & ASan is enabled - mark for skip"""
327 if cls.has_tag(TestCaseTag.FIXME_ASAN):
328 vpp_extra_cmake_args = os.environ.get("VPP_EXTRA_CMAKE_ARGS", "")
329 if "DVPP_ENABLE_SANITIZE_ADDR=ON" in vpp_extra_cmake_args:
330 cls = unittest.skip("Skipping @tag_fixme_asan tests")(cls)
331
332 @classmethod
333 def instance(cls):
334 """Return the instance of this testcase"""
335 return cls.test_instance
336
337 @classmethod
338 def set_debug_flags(cls, d):
339 cls.gdbserver_port = 7777
340 cls.debug_core = False
341 cls.debug_gdb = False
342 cls.debug_gdbserver = False
343 cls.debug_all = False
344 cls.debug_attach = False
345 if d is None:
346 return
347 dl = d.lower()
348 if dl == "core":
349 cls.debug_core = True
350 elif dl == "gdb" or dl == "gdb-all":
351 cls.debug_gdb = True
352 elif dl == "gdbserver" or dl == "gdbserver-all":
353 cls.debug_gdbserver = True
354 elif dl == "attach":
355 cls.debug_attach = True
356 else:
357 raise Exception("Unrecognized DEBUG option: '%s'" % d)
358 if dl == "gdb-all" or dl == "gdbserver-all":
359 cls.debug_all = True
360
361 @classmethod
362 def get_vpp_worker_count(cls):
363 if not hasattr(cls, "vpp_worker_count"):
364 if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
365 cls.vpp_worker_count = 0
366 else:
367 cls.vpp_worker_count = config.vpp_worker_count
368 return cls.vpp_worker_count
369
370 @classmethod
371 def get_cpus_required(cls):
372 return 1 + cls.get_vpp_worker_count()
373
374 @classmethod
375 def setUpConstants(cls):
376 """Set-up the test case class based on environment variables"""
377 cls.step = config.step
378 cls.plugin_path = ":".join(config.vpp_plugin_dir)
379 cls.test_plugin_path = ":".join(config.vpp_test_plugin_dir)
380 cls.extern_plugin_path = ":".join(config.extern_plugin_dir)
381 debug_cli = ""
382 if cls.step or cls.debug_gdb or cls.debug_gdbserver:
383 debug_cli = "cli-listen localhost:5002"
384 size = re.search(r"\d+[gG]", config.coredump_size)
385 if size:
386 coredump_size = f"coredump-size {config.coredump_size}".lower()
387 else:
388 coredump_size = "coredump-size unlimited"
389 default_variant = config.variant
390 if default_variant is not None:
391 default_variant = "defaults { %s 100 }" % default_variant
392 else:
393 default_variant = ""
394
395 api_fuzzing = config.api_fuzz
396 if api_fuzzing is None:
397 api_fuzzing = "off"
398
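        # the list below joins into a startup configuration of the usual
        # startup.conf shape, roughly (abbreviated, values vary per testcase):
        #   unix { nodaemon full-coredump coredump-size unlimited
        #          runtime-dir <tempdir> } api-trace { on }
        #   api-segment { prefix <prefix> } cpu { main-core <n> ... }
        #   plugins { plugin dpdk_plugin.so { disable } ... }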
399 cls.vpp_cmdline = [
400 config.vpp,
401 "unix",
402 "{",
403 "nodaemon",
404 debug_cli,
405 "full-coredump",
406 coredump_size,
407 "runtime-dir",
408 cls.tempdir,
409 "}",
410 "api-trace",
411 "{",
412 "on",
413 "}",
414 "api-segment",
415 "{",
416 "prefix",
417 cls.get_api_segment_prefix(),
418 "}",
419 "cpu",
420 "{",
421 "main-core",
422 str(cls.cpus[0]),
423 ]
424 if cls.extern_plugin_path not in (None, ""):
425 cls.extra_vpp_plugin_config.append("add-path %s" % cls.extern_plugin_path)
426 if cls.get_vpp_worker_count():
427 cls.vpp_cmdline.extend(
428 ["corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])]
429 )
430 cls.vpp_cmdline.extend(
431 [
432 "}",
433 "physmem",
434 "{",
435 "max-size",
436 "32m",
437 "}",
438 "statseg",
439 "{",
440 "socket-name",
441 cls.get_stats_sock_path(),
442 cls.extra_vpp_statseg_config,
443 "}",
444 "socksvr",
445 "{",
446 "socket-name",
447 cls.get_api_sock_path(),
448 "}",
449 "node { ",
450 default_variant,
451 "}",
452 "api-fuzz {",
453 api_fuzzing,
454 "}",
455 "plugins",
456 "{",
457 "plugin",
458 "dpdk_plugin.so",
459 "{",
460 "disable",
461 "}",
462 "plugin",
463 "rdma_plugin.so",
464 "{",
465 "disable",
466 "}",
467 "plugin",
468 "lisp_unittest_plugin.so",
469 "{",
470 "enable",
471 "}",
472 "plugin",
473 "unittest_plugin.so",
474 "{",
475 "enable",
476 "}",
477 ]
478 + cls.extra_vpp_plugin_config
479 + [
480 "}",
481 ]
482 )
483
484 if cls.extra_vpp_punt_config is not None:
485 cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
486
487 if not cls.debug_attach:
488 cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
489 cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
490
491 @classmethod
492 def wait_for_enter(cls):
493 if cls.debug_gdbserver:
494 print(double_line_delim)
495 print("Spawned GDB server with PID: %d" % cls.vpp.pid)
496 elif cls.debug_gdb:
497 print(double_line_delim)
498 print("Spawned VPP with PID: %d" % cls.vpp.pid)
499 else:
500 cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
501 return
502 print(single_line_delim)
503 print("You can debug VPP using:")
504 if cls.debug_gdbserver:
505 print(
506 f"sudo gdb {config.vpp} "
507 f"-ex 'target remote localhost:{cls.gdbserver_port}'"
508 )
509 print(
510 "Now is the time to attach gdb by running the above "
511 "command, set up breakpoints etc., then resume VPP from "
512 "within gdb by issuing the 'continue' command"
513 )
514 cls.gdbserver_port += 1
515 elif cls.debug_gdb:
516 print(f"sudo gdb {config.vpp} -ex 'attach {cls.vpp.pid}'")
517 print(
518 "Now is the time to attach gdb by running the above "
519 "command and set up breakpoints etc., then resume VPP from"
520 " within gdb by issuing the 'continue' command"
521 )
522 print(single_line_delim)
523 input("Press ENTER to continue running the testcase...")
524
525 @classmethod
526 def attach_vpp(cls):
527 cls.vpp = DummyVpp()
528
529 @classmethod
530 def run_vpp(cls):
531 cls.logger.debug(f"Assigned cpus: {cls.cpus}")
532 cmdline = cls.vpp_cmdline
533
534 if cls.debug_gdbserver:
535 gdbserver = "/usr/bin/gdbserver"
536 if not os.path.isfile(gdbserver) or not os.access(gdbserver, os.X_OK):
537 raise Exception(
538 "gdbserver binary '%s' does not exist or is "
539 "not executable" % gdbserver
540 )
541
542 cmdline = [
543 gdbserver,
544 "localhost:{port}".format(port=cls.gdbserver_port),
545 ] + cls.vpp_cmdline
546 cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
547
548 try:
549 cls.vpp = subprocess.Popen(
550 cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE
551 )
552 except subprocess.CalledProcessError as e:
553 cls.logger.critical(
554 "Subprocess returned with non-0 return code: (%s)", e.returncode
555 )
556 raise
557 except OSError as e:
558 cls.logger.critical(
559 "Subprocess returned with OS error: (%s) %s", e.errno, e.strerror
560 )
561 raise
562 except Exception as e:
563 cls.logger.exception("Subprocess returned unexpected from %s:", cmdline)
564 raise
565
566 cls.wait_for_enter()
567
568 @classmethod
569 def wait_for_coredump(cls):
570 corefile = cls.tempdir + "/core"
571 if os.path.isfile(corefile):
572 cls.logger.error("Waiting for coredump to complete: %s", corefile)
573 curr_size = os.path.getsize(corefile)
574 deadline = time.time() + 60
575 ok = False
576 while time.time() < deadline:
577 cls.sleep(1)
578 size = curr_size
579 curr_size = os.path.getsize(corefile)
580 if size == curr_size:
581 ok = True
582 break
583 if not ok:
584 cls.logger.error(
585 "Timed out waiting for coredump to complete: %s", corefile
586 )
587 else:
588 cls.logger.error("Coredump complete: %s, size %d", corefile, curr_size)
589
590 @classmethod
591 def get_stats_sock_path(cls):
592 return "%s/stats.sock" % cls.tempdir
593
594 @classmethod
595 def get_api_sock_path(cls):
596 return "%s/api.sock" % cls.tempdir
597
598 @classmethod
599 def get_api_segment_prefix(cls):
600 return os.path.basename(cls.tempdir) # Only used for VAPI
601
602 @classmethod
603 def get_tempdir(cls):
604 if cls.debug_attach:
605 tmpdir = f"{config.tmp_dir}/unittest-attach-gdb"
606 else:
607 tmpdir = f"{config.tmp_dir}/vpp-unittest-{cls.__name__}"
608 if config.wipe_tmp_dir:
609 shutil.rmtree(tmpdir, ignore_errors=True)
610 os.mkdir(tmpdir)
611 return tmpdir
612
613 @classmethod
614 def create_file_handler(cls):
615 if config.log_dir is None:
616 cls.file_handler = FileHandler(f"{cls.tempdir}/log.txt")
617 return
618
619 logdir = f"{config.log_dir}/vpp-unittest-{cls.__name__}"
620 if config.wipe_tmp_dir:
621 shutil.rmtree(logdir, ignore_errors=True)
622 os.mkdir(logdir)
623 cls.file_handler = FileHandler(f"{logdir}/log.txt")
624
625 @classmethod
626 def setUpClass(cls):
627 """
628 Perform class setup before running the testcase
629 Remove shared memory files, start vpp and connect the vpp-api
630 """
631 super(VppTestCase, cls).setUpClass()
632 cls.logger = get_logger(cls.__name__)
633 random.seed(config.rnd_seed)
634 if hasattr(cls, "parallel_handler"):
635 cls.logger.addHandler(cls.parallel_handler)
636 cls.logger.propagate = False
637 cls.set_debug_flags(config.debug)
638 cls.tempdir = cls.get_tempdir()
639 cls.create_file_handler()
640 cls.file_handler.setFormatter(
641 Formatter(fmt="%(asctime)s,%(msecs)03d %(message)s", datefmt="%H:%M:%S")
642 )
643 cls.file_handler.setLevel(DEBUG)
644 cls.logger.addHandler(cls.file_handler)
645 cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
646 os.chdir(cls.tempdir)
647 cls.logger.info(
648 "Temporary dir is %s, api socket is %s",
649 cls.tempdir,
650 cls.get_api_sock_path(),
651 )
652 cls.logger.debug("Random seed is %s", config.rnd_seed)
653 cls.setUpConstants()
654 cls.reset_packet_infos()
655 cls._pcaps = []
656 cls._old_pcaps = []
657 cls.verbose = 0
658 cls.vpp_dead = False
659 cls.registry = VppObjectRegistry()
660 cls.vpp_startup_failed = False
661 cls.reporter = KeepAliveReporter()
662 # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end up with a zombie vpp
664 try:
665 if cls.debug_attach:
666 cls.attach_vpp()
667 else:
668 cls.run_vpp()
669 cls.reporter.send_keep_alive(cls, "setUpClass")
670 VppTestResult.current_test_case_info = TestCaseInfo(
671 cls.logger, cls.tempdir, cls.vpp.pid, config.vpp
672 )
673 cls.vpp_stdout_deque = deque()
674 cls.vpp_stderr_deque = deque()
675 if not cls.debug_attach:
676 cls.pump_thread_stop_flag = Event()
677 cls.pump_thread_wakeup_pipe = os.pipe()
678 cls.pump_thread = Thread(target=pump_output, args=(cls,))
679 cls.pump_thread.daemon = True
680 cls.pump_thread.start()
681 if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
682 cls.vapi_response_timeout = 0
683 cls.vapi = VppPapiProvider(cls.__name__, cls, cls.vapi_response_timeout)
684 if cls.step:
685 hook = hookmodule.StepHook(cls)
686 else:
687 hook = hookmodule.PollHook(cls)
688 cls.vapi.register_hook(hook)
689 cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
690 try:
691 hook.poll_vpp()
692 except VppDiedError:
693 cls.vpp_startup_failed = True
694 cls.logger.critical(
695 "VPP died shortly after startup, check the"
696 " output to standard error for possible cause"
697 )
698 raise
699 try:
700 cls.vapi.connect()
701 except (vpp_papi.VPPIOError, Exception) as e:
702 cls.logger.debug("Exception connecting to vapi: %s" % e)
703 cls.vapi.disconnect()
704
705 if cls.debug_gdbserver:
706 print(
707 colorize(
708 "You're running VPP inside gdbserver but "
709 "VPP-API connection failed, did you forget "
710 "to 'continue' VPP from within gdb?",
711 RED,
712 )
713 )
714 raise e
715 if cls.debug_attach:
716 last_line = cls.vapi.cli("show thread").split("\n")[-2]
717 cls.vpp_worker_count = int(last_line.split(" ")[0])
718 print("Detected VPP with %s workers." % cls.vpp_worker_count)
719 except vpp_papi.VPPRuntimeError as e:
720 cls.logger.debug("%s" % e)
721 cls.quit()
722 raise e
723 except Exception as e:
724 cls.logger.debug("Exception connecting to VPP: %s" % e)
725 cls.quit()
726 raise e
727
728 @classmethod
729 def _debug_quit(cls):
730 if cls.debug_gdbserver or cls.debug_gdb:
731 try:
732 cls.vpp.poll()
733
734 if cls.vpp.returncode is None:
735 print()
736 print(double_line_delim)
737 print("VPP or GDB server is still running")
738 print(single_line_delim)
739 input(
740 "When done debugging, press ENTER to kill the "
741 "process and finish running the testcase..."
742 )
743 except AttributeError:
744 pass
745
746 @classmethod
747 def quit(cls):
748 """
749 Disconnect vpp-api, kill vpp and cleanup shared memory files
750 """
751 cls._debug_quit()
752
753 # first signal that we want to stop the pump thread, then wake it up
754 if hasattr(cls, "pump_thread_stop_flag"):
755 cls.pump_thread_stop_flag.set()
756 if hasattr(cls, "pump_thread_wakeup_pipe"):
757 os.write(cls.pump_thread_wakeup_pipe[1], b"ding dong wake up")
758 if hasattr(cls, "pump_thread"):
759 cls.logger.debug("Waiting for pump thread to stop")
760 cls.pump_thread.join()
761 if hasattr(cls, "vpp_stderr_reader_thread"):
762 cls.logger.debug("Waiting for stderr pump to stop")
763 cls.vpp_stderr_reader_thread.join()
764
765 if hasattr(cls, "vpp"):
766 if hasattr(cls, "vapi"):
767 cls.logger.debug(cls.vapi.vpp.get_stats())
768 cls.logger.debug("Disconnecting class vapi client on %s", cls.__name__)
769 cls.vapi.disconnect()
770 cls.logger.debug("Deleting class vapi attribute on %s", cls.__name__)
771 del cls.vapi
772 cls.vpp.poll()
773 if not cls.debug_attach and cls.vpp.returncode is None:
774 cls.wait_for_coredump()
775 cls.logger.debug("Sending TERM to vpp")
776 cls.vpp.terminate()
777 cls.logger.debug("Waiting for vpp to die")
778 try:
779 outs, errs = cls.vpp.communicate(timeout=5)
780 except subprocess.TimeoutExpired:
781 cls.vpp.kill()
782 outs, errs = cls.vpp.communicate()
783 cls.logger.debug("Deleting class vpp attribute on %s", cls.__name__)
784 if not cls.debug_attach:
785 cls.vpp.stdout.close()
786 cls.vpp.stderr.close()
787 del cls.vpp
788
789 if cls.vpp_startup_failed:
790 stdout_log = cls.logger.info
791 stderr_log = cls.logger.critical
792 else:
793 stdout_log = cls.logger.info
794 stderr_log = cls.logger.info
795
796 if hasattr(cls, "vpp_stdout_deque"):
797 stdout_log(single_line_delim)
798 stdout_log("VPP output to stdout while running %s:", cls.__name__)
799 stdout_log(single_line_delim)
800 vpp_output = "".join(cls.vpp_stdout_deque)
801 with open(cls.tempdir + "/vpp_stdout.txt", "w") as f:
802 f.write(vpp_output)
803 stdout_log("\n%s", vpp_output)
804 stdout_log(single_line_delim)
805
806 if hasattr(cls, "vpp_stderr_deque"):
807 stderr_log(single_line_delim)
808 stderr_log("VPP output to stderr while running %s:", cls.__name__)
809 stderr_log(single_line_delim)
810 vpp_output = "".join(cls.vpp_stderr_deque)
811 with open(cls.tempdir + "/vpp_stderr.txt", "w") as f:
812 f.write(vpp_output)
813 stderr_log("\n%s", vpp_output)
814 stderr_log(single_line_delim)
815
816 @classmethod
817 def tearDownClass(cls):
818 """Perform final cleanup after running all tests in this test-case"""
819 cls.logger.debug("--- tearDownClass() for %s called ---" % cls.__name__)
820 cls.reporter.send_keep_alive(cls, "tearDownClass")
821 cls.quit()
822 cls.file_handler.close()
823 cls.reset_packet_infos()
824 if config.debug_framework:
825 debug_internal.on_tear_down_class(cls)
826
827 def show_commands_at_teardown(self):
828 """Allow subclass specific teardown logging additions."""
829 self.logger.info("--- No test specific show commands provided. ---")
830
831 def tearDown(self):
832 """Show various debug prints after each test"""
833 self.logger.debug(
834 "--- tearDown() for %s.%s(%s) called ---"
835 % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
836 )
837
838 try:
839 if not self.vpp_dead:
840 self.logger.debug(self.vapi.cli("show trace max 1000"))
841 self.logger.info(self.vapi.ppcli("show interface"))
842 self.logger.info(self.vapi.ppcli("show hardware"))
843 self.logger.info(self.statistics.set_errors_str())
844 self.logger.info(self.vapi.ppcli("show run"))
845 self.logger.info(self.vapi.ppcli("show log"))
846 self.logger.info(self.vapi.ppcli("show bihash"))
847 self.logger.info("Logging testcase specific show commands.")
848 self.show_commands_at_teardown()
849 if self.remove_configured_vpp_objects_on_tear_down:
850 self.registry.remove_vpp_config(self.logger)
851 # Save/Dump VPP api trace log
852 m = self._testMethodName
853 api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
854 tmp_api_trace = "/tmp/%s" % api_trace
855 vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
856 self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
857 self.logger.info("Moving %s to %s\n" % (tmp_api_trace, vpp_api_trace_log))
858 os.rename(tmp_api_trace, vpp_api_trace_log)
859 except VppTransportSocketIOError:
860 self.logger.debug(
861 "VppTransportSocketIOError: Vpp dead. Cannot log show commands."
862 )
863 self.vpp_dead = True
864 else:
865 self.registry.unregister_all(self.logger)
866
867 def setUp(self):
868 """Clear trace before running each test"""
869 super(VppTestCase, self).setUp()
870 self.reporter.send_keep_alive(self)
871 if self.vpp_dead:
872 raise VppDiedError(
873 rv=None,
874 testcase=self.__class__.__name__,
875 method_name=self._testMethodName,
876 )
877 self.sleep(0.1, "during setUp")
878 self.vpp_stdout_deque.append(
879 "--- test setUp() for %s.%s(%s) starts here ---\n"
880 % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
881 )
882 self.vpp_stderr_deque.append(
883 "--- test setUp() for %s.%s(%s) starts here ---\n"
884 % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
885 )
886 self.vapi.cli("clear trace")
887 # store the test instance inside the test class - so that objects
888 # holding the class can access instance methods (like assertEqual)
889 type(self).test_instance = self
890
891 @classmethod
892 def pg_enable_capture(cls, interfaces=None):
893 """
894 Enable capture on packet-generator interfaces
895
        :param interfaces: iterable of packet-generator interfaces (if None,
                           cls.pg_interfaces is used)
898
899 """
900 if interfaces is None:
901 interfaces = cls.pg_interfaces
902 for i in interfaces:
903 i.enable_capture()
904
905 @classmethod
906 def register_pcap(cls, intf, worker):
907 """Register a pcap in the testclass"""
908 # add to the list of captures with current timestamp
909 cls._pcaps.append((intf, worker))
910
911 @classmethod
912 def get_vpp_time(cls):
913 # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
914 # returns float("2.190522")
915 timestr = cls.vapi.cli("show clock")
916 head, sep, tail = timestr.partition(",")
917 head, sep, tail = head.partition("Time now")
918 return float(tail)
919
920 @classmethod
921 def sleep_on_vpp_time(cls, sec):
922 """Sleep according to time in VPP world"""
923 # On a busy system with many processes
924 # we might end up with VPP time being slower than real world
925 # So take that into account when waiting for VPP to do something
926 start_time = cls.get_vpp_time()
927 while cls.get_vpp_time() - start_time < sec:
928 cls.sleep(0.1)
929
930 @classmethod
931 def pg_start(cls, trace=True):
932 """Enable the PG, wait till it is done, then clean up"""
933 for (intf, worker) in cls._old_pcaps:
934 intf.handle_old_pcap_file(intf.get_in_path(worker), intf.in_history_counter)
935 cls._old_pcaps = []
936 if trace:
937 cls.vapi.cli("clear trace")
938 cls.vapi.cli("trace add pg-input 1000")
939 cls.vapi.cli("packet-generator enable")
940 # PG, when starts, runs to completion -
941 # so let's avoid a race condition,
942 # and wait a little till it's done.
943 # Then clean it up - and then be gone.
944 deadline = time.time() + 300
945 while cls.vapi.cli("show packet-generator").find("Yes") != -1:
946 cls.sleep(0.01) # yield
947 if time.time() > deadline:
948 cls.logger.error("Timeout waiting for pg to stop")
949 break
950 for intf, worker in cls._pcaps:
951 cls.vapi.cli("packet-generator delete %s" % intf.get_cap_name(worker))
952 cls._old_pcaps = cls._pcaps
953 cls._pcaps = []
954
955 @classmethod
956 def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0, mode=None):
957 """
958 Create packet-generator interfaces.
959
960 :param interfaces: iterable indexes of the interfaces.
961 :returns: List of created interfaces.
962
963 """
964 result = []
965 for i in interfaces:
966 intf = VppPGInterface(cls, i, gso, gso_size, mode)
967 setattr(cls, intf.name, intf)
968 result.append(intf)
969 cls.pg_interfaces = result
970 return result
971
972 @classmethod
973 def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0):
974 pgmode = VppEnum.vl_api_pg_interface_mode_t
975 return cls.create_pg_interfaces_internal(
976 interfaces, gso, gso_size, pgmode.PG_API_MODE_IP4
977 )
978
979 @classmethod
980 def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0):
981 pgmode = VppEnum.vl_api_pg_interface_mode_t
982 return cls.create_pg_interfaces_internal(
983 interfaces, gso, gso_size, pgmode.PG_API_MODE_IP6
984 )
985
986 @classmethod
987 def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
988 pgmode = VppEnum.vl_api_pg_interface_mode_t
989 return cls.create_pg_interfaces_internal(
990 interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET
991 )
992
993 @classmethod
994 def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0):
995 pgmode = VppEnum.vl_api_pg_interface_mode_t
996 return cls.create_pg_interfaces_internal(
997 interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET
998 )
999
1000 @classmethod
1001 def create_loopback_interfaces(cls, count):
1002 """
1003 Create loopback interfaces.
1004
1005 :param count: number of interfaces created.
1006 :returns: List of created interfaces.
1007 """
1008 result = [VppLoInterface(cls) for i in range(count)]
1009 for intf in result:
1010 setattr(cls, intf.name, intf)
1011 cls.lo_interfaces = result
1012 return result
1013
1014 @classmethod
1015 def create_bvi_interfaces(cls, count):
1016 """
1017 Create BVI interfaces.
1018
1019 :param count: number of interfaces created.
1020 :returns: List of created interfaces.
1021 """
1022 result = [VppBviInterface(cls) for i in range(count)]
1023 for intf in result:
1024 setattr(cls, intf.name, intf)
1025 cls.bvi_interfaces = result
1026 return result
1027
1028 @classmethod
1029 def reset_packet_infos(cls):
1030 """Reset the list of packet info objects and packet counts to zero"""
1031 cls._packet_infos = {}
1032 cls._packet_count_for_dst_if_idx = {}
1033
1034 @classmethod
1035 def create_packet_info(cls, src_if, dst_if):
1036 """
1037 Create packet info object containing the source and destination indexes
1038 and add it to the testcase's packet info list
1039
1040 :param VppInterface src_if: source interface
1041 :param VppInterface dst_if: destination interface
1042
1043 :returns: _PacketInfo object
1044
1045 """
1046 info = _PacketInfo()
1047 info.index = len(cls._packet_infos)
1048 info.src = src_if.sw_if_index
1049 info.dst = dst_if.sw_if_index
1050 if isinstance(dst_if, VppSubInterface):
1051 dst_idx = dst_if.parent.sw_if_index
1052 else:
1053 dst_idx = dst_if.sw_if_index
1054 if dst_idx in cls._packet_count_for_dst_if_idx:
1055 cls._packet_count_for_dst_if_idx[dst_idx] += 1
1056 else:
1057 cls._packet_count_for_dst_if_idx[dst_idx] = 1
1058 cls._packet_infos[info.index] = info
1059 return info
1060
1061 @staticmethod
1062 def info_to_payload(info):
1063 """
1064 Convert _PacketInfo object to packet payload
1065
1066 :param info: _PacketInfo object
1067
1068 :returns: string containing serialized data from packet info
1069 """
1070
1071 # retrieve payload, currently 18 bytes (4 x ints + 1 short)
1072 return pack("iiiih", info.index, info.src, info.dst, info.ip, info.proto)
1073
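    # Decoding counterpart (illustrative sketch, not a method defined in this
    # module): the 18-byte payload above can be unpacked with the same format
    # string, e.g.
    #   index, src, dst, ip, proto = unpack("iiiih", payload[:18])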
1074 def get_next_packet_info(self, info):
1075 """
1076 Iterate over the packet info list stored in the testcase
1077 Start iteration with first element if info is None
1078 Continue based on index in info if info is specified
1079
1080 :param info: info or None
1081 :returns: next info in list or None if no more infos
1082 """
1083 if info is None:
1084 next_index = 0
1085 else:
1086 next_index = info.index + 1
1087 if next_index == len(self._packet_infos):
1088 return None
1089 else:
1090 return self._packet_infos[next_index]
1091
1092 def get_next_packet_info_for_interface(self, src_index, info):
1093 """
1094 Search the packet info list for the next packet info with same source
1095 interface index
1096
1097 :param src_index: source interface index to search for
1098 :param info: packet info - where to start the search
1099 :returns: packet info or None
1100
1101 """
1102 while True:
1103 info = self.get_next_packet_info(info)
1104 if info is None:
1105 return None
1106 if info.src == src_index:
1107 return info
1108
1109 def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
1110 """
1111 Search the packet info list for the next packet info with same source
1112 and destination interface indexes
1113
1114 :param src_index: source interface index to search for
1115 :param dst_index: destination interface index to search for
1116 :param info: packet info - where to start the search
1117 :returns: packet info or None
1118
1119 """
1120 while True:
1121 info = self.get_next_packet_info_for_interface(src_index, info)
1122 if info is None:
1123 return None
1124 if info.dst == dst_index:
1125 return info
1126
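    # Typical verification flow (sketch): a test embeds info_to_payload() in
    # each packet it sends, then for every captured packet walks the stored
    # infos with get_next_packet_info_for_interface2(src_idx, dst_idx, info)
    # until it finds the matching _PacketInfo and compares the payload data.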
1127 def assert_equal(self, real_value, expected_value, name_or_class=None):
1128 if name_or_class is None:
1129 self.assertEqual(real_value, expected_value)
1130 return
1131 try:
1132 msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
1133 msg = msg % (
1134 getdoc(name_or_class).strip(),
1135 real_value,
1136 str(name_or_class(real_value)),
1137 expected_value,
1138 str(name_or_class(expected_value)),
1139 )
1140 except Exception:
1141 msg = "Invalid %s: %s does not match expected value %s" % (
1142 name_or_class,
1143 real_value,
1144 expected_value,
1145 )
1146
1147 self.assertEqual(real_value, expected_value, msg)
1148
1149 def assert_in_range(self, real_value, expected_min, expected_max, name=None):
1150 if name is None:
1151 msg = None
1152 else:
1153 msg = "Invalid %s: %s out of range <%s,%s>" % (
1154 name,
1155 real_value,
1156 expected_min,
1157 expected_max,
1158 )
1159 self.assertTrue(expected_min <= real_value <= expected_max, msg)
1160
1161 def assert_ip_checksum_valid(self, received_packet, ignore_zero_checksum=False):
1162 self.assert_checksum_valid(
1163 received_packet, "IP", ignore_zero_checksum=ignore_zero_checksum
1164 )
1165
1166 def assert_tcp_checksum_valid(self, received_packet, ignore_zero_checksum=False):
1167 self.assert_checksum_valid(
1168 received_packet, "TCP", ignore_zero_checksum=ignore_zero_checksum
1169 )
1170
1171 def assert_udp_checksum_valid(self, received_packet, ignore_zero_checksum=True):
1172 self.assert_checksum_valid(
1173 received_packet, "UDP", ignore_zero_checksum=ignore_zero_checksum
1174 )
1175
1176 def assert_icmp_checksum_valid(self, received_packet):
1177 self.assert_checksum_valid(received_packet, "ICMP")
1178 self.assert_embedded_icmp_checksum_valid(received_packet)
1179
1180 def get_counter(self, counter):
1181 if counter.startswith("/"):
1182 counter_value = self.statistics.get_counter(counter)
1183 else:
1184 counters = self.vapi.cli("sh errors").split("\n")
1185 counter_value = 0
1186 for i in range(1, len(counters) - 1):
1187 results = counters[i].split()
1188 if results[1] == counter:
1189 counter_value = int(results[0])
1190 break
1191 return counter_value
1192
1193 def assert_counter_equal(self, counter, expected_value, thread=None, index=0):
1194 c = self.get_counter(counter)
1195 if thread is not None:
1196 c = c[thread][index]
1197 else:
1198 c = sum(x[index] for x in c)
1199 self.assert_equal(c, expected_value, "counter `%s'" % counter)
1200
1201 def assert_packet_counter_equal(self, counter, expected_value):
1202 counter_value = self.get_counter(counter)
1203 self.assert_equal(
1204 counter_value, expected_value, "packet counter `%s'" % counter
1205 )
1206
1207 def assert_error_counter_equal(self, counter, expected_value):
1208 counter_value = self.statistics[counter].sum()
1209 self.assert_equal(counter_value, expected_value, "error counter `%s'" % counter)
1210
1211 @classmethod
1212 def sleep(cls, timeout, remark=None):
1213
1214 # /* Allow sleep(0) to maintain win32 semantics, and as decreed
1215 # * by Guido, only the main thread can be interrupted.
1216 # */
1217 # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
1218 if timeout == 0:
1219 # yield quantum
1220 if hasattr(os, "sched_yield"):
1221 os.sched_yield()
1222 else:
1223 time.sleep(0)
1224 return
1225
1226 cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
1227 before = time.time()
1228 time.sleep(timeout)
1229 after = time.time()
1230 if after - before > 2 * timeout:
1231 cls.logger.error(
1232 "unexpected self.sleep() result - slept for %es instead of ~%es!",
1233 after - before,
1234 timeout,
1235 )
1236
1237 cls.logger.debug(
1238 "Finished sleep (%s) - slept %es (wanted %es)",
1239 remark,
1240 after - before,
1241 timeout,
1242 )
1243
1244 def virtual_sleep(self, timeout, remark=None):
1245 self.logger.debug("Moving VPP time by %s (%s)", timeout, remark)
1246 self.vapi.cli("set clock adjust %s" % timeout)
1247
1248 def pg_send(self, intf, pkts, worker=None, trace=True):
1249 intf.add_stream(pkts, worker=worker)
1250 self.pg_enable_capture(self.pg_interfaces)
1251 self.pg_start(trace=trace)
1252
1253 def snapshot_stats(self, stats_diff):
1254 """Return snapshot of interesting stats based on diff dictionary."""
1255 stats_snapshot = {}
1256 for sw_if_index in stats_diff:
1257 for counter in stats_diff[sw_if_index]:
1258 stats_snapshot[counter] = self.statistics[counter]
1259 self.logger.debug(f"Took statistics stats_snapshot: {stats_snapshot}")
1260 return stats_snapshot
1261
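    # stats_diff, as used by snapshot_stats()/compare_stats_with_snapshot(),
    # is keyed by sw_if_index (or the literal string "err" for error
    # counters); each value maps a counter name to the expected delta, e.g.
    # (counter names purely illustrative):
    #   {1: {"/if/rx": 2}, "err": {"/err/example-node/example reason": 0}}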
1262 def compare_stats_with_snapshot(self, stats_diff, stats_snapshot):
1263 """Assert appropriate difference between current stats and snapshot."""
1264 for sw_if_index in stats_diff:
1265 for cntr, diff in stats_diff[sw_if_index].items():
1266 if sw_if_index == "err":
1267 self.assert_equal(
1268 self.statistics[cntr].sum(),
1269 stats_snapshot[cntr].sum() + diff,
1270 f"'{cntr}' counter value (previous value: "
1271 f"{stats_snapshot[cntr].sum()}, "
1272 f"expected diff: {diff})",
1273 )
1274 else:
1275 try:
1276 self.assert_equal(
1277 self.statistics[cntr][:, sw_if_index].sum(),
1278 stats_snapshot[cntr][:, sw_if_index].sum() + diff,
1279 f"'{cntr}' counter value (previous value: "
1280 f"{stats_snapshot[cntr][:, sw_if_index].sum()}, "
1281 f"expected diff: {diff})",
1282 )
1283 except IndexError:
                        # if diff is 0, then this is most probably a case where
1285 # test declares multiple interfaces but traffic hasn't
1286 # passed through this one yet - which means the counter
1287 # value is 0 and can be ignored
1288 if 0 != diff:
1289 raise
1290
1291 def send_and_assert_no_replies(
1292 self, intf, pkts, remark="", timeout=None, stats_diff=None, trace=True, msg=None
1293 ):
1294 if stats_diff:
1295 stats_snapshot = self.snapshot_stats(stats_diff)
1296
1297 self.pg_send(intf, pkts)
1298
1299 try:
1300 if not timeout:
1301 timeout = 1
1302 for i in self.pg_interfaces:
1303 i.assert_nothing_captured(timeout=timeout, remark=remark)
1304 timeout = 0.1
1305 finally:
1306 if trace:
1307 if msg:
1308 self.logger.debug(f"send_and_assert_no_replies: {msg}")
1309 self.logger.debug(self.vapi.cli("show trace"))
1310
1311 if stats_diff:
1312 self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
1313
1314 def send_and_expect_load_balancing(
1315 self, input, pkts, outputs, worker=None, trace=True
1316 ):
1317 self.pg_send(input, pkts, worker=worker, trace=trace)
1318 rxs = []
1319 for oo in outputs:
1320 rx = oo._get_capture(1)
1321 self.assertNotEqual(0, len(rx))
1322 rxs.append(rx)
1323 if trace:
1324 self.logger.debug(self.vapi.cli("show trace"))
1325 return rxs
1326
1327 def send_and_expect_some(self, intf, pkts, output, worker=None, trace=True):
1328 self.pg_send(intf, pkts, worker=worker, trace=trace)
1329 rx = output._get_capture(1)
1330 if trace:
1331 self.logger.debug(self.vapi.cli("show trace"))
1332 self.assertTrue(len(rx) > 0)
1333 self.assertTrue(len(rx) < len(pkts))
1334 return rx
1335
1336 def send_and_expect_only(self, intf, pkts, output, timeout=None, stats_diff=None):
1337 if stats_diff:
1338 stats_snapshot = self.snapshot_stats(stats_diff)
1339
1340 self.pg_send(intf, pkts)
1341 rx = output.get_capture(len(pkts))
1342 outputs = [output]
1343 if not timeout:
1344 timeout = 1
1345 for i in self.pg_interfaces:
1346 if i not in outputs:
1347 i.assert_nothing_captured(timeout=timeout)
1348 timeout = 0.1
1349
1350 if stats_diff:
1351 self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
1352
1353 return rx
1354
1355
1356def get_testcase_doc_name(test):
1357 return getdoc(test.__class__).splitlines()[0]
1358
1359
1360def get_test_description(descriptions, test):
1361 short_description = test.shortDescription()
1362 if descriptions and short_description:
1363 return short_description
1364 else:
1365 return str(test)
1366
1367
1368class TestCaseInfo(object):
1369 def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
1370 self.logger = logger
1371 self.tempdir = tempdir
1372 self.vpp_pid = vpp_pid
1373 self.vpp_bin_path = vpp_bin_path
1374 self.core_crash_test = None
1375
1376
1377class VppTestResult(unittest.TestResult):
1378 """
1379 @property result_string
1380 String variable to store the test case result string.
1381 @property errors
1382 List variable containing 2-tuples of TestCase instances and strings
1383 holding formatted tracebacks. Each tuple represents a test which
1384 raised an unexpected exception.
1385 @property failures
1386 List variable containing 2-tuples of TestCase instances and strings
1387 holding formatted tracebacks. Each tuple represents a test where
1388 a failure was explicitly signalled using the TestCase.assert*()
1389 methods.
1390 """
1391
1392 failed_test_cases_info = set()
1393 core_crash_test_cases_info = set()
1394 current_test_case_info = None
1395
1396 def __init__(self, stream=None, descriptions=None, verbosity=None, runner=None):
1397 """
        :param stream: File descriptor where test results are reported.
            Set to the standard error stream by default.
        :param descriptions: Boolean flag controlling whether to use
            test case descriptions.
        :param verbosity: Integer variable to store required verbosity level.
        :param runner: Test runner which owns this result object.
1403 """
1404 super(VppTestResult, self).__init__(stream, descriptions, verbosity)
1405 self.stream = stream
1406 self.descriptions = descriptions
1407 self.verbosity = verbosity
1408 self.result_string = None
1409 self.runner = runner
1410 self.printed = []
1411
1412 def addSuccess(self, test):
1413 """
1414 Record a test succeeded result
1415
1416 :param test:
1417
1418 """
1419 if self.current_test_case_info:
1420 self.current_test_case_info.logger.debug(
1421 "--- addSuccess() %s.%s(%s) called"
1422 % (test.__class__.__name__, test._testMethodName, test._testMethodDoc)
1423 )
1424 unittest.TestResult.addSuccess(self, test)
1425 self.result_string = colorize("OK", GREEN)
1426
1427 self.send_result_through_pipe(test, PASS)
1428
1429 def addSkip(self, test, reason):
1430 """
1431 Record a test skipped.
1432
1433 :param test:
1434 :param reason:
1435
1436 """
1437 if self.current_test_case_info:
1438 self.current_test_case_info.logger.debug(
1439 "--- addSkip() %s.%s(%s) called, reason is %s"
1440 % (
1441 test.__class__.__name__,
1442 test._testMethodName,
1443 test._testMethodDoc,
1444 reason,
1445 )
1446 )
1447 unittest.TestResult.addSkip(self, test, reason)
1448 self.result_string = colorize("SKIP", YELLOW)
1449
1450 if reason == "not enough cpus":
1451 self.send_result_through_pipe(test, SKIP_CPU_SHORTAGE)
1452 else:
1453 self.send_result_through_pipe(test, SKIP)
1454
1455 def symlink_failed(self):
1456 if self.current_test_case_info:
1457 try:
1458 failed_dir = config.failed_dir
1459 link_path = os.path.join(
1460 failed_dir,
1461 "%s-FAILED" % os.path.basename(self.current_test_case_info.tempdir),
1462 )
1463
1464 self.current_test_case_info.logger.debug(
1465 "creating a link to the failed test"
1466 )
1467 self.current_test_case_info.logger.debug(
1468 "os.symlink(%s, %s)"
1469 % (self.current_test_case_info.tempdir, link_path)
1470 )
1471 if os.path.exists(link_path):
1472 self.current_test_case_info.logger.debug("symlink already exists")
1473 else:
1474 os.symlink(self.current_test_case_info.tempdir, link_path)
1475
1476 except Exception as e:
1477 self.current_test_case_info.logger.error(e)
1478
1479 def send_result_through_pipe(self, test, result):
1480 if hasattr(self, "test_framework_result_pipe"):
1481 pipe = self.test_framework_result_pipe
1482 if pipe:
1483 pipe.send((test.id(), result))
1484
1485 def log_error(self, test, err, fn_name):
1486 if self.current_test_case_info:
1487 if isinstance(test, unittest.suite._ErrorHolder):
1488 test_name = test.description
1489 else:
1490 test_name = "%s.%s(%s)" % (
1491 test.__class__.__name__,
1492 test._testMethodName,
1493 test._testMethodDoc,
1494 )
1495 self.current_test_case_info.logger.debug(
1496 "--- %s() %s called, err is %s" % (fn_name, test_name, err)
1497 )
1498 self.current_test_case_info.logger.debug(
1499 "formatted exception is:\n%s" % "".join(format_exception(*err))
1500 )
1501
1502 def add_error(self, test, err, unittest_fn, error_type):
1503 if error_type == FAIL:
1504 self.log_error(test, err, "addFailure")
1505 error_type_str = colorize("FAIL", RED)
1506 elif error_type == ERROR:
1507 self.log_error(test, err, "addError")
1508 error_type_str = colorize("ERROR", RED)
1509 else:
1510 raise Exception(
1511 "Error type %s cannot be used to record an "
1512 "error or a failure" % error_type
1513 )
1514
1515 unittest_fn(self, test, err)
1516 if self.current_test_case_info:
1517 self.result_string = "%s [ temp dir used by test case: %s ]" % (
1518 error_type_str,
1519 self.current_test_case_info.tempdir,
1520 )
1521 self.symlink_failed()
1522 self.failed_test_cases_info.add(self.current_test_case_info)
1523 if is_core_present(self.current_test_case_info.tempdir):
1524 if not self.current_test_case_info.core_crash_test:
1525 if isinstance(test, unittest.suite._ErrorHolder):
1526 test_name = str(test)
1527 else:
1528 test_name = "'{!s}' ({!s})".format(
1529 get_testcase_doc_name(test), test.id()
1530 )
1531 self.current_test_case_info.core_crash_test = test_name
1532 self.core_crash_test_cases_info.add(self.current_test_case_info)
1533 else:
1534 self.result_string = "%s [no temp dir]" % error_type_str
1535
1536 self.send_result_through_pipe(test, error_type)
1537
1538 def addFailure(self, test, err):
1539 """
1540 Record a test failed result
1541
1542 :param test:
1543 :param err: error message
1544
1545 """
1546 self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
1547
1548 def addError(self, test, err):
1549 """
1550 Record a test error result
1551
1552 :param test:
1553 :param err: error message
1554
1555 """
1556 self.add_error(test, err, unittest.TestResult.addError, ERROR)
1557
1558 def getDescription(self, test):
1559 """
1560 Get test description
1561
1562 :param test:
1563 :returns: test description
1564
1565 """
1566 return get_test_description(self.descriptions, test)
1567
1568 def startTest(self, test):
1569 """
1570 Start a test
1571
1572 :param test:
1573
1574 """
1575
1576 def print_header(test):
1577 if test.__class__ in self.printed:
1578 return
1579
1580 test_doc = getdoc(test)
1581 if not test_doc:
1582 raise Exception("No doc string for test '%s'" % test.id())
1583
1584 test_title = test_doc.splitlines()[0].rstrip()
1585 test_title = colorize(test_title, GREEN)
1586 if test.is_tagged_run_solo():
1587 test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)
1588
1589 # This block may overwrite the colorized title above,
1590 # but we want this to stand out and be fixed
1591 if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
1592 test_title = colorize(f"FIXME with VPP workers: {test_title}", RED)
1593
1594 if test.has_tag(TestCaseTag.FIXME_ASAN):
1595 test_title = colorize(f"FIXME with ASAN: {test_title}", RED)
1596 test.skip_fixme_asan()
1597
1598 if hasattr(test, "vpp_worker_count"):
1599 if test.vpp_worker_count == 0:
1600 test_title += " [main thread only]"
1601 elif test.vpp_worker_count == 1:
1602 test_title += " [1 worker thread]"
1603 else:
1604 test_title += f" [{test.vpp_worker_count} worker threads]"
1605
1606 if test.__class__.skipped_due_to_cpu_lack:
1607 test_title = colorize(
1608 f"{test_title} [skipped - not enough cpus, "
1609 f"required={test.__class__.get_cpus_required()}, "
1610 f"available={max_vpp_cpus}]",
1611 YELLOW,
1612 )
1613
1614 print(double_line_delim)
1615 print(test_title)
1616 print(double_line_delim)
1617 self.printed.append(test.__class__)
1618
1619 print_header(test)
1620 self.start_test = time.time()
1621 unittest.TestResult.startTest(self, test)
1622 if self.verbosity > 0:
1623 self.stream.writeln("Starting " + self.getDescription(test) + " ...")
1624 self.stream.writeln(single_line_delim)
1625
1626 def stopTest(self, test):
1627 """
1628 Called when the given test has been run
1629
1630 :param test:
1631
1632 """
1633 unittest.TestResult.stopTest(self, test)
1634
1635 if self.verbosity > 0:
1636 self.stream.writeln(single_line_delim)
1637 self.stream.writeln(
1638 "%-73s%s" % (self.getDescription(test), self.result_string)
1639 )
1640 self.stream.writeln(single_line_delim)
1641 else:
1642 self.stream.writeln(
1643 "%-68s %4.2f %s"
1644 % (
1645 self.getDescription(test),
1646 time.time() - self.start_test,
1647 self.result_string,
1648 )
1649 )
1650
1651 self.send_result_through_pipe(test, TEST_RUN)
1652
1653 def printErrors(self):
1654 """
1655 Print errors from running the test case
1656 """
1657 if len(self.errors) > 0 or len(self.failures) > 0:
1658 self.stream.writeln()
1659 self.printErrorList("ERROR", self.errors)
1660 self.printErrorList("FAIL", self.failures)
1661
1662 # ^^ that is the last output from unittest before summary
1663 if not self.runner.print_summary:
1664 devnull = unittest.runner._WritelnDecorator(open(os.devnull, "w"))
1665 self.stream = devnull
1666 self.runner.stream = devnull
1667
1668 def printErrorList(self, flavour, errors):
1669 """
1670 Print error list to the output stream together with error type
1671 and test case description.
1672
1673 :param flavour: error type
1674 :param errors: iterable errors
1675
1676 """
1677 for test, err in errors:
1678 self.stream.writeln(double_line_delim)
1679 self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
1680 self.stream.writeln(single_line_delim)
1681 self.stream.writeln("%s" % err)
1682
1683
1684class VppTestRunner(unittest.TextTestRunner):
1685 """
1686 A basic test runner implementation which prints results to standard error.
1687 """
1688
1689 @property
1690 def resultclass(self):
1691 """Class maintaining the results of the tests"""
1692 return VppTestResult
1693
1694 def __init__(
1695 self,
1696 keep_alive_pipe=None,
1697 descriptions=True,
1698 verbosity=1,
1699 result_pipe=None,
1700 failfast=False,
1701 buffer=False,
1702 resultclass=None,
1703 print_summary=True,
1704 **kwargs,
1705 ):
1706 # ignore stream setting here, use hard-coded stdout to be in sync
1707 # with prints from VppTestCase methods ...
1708 super(VppTestRunner, self).__init__(
1709 sys.stdout, descriptions, verbosity, failfast, buffer, resultclass, **kwargs
1710 )
1711 KeepAliveReporter.pipe = keep_alive_pipe
1712
1713 self.orig_stream = self.stream
1714 self.resultclass.test_framework_result_pipe = result_pipe
1715
1716 self.print_summary = print_summary
1717
1718 def _makeResult(self):
1719 return self.resultclass(self.stream, self.descriptions, self.verbosity, self)
1720
1721 def run(self, test):
1722 """
1723 Run the tests
1724
1725 :param test:
1726
1727 """
1728 faulthandler.enable() # emit stack trace to stderr if killed by signal
1729
1730 result = super(VppTestRunner, self).run(test)
1731 if not self.print_summary:
1732 self.stream = self.orig_stream
1733 result.stream = self.orig_stream
1734 return result
1735
1736
1737class Worker(Thread):
1738 def __init__(self, executable_args, logger, env=None, *args, **kwargs):
1739 super(Worker, self).__init__(*args, **kwargs)
1740 self.logger = logger
1741 self.args = executable_args
1742 if hasattr(self, "testcase") and self.testcase.debug_all:
1743 if self.testcase.debug_gdbserver:
1744 self.args = [
1745 "/usr/bin/gdbserver",
1746 "localhost:{port}".format(port=self.testcase.gdbserver_port),
1747 ] + args
1748 elif self.testcase.debug_gdb and hasattr(self, "wait_for_gdb"):
1749 self.args.append(self.wait_for_gdb)
1750 self.app_bin = executable_args[0]
1751 self.app_name = os.path.basename(self.app_bin)
1752 if hasattr(self, "role"):
1753 self.app_name += " {role}".format(role=self.role)
1754 self.process = None
1755 self.result = None
1756 env = {} if env is None else env
1757 self.env = copy.deepcopy(env)
1758
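    # Typical use (sketch): start() the worker, join() it and read .result for
    # the child's exit code, e.g.
    #   w = Worker(["/usr/bin/true"], logger)
    #   w.start(); w.join(timeout=10); rc = w.result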
1759 def wait_for_enter(self):
1760 if not hasattr(self, "testcase"):
1761 return
1762 if self.testcase.debug_all and self.testcase.debug_gdbserver:
1763 print()
1764 print(double_line_delim)
1765 print(
1766 "Spawned GDB Server for '{app}' with PID: {pid}".format(
1767 app=self.app_name, pid=self.process.pid
1768 )
1769 )
1770 elif self.testcase.debug_all and self.testcase.debug_gdb:
1771 print()
1772 print(double_line_delim)
1773 print(
1774 "Spawned '{app}' with PID: {pid}".format(
1775 app=self.app_name, pid=self.process.pid
1776 )
1777 )
1778 else:
1779 return
1780 print(single_line_delim)
1781 print("You can debug '{app}' using:".format(app=self.app_name))
1782 if self.testcase.debug_gdbserver:
1783 print(
1784 "sudo gdb "
1785 + self.app_bin
1786 + " -ex 'target remote localhost:{port}'".format(
1787 port=self.testcase.gdbserver_port
1788 )
1789 )
1790 print(
1791 "Now is the time to attach gdb by running the above "
1792 "command, set up breakpoints etc., then resume from "
1793 "within gdb by issuing the 'continue' command"
1794 )
1795 self.testcase.gdbserver_port += 1
1796 elif self.testcase.debug_gdb:
1797 print(
1798 "sudo gdb "
1799 + self.app_bin
1800 + " -ex 'attach {pid}'".format(pid=self.process.pid)
1801 )
1802 print(
1803 "Now is the time to attach gdb by running the above "
1804 "command and set up breakpoints etc., then resume from"
1805 " within gdb by issuing the 'continue' command"
1806 )
1807 print(single_line_delim)
1808 input("Press ENTER to continue running the testcase...")
1809
1810 def run(self):
1811 executable = self.args[0]
1812 if not os.path.exists(executable) or not os.access(
1813 executable, os.F_OK | os.X_OK
1814 ):
1815 # Exit code that means some system file did not exist,
1816 # could not be opened, or had some other kind of error.
1817 self.result = os.EX_OSFILE
1818 raise EnvironmentError(
                "executable '%s' was not found or is not executable." % executable
1820 )
1821 self.logger.debug(
1822 "Running executable '{app}': '{cmd}'".format(
1823 app=self.app_name, cmd=" ".join(self.args)
1824 )
1825 )
1826 env = os.environ.copy()
1827 env.update(self.env)
1828 env["CK_LOG_FILE_NAME"] = "-"
1829 self.process = subprocess.Popen(
1830 ["stdbuf", "-o0", "-e0"] + self.args,
1831 shell=False,
1832 env=env,
1833 preexec_fn=os.setpgrp,
1834 stdout=subprocess.PIPE,
1835 stderr=subprocess.PIPE,
1836 )
1837 self.wait_for_enter()
1838 out, err = self.process.communicate()
1839 self.logger.debug("Finished running `{app}'".format(app=self.app_name))
1840 self.logger.info("Return code is `%s'" % self.process.returncode)
1841 self.logger.info(single_line_delim)
1842 self.logger.info(
1843 "Executable `{app}' wrote to stdout:".format(app=self.app_name)
1844 )
1845 self.logger.info(single_line_delim)
1846 self.logger.info(out.decode("utf-8"))
1847 self.logger.info(single_line_delim)
1848 self.logger.info(
1849 "Executable `{app}' wrote to stderr:".format(app=self.app_name)
1850 )
1851 self.logger.info(single_line_delim)
1852 self.logger.info(err.decode("utf-8"))
1853 self.logger.info(single_line_delim)
1854 self.result = self.process.returncode
1855
1856
1857if __name__ == "__main__":
1858 pass