#!/usr/bin/env python

import sys
import shutil
import os
import select
import unittest
import argparse
import time
import threading
import signal
import psutil
from multiprocessing import Process, Pipe, cpu_count
from multiprocessing.queues import Queue
from multiprocessing.managers import BaseManager
from framework import VppTestRunner, running_extended_tests, VppTestCase, \
    get_testcase_doc_name, get_test_description
from debug import spawn_gdb
from log import get_parallel_logger, double_line_delim, RED, YELLOW, GREEN, \
    colorize
from discover_tests import discover_tests
from subprocess import check_output, CalledProcessError
from util import check_core_path

# Timeout which controls how long the child has to finish after a core dump
# appears in the test temporary directory. If this is exceeded, the parent
# assumes that the child process is stuck (e.g. waiting for an shm mutex,
# which will never get unlocked) and kills the child.
core_timeout = 3
min_req_shm = 536870912  # min 512MB shm required
# 128MB per extra process
shm_per_process = 134217728


class StreamQueue(Queue):
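    """Queue with a file-like interface (write/flush/fileno) so that a proxy
    to it can stand in for sys.stdout/sys.stderr in a child process."""
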
    def write(self, msg):
        self.put(msg)

    def flush(self):
        sys.__stdout__.flush()
        sys.__stderr__.flush()

    def fileno(self):
        return self._writer.fileno()


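# Custom multiprocessing manager, used only to serve StreamQueue objects
# to child processes (see the register() call below).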
class StreamQueueManager(BaseManager):
    pass


StreamQueueManager.register('Queue', StreamQueue)


def test_runner_wrapper(suite, keep_alive_pipe, result_pipe, stdouterr_queue,
                        logger):
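    """Entry point of the child process: redirect stdout/stderr to the
    shared queue, run the suite and send the result over result_pipe.

    descriptions, verbose and failfast are module-level globals set in
    __main__ and inherited by the forked child."""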
    sys.stdout = stdouterr_queue
    sys.stderr = stdouterr_queue
    VppTestCase.logger = logger
    result = VppTestRunner(keep_alive_pipe=keep_alive_pipe,
                           descriptions=descriptions,
                           verbosity=verbose,
                           failfast=failfast).run(suite)
    result_pipe.send(result)
    result_pipe.close()
    keep_alive_pipe.close()


class TestCaseWrapper(object):
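    """Bookkeeping for one child process and the testcase suite it runs:
    keep-alive and result pipes, the stdout/stderr queue and details of the
    last test seen, used for timeout and core-file handling."""
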
    def __init__(self, testcase_suite, manager):
        self.keep_alive_parent_end, self.keep_alive_child_end = Pipe(
            duplex=False)
        self.result_parent_end, self.result_child_end = Pipe(duplex=False)
        self.testcase_suite = testcase_suite
        self.stdouterr_queue = manager.Queue()
        self.logger = get_parallel_logger(self.stdouterr_queue)
        self.child = Process(target=test_runner_wrapper,
                             args=(testcase_suite, self.keep_alive_child_end,
                                   self.result_child_end, self.stdouterr_queue,
                                   self.logger)
                             )
        self.child.start()
        self.pid = self.child.pid
        self.last_test_temp_dir = None
        self.last_test_vpp_binary = None
        self.last_test = None
        self.result = None
        self.last_heard = time.time()
        self.core_detected_at = None
        self.failed_tests = []

    def close_pipes(self):
        self.keep_alive_child_end.close()
        self.result_child_end.close()
        self.keep_alive_parent_end.close()
        self.result_parent_end.close()


def stdouterr_reader_wrapper(unread_testcases, finished_unread_testcases,
                             read_testcases):
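    """Drain the children's stdout/stderr queues to our stdout, one testcase
    at a time, preferring testcases which have already finished."""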
    read_testcase = None
    while read_testcases.is_set() or len(unread_testcases) > 0:
        if not read_testcase:
            if len(finished_unread_testcases) > 0:
                read_testcase = finished_unread_testcases.pop()
                unread_testcases.remove(read_testcase)
            elif len(unread_testcases) > 0:
                read_testcase = unread_testcases.pop()
        if read_testcase:
            data = ''
            while data is not None:
                sys.stdout.write(data)
                data = read_testcase.stdouterr_queue.get()

            read_testcase.stdouterr_queue.close()
            finished_unread_testcases.discard(read_testcase)
            read_testcase = None


def run_forked(testcases):
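    """Run testcase suites in up to concurrent_tests child processes while
    watching for timeouts and core files. Return a list of
    (testcase suite, result) pairs, where a result of None means the suite
    crashed."""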
    wrapped_testcase_suites = set()

    # suites are unhashable, need to use list
    results = []
    debug_core = os.getenv("DEBUG", "").lower() == "core"
    unread_testcases = set()
    finished_unread_testcases = set()
    manager = StreamQueueManager()
    manager.start()
    for i in range(concurrent_tests):
        if len(testcases) > 0:
            wrapped_testcase_suite = TestCaseWrapper(testcases.pop(0), manager)
            wrapped_testcase_suites.add(wrapped_testcase_suite)
            unread_testcases.add(wrapped_testcase_suite)
        else:
            break

    read_from_testcases = threading.Event()
    read_from_testcases.set()
    stdouterr_thread = threading.Thread(target=stdouterr_reader_wrapper,
                                        args=(unread_testcases,
                                              finished_unread_testcases,
                                              read_from_testcases))
    stdouterr_thread.start()

    while len(wrapped_testcase_suites) > 0:
        finished_testcase_suites = set()
        for wrapped_testcase_suite in wrapped_testcase_suites:
            readable = select.select(
                [wrapped_testcase_suite.keep_alive_parent_end.fileno(),
                 wrapped_testcase_suite.result_parent_end.fileno()],
                [], [], 1)[0]
            if wrapped_testcase_suite.result_parent_end.fileno() in readable:
                results.append(
                    (wrapped_testcase_suite.testcase_suite,
                     wrapped_testcase_suite.result_parent_end.recv()))
                finished_testcase_suites.add(wrapped_testcase_suite)
                continue

            if wrapped_testcase_suite.keep_alive_parent_end.fileno() \
                    in readable:
                while wrapped_testcase_suite.keep_alive_parent_end.poll():
                    wrapped_testcase_suite.last_test, \
                        wrapped_testcase_suite.last_test_vpp_binary, \
                        wrapped_testcase_suite.last_test_temp_dir, \
                        wrapped_testcase_suite.vpp_pid = \
                        wrapped_testcase_suite.keep_alive_parent_end.recv()
                wrapped_testcase_suite.last_heard = time.time()

            fail = False
            if wrapped_testcase_suite.last_heard + test_timeout < \
                    time.time() and not os.path.isfile(
                        "%s/_core_handled" %
                        wrapped_testcase_suite.last_test_temp_dir):
                fail = True
                wrapped_testcase_suite.logger.critical(
                    "Timeout while waiting for child test "
                    "runner process (last test running was "
                    "`%s' in `%s')!" %
                    (wrapped_testcase_suite.last_test,
                     wrapped_testcase_suite.last_test_temp_dir))
            elif not wrapped_testcase_suite.child.is_alive():
                fail = True
                wrapped_testcase_suite.logger.critical(
                    "Child python process unexpectedly died "
                    "(last test running was `%s' in `%s')!" %
                    (wrapped_testcase_suite.last_test,
                     wrapped_testcase_suite.last_test_temp_dir))
            elif wrapped_testcase_suite.last_test_temp_dir and \
                    wrapped_testcase_suite.last_test_vpp_binary:
                core_path = "%s/core" % \
                    wrapped_testcase_suite.last_test_temp_dir
                if os.path.isfile(core_path):
                    if wrapped_testcase_suite.core_detected_at is None:
                        wrapped_testcase_suite.core_detected_at = time.time()
                    elif wrapped_testcase_suite.core_detected_at + \
                            core_timeout < time.time():
                        if not os.path.isfile(
                                "%s/_core_handled" %
                                wrapped_testcase_suite.last_test_temp_dir):
                            wrapped_testcase_suite.logger.critical(
                                "Child python process unresponsive and core-"
                                "file exists in test temporary directory!")
                            fail = True

            if fail:
                failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
                lttd = os.path.basename(
                    wrapped_testcase_suite.last_test_temp_dir)
                link_path = '%s%s-FAILED' % (failed_dir, lttd)
                wrapped_testcase_suite.logger.error(
                    "Creating a link to the failed test: %s -> %s" %
                    (link_path, lttd))
                if not os.path.exists(link_path):
                    os.symlink(wrapped_testcase_suite.last_test_temp_dir,
                               link_path)
                api_post_mortem_path = "/tmp/api_post_mortem.%d" % \
                    wrapped_testcase_suite.vpp_pid
                if os.path.isfile(api_post_mortem_path):
                    wrapped_testcase_suite.logger.error(
                        "Copying api_post_mortem.%d to %s" %
                        (wrapped_testcase_suite.vpp_pid,
                         wrapped_testcase_suite.last_test_temp_dir))
                    shutil.copy2(api_post_mortem_path,
                                 wrapped_testcase_suite.last_test_temp_dir)
                if wrapped_testcase_suite.last_test_temp_dir and \
                        wrapped_testcase_suite.last_test_vpp_binary:
                    core_path = "%s/core" % \
                        wrapped_testcase_suite.last_test_temp_dir
                    if os.path.isfile(core_path):
                        wrapped_testcase_suite.logger.error(
                            "Core-file exists in test temporary directory: %s!"
                            % core_path)
                        check_core_path(wrapped_testcase_suite.logger,
                                        core_path)
                        wrapped_testcase_suite.logger.debug(
                            "Running `file %s':" % core_path)
                        try:
                            info = check_output(["file", core_path])
                            wrapped_testcase_suite.logger.debug(info)
                        except CalledProcessError as e:
                            wrapped_testcase_suite.logger.error(
                                "Could not run `file' utility on core-file, "
                                "rc=%s" % e.returncode)
                        if debug_core:
                            spawn_gdb(
                                wrapped_testcase_suite.last_test_vpp_binary,
                                core_path, wrapped_testcase_suite.logger)
                wrapped_testcase_suite.child.terminate()
                try:
                    # terminating the child process tends to leave an orphan
                    # VPP process around
                    os.kill(wrapped_testcase_suite.vpp_pid, signal.SIGTERM)
                except OSError:
                    # already dead
                    pass
                results.append((wrapped_testcase_suite.testcase_suite, None))
                finished_testcase_suites.add(wrapped_testcase_suite)

        for finished_testcase in finished_testcase_suites:
            finished_testcase.child.join()
            finished_testcase.close_pipes()
            wrapped_testcase_suites.remove(finished_testcase)
            finished_unread_testcases.add(finished_testcase)
            finished_testcase.stdouterr_queue.put(None)
            if len(testcases) > 0:
                new_testcase = TestCaseWrapper(testcases.pop(0), manager)
                wrapped_testcase_suites.add(new_testcase)
                unread_testcases.add(new_testcase)

    read_from_testcases.clear()
    stdouterr_thread.join(test_timeout)
    manager.shutdown()
    return results


class SplitToSuitesCallback:
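    """discover_tests() callback which sorts each test method matching the
    filter into a suite keyed by file and class name; non-matching tests
    are collected in self.filtered."""
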
    def __init__(self, filter_callback):
        self.suites = {}
        self.suite_name = 'default'
        self.filter_callback = filter_callback
        self.filtered = unittest.TestSuite()

    def __call__(self, file_name, cls, method):
        test_method = cls(method)
        if self.filter_callback(file_name, cls.__name__, method):
            self.suite_name = file_name + cls.__name__
            if self.suite_name not in self.suites:
                self.suites[self.suite_name] = unittest.TestSuite()
            self.suites[self.suite_name].addTest(test_method)
        else:
            self.filtered.addTest(test_method)


test_option = "TEST"


def parse_test_option():
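    """Parse the TEST environment variable, of the form
    [file.][class.][function] where '*' and '' act as wildcards, into a
    (file name, class name, function name) filter triple."""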
    f = os.getenv(test_option, None)
    filter_file_name = None
    filter_class_name = None
    filter_func_name = None
    if f:
        if '.' in f:
            parts = f.split('.')
            if len(parts) > 3:
                raise Exception("Unrecognized %s option: %s" %
                                (test_option, f))
            if len(parts) > 2:
                if parts[2] not in ('*', ''):
                    filter_func_name = parts[2]
            if parts[1] not in ('*', ''):
                filter_class_name = parts[1]
            if parts[0] not in ('*', ''):
                if parts[0].startswith('test_'):
                    filter_file_name = parts[0]
                else:
                    filter_file_name = 'test_%s' % parts[0]
        else:
            if f.startswith('test_'):
                filter_file_name = f
            else:
                filter_file_name = 'test_%s' % f
    if filter_file_name:
        filter_file_name = '%s.py' % filter_file_name
    return filter_file_name, filter_class_name, filter_func_name


def filter_tests(tests, filter_cb):
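    """Recursively filter a test suite, keeping only tests for which
    filter_cb(file_name, class_name, func_name) returns True."""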
    result = unittest.suite.TestSuite()
    for t in tests:
        if isinstance(t, unittest.suite.TestSuite):
            # this is a bunch of tests, recursively filter...
            x = filter_tests(t, filter_cb)
            if x.countTestCases() > 0:
                result.addTest(x)
        elif isinstance(t, unittest.TestCase):
            # this is a single test
            parts = t.id().split('.')
            # t.id() for common cases looks like this:
            # test_classifier.TestClassifier.test_acl_ip
            # apply filtering only if the id matches that pattern
            if len(parts) == 3:
                if not filter_cb(parts[0], parts[1], parts[2]):
                    continue
            result.addTest(t)
        else:
            # unexpected object, don't touch it
            result.addTest(t)
    return result


class FilterByTestOption:
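    """Callable filter matching tests against the file/class/function names
    parsed from the TEST option; a filter component of None matches
    anything."""
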
    def __init__(self, filter_file_name, filter_class_name, filter_func_name):
        self.filter_file_name = filter_file_name
        self.filter_class_name = filter_class_name
        self.filter_func_name = filter_func_name

    def __call__(self, file_name, class_name, func_name):
        if self.filter_file_name and file_name != self.filter_file_name:
            return False
        if self.filter_class_name and class_name != self.filter_class_name:
            return False
        if self.filter_func_name and func_name != self.filter_func_name:
            return False
        return True


class FilterByClassList:
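    """Callable filter which keeps only tests whose class name is in the
    given collection (used to rerun just the classes that failed)."""
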
    def __init__(self, class_list):
        self.class_list = class_list

    def __call__(self, file_name, class_name, func_name):
        return class_name in self.class_list


def suite_from_failed(suite, failed):
    filter_cb = FilterByClassList(failed)
    suite = filter_tests(suite, filter_cb)
    return suite


class NonPassedResults(dict):
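    """Dict keyed by result category ('failures', 'errors', 'crashes', ...)
    holding overall counts, plus per-suite result details and the list of
    testcase suites to rerun."""
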
    def __init__(self):
        super(NonPassedResults, self).__init__()
        self.all_testcases = 0
        self.results_per_suite = {}
        self.failures_id = 'failures'
        self.errors_id = 'errors'
        self.crashes_id = 'crashes'
        self.skipped_id = 'skipped'
        self.expectedFailures_id = 'expectedFailures'
        self.unexpectedSuccesses_id = 'unexpectedSuccesses'
        self.rerun = []
        self[self.failures_id] = 0
        self[self.errors_id] = 0
        self[self.crashes_id] = 0
        self[self.skipped_id] = 0
        self[self.expectedFailures_id] = 0
        self[self.unexpectedSuccesses_id] = 0

    def _add_result(self, test, result_id):
        if isinstance(test, VppTestCase):
            parts = test.id().split('.')
            if len(parts) == 3:
                tc_class = get_testcase_doc_name(test)
                if tc_class not in self.results_per_suite:
                    # failed, errored, crashed, skipped, expectedly failed,
                    # unexpectedly passed
                    self.results_per_suite[tc_class] = \
                        {self.failures_id: [],
                         self.errors_id: [],
                         self.crashes_id: [],
                         self.skipped_id: [],
                         self.expectedFailures_id: [],
                         self.unexpectedSuccesses_id: []}
                self.results_per_suite[tc_class][result_id].append(test)
                return True
        return False

    def add_results(self, testcases, testcase_result,
                    duplicates=None):
        for failed_testcase, _ in testcases:
            if self._add_result(failed_testcase, testcase_result):
                if duplicates:
                    if failed_testcase not in duplicates:
                        self[testcase_result] += 1
                else:
                    self[testcase_result] += 1

    def add_result(self, testcase_suite, result):
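        """Account for the result of one testcase suite. Returns 0 if the
        suite passed, 1 if it failed and -1 if it crashed; failed and
        crashed suites are queued for a rerun."""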
        retval = 0
        self.all_testcases += testcase_suite.countTestCases()
        if result:
            # suite finished properly
            if not result.wasSuccessful():
                retval = 1

            self.add_results(result.failures, self.failures_id)
            self.add_results(result.errors, self.errors_id,
                             result.failures + result.errors)
            self.add_results(result.skipped, self.skipped_id)
            self.add_results(result.expectedFailures,
                             self.expectedFailures_id)
            self.add_results(result.unexpectedSuccesses,
                             self.unexpectedSuccesses_id)

        else:
            # suite crashed
            retval = -1
            self.add_results([(x, None) for x in testcase_suite],
                             self.crashes_id)

        if retval != 0:
            if concurrent_tests == 1:
                if result:
                    rerun_classes = {x[0].__class__.__name__ for
                                     x in result.errors}
                    rerun_classes.update({x[0].__class__.__name__ for
                                          x in result.failures})
                    self.rerun.append(suite_from_failed(testcase_suite,
                                                        rerun_classes))
                else:
                    self.rerun.append(testcase_suite)
            else:
                self.rerun.append(testcase_suite)

        return retval

    def print_results(self):
        print('')
        print(double_line_delim)
        print('TEST RESULTS:')
        print('     Executed tests: {}'.format(self.all_testcases))
        print('       Passed tests: {}'.format(
            colorize(str(self.all_testcases - self.all_nonpassed), GREEN)))
        if self[self.failures_id] > 0:
            print('       Failed tests: {}'.format(
                colorize(str(self[self.failures_id]), RED)))
        if self[self.errors_id] > 0:
            print('      Errored tests: {}'.format(
                colorize(str(self[self.errors_id]), RED)))
        if self[self.crashes_id] > 0:
            print('      Crashed tests: {}'.format(
                colorize(str(self[self.crashes_id]), RED)))
        if self[self.skipped_id] > 0:
            print('      Skipped tests: {}'.format(
                colorize(str(self[self.skipped_id]), YELLOW)))
        if self[self.expectedFailures_id] > 0:
            print('  Expected failures: {}'.format(
                colorize(str(self[self.expectedFailures_id]), GREEN)))
        if self[self.unexpectedSuccesses_id] > 0:
            print('Unexpected successes: {}'.format(
                colorize(str(self[self.unexpectedSuccesses_id]), YELLOW)))

        if self.all_failed > 0:
            print('FAILED TESTS:')
            for testcase_class, suite_results in \
                    self.results_per_suite.items():
                failed_testcases = suite_results[self.failures_id]
                errored_testcases = suite_results[self.errors_id]
                crashed_testcases = suite_results[self.crashes_id]
                if len(failed_testcases) or len(errored_testcases) \
                        or len(crashed_testcases):
                    print('  Testcase name: {}'.format(
                        colorize(testcase_class, RED)))
                    for failed_test in failed_testcases:
                        print('     FAILED: {}'.format(
                            colorize(get_test_description(
                                descriptions, failed_test), RED)))
                    for failed_test in errored_testcases:
                        print('    ERRORED: {}'.format(
                            colorize(get_test_description(
                                descriptions, failed_test), RED)))
                    for failed_test in crashed_testcases:
                        print('    CRASHED: {}'.format(
                            colorize(get_test_description(
                                descriptions, failed_test), RED)))

        print(double_line_delim)
        print('')

    @property
    def all_nonpassed(self):
        return self[self.failures_id] + self[self.errors_id] + \
            self[self.crashes_id] + self[self.skipped_id] + \
            self[self.expectedFailures_id] + \
            self[self.unexpectedSuccesses_id]

    @property
    def all_failed(self):
        return self[self.failures_id] + self[self.errors_id] + \
            self[self.crashes_id]


def parse_results(results):
    """
    Prints the number of executed, passed, failed, errored, skipped,
    expectedly failed and unexpectedly passed tests and details about
    failed, errored, expectedly failed and unexpectedly passed tests.

    Also returns any suites where any test failed.

    :param results: list of pairs of (testcase suite, unittest result)
    :return: tuple of (return code, list of testcase suites to rerun)
    """

    results_per_suite = NonPassedResults()
    crashed = False
    failed = False
    for testcase_suite, result in results:
        result_code = results_per_suite.add_result(testcase_suite, result)
        if result_code == 1:
            failed = True
        elif result_code == -1:
            crashed = True

    results_per_suite.print_results()

    if crashed:
        return_code = -1
    elif failed:
        return_code = 1
    else:
        return_code = 0
    return return_code, results_per_suite.rerun


def parse_digit_env(env_var, default):
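    """Return the value of the given environment variable as an int; fall
    back to the default (with a warning) if the value is not a digit
    string."""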
    value = os.getenv(env_var, default)
    if value != default:
        if value.isdigit():
            value = int(value)
        else:
            print('WARNING: unsupported value "%s" for env var "%s", '
                  'defaulting to %s' % (value, env_var, default))
            value = default
    return value


if __name__ == '__main__':

    verbose = parse_digit_env("V", 0)

    test_timeout = parse_digit_env("TIMEOUT", 600)  # default = 10 minutes

    retries = parse_digit_env("RETRIES", 0)

    debug = os.getenv("DEBUG", "n").lower() in ["gdb", "gdbserver"]

    step = os.getenv("STEP", "n").lower() in ("y", "yes", "1")

    force_foreground = \
        os.getenv("FORCE_FOREGROUND", "n").lower() in ("y", "yes", "1")

    run_interactive = debug or step or force_foreground

    test_jobs = os.getenv("TEST_JOBS", "1").lower()  # default = 1 process
    if test_jobs == 'auto':
        if run_interactive:
            concurrent_tests = 1
            print('Interactive mode required, running on one core')
        else:
            shm_free = psutil.disk_usage('/dev/shm').free
            shm_max_processes = 1
            if shm_free < min_req_shm:
                raise Exception('Not enough free space in /dev/shm. Required '
                                'free space is at least %sM.'
                                % (min_req_shm >> 20))
            else:
                extra_shm = shm_free - min_req_shm
                shm_max_processes += extra_shm // shm_per_process
            # cap concurrency by whichever resource is scarcer - available
            # CPUs or shm headroom
            concurrent_tests = min(cpu_count(), shm_max_processes)
            print('Found enough resources to run tests with %s cores'
                  % concurrent_tests)
    elif test_jobs.isdigit():
        concurrent_tests = int(test_jobs)
    else:
        concurrent_tests = 1

    if run_interactive and concurrent_tests > 1:
        raise NotImplementedError(
            'Running tests interactively (DEBUG, STEP or FORCE_FOREGROUND '
            'is set) in parallel (TEST_JOBS is more than 1) is not '
            'supported')

    parser = argparse.ArgumentParser(description="VPP unit tests")
    parser.add_argument("-f", "--failfast", action='store_true',
                        help="fast failure flag")
    parser.add_argument("-d", "--dir", action='append', type=str,
                        help="directory containing test files "
                             "(may be specified multiple times)")
    args = parser.parse_args()
    failfast = args.failfast
    descriptions = True

    print("Running tests using custom test runner")  # debug message
    filter_file, filter_class, filter_func = parse_test_option()

    print("Active filters: file=%s, class=%s, function=%s" % (
        filter_file, filter_class, filter_func))

    filter_cb = FilterByTestOption(filter_file, filter_class, filter_func)

    cb = SplitToSuitesCallback(filter_cb)
    for d in args.dir:
        print("Adding tests from directory tree %s" % d)
        discover_tests(d, cb)

    # suites are not hashable, need to use list
    suites = []
    tests_amount = 0
    for testcase_suite in cb.suites.values():
        tests_amount += testcase_suite.countTestCases()
        suites.append(testcase_suite)

    if concurrent_tests == 1:
        new_suite = unittest.TestSuite()
        for suite in suites:
            new_suite.addTest(suite)

        suites = [new_suite]

    print("%s out of %s tests match specified filters" % (
        tests_amount, tests_amount + cb.filtered.countTestCases()))

    if not running_extended_tests():
        print("Not running extended tests (some tests will be skipped)")

    attempts = retries + 1
    if attempts > 1:
        print("Performing %s attempts to pass the suite..." % attempts)

    if run_interactive:
        # don't fork if an interactive terminal is required
        sys.exit(not VppTestRunner(
            verbosity=verbose, failfast=failfast)
            .run(suites[0]).wasSuccessful())
    else:
        exit_code = 0
        while len(suites) > 0 and attempts > 0:
            tests_amount = sum([x.countTestCases() for x in suites])
            results = run_forked(suites)
            exit_code, suites = parse_results(results)
            attempts -= 1
            if exit_code == 0:
                print('Test run was successful')
            else:
                print('%s attempt(s) left.' % attempts)
        sys.exit(exit_code)