#!/usr/bin/env python3
""" PVTI tests """

import datetime
import base64
import os
import copy
import struct

from hashlib import blake2s
from config import config
from scapy.packet import Raw
from scapy.compat import raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.layers.vxlan import VXLAN

from vpp_interface import VppInterface
from vpp_pg_interface import is_ipv6_misc
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_l2 import VppBridgeDomain, VppBridgeDomainPort
from vpp_vxlan_tunnel import VppVxlanTunnel
from vpp_object import VppObject
from vpp_papi import VppEnum
from asfframework import tag_run_solo, tag_fixme_vpp_debug
from framework import VppTestCase
from re import compile
import unittest


from scapy.packet import Packet, bind_layers
from scapy.fields import (
    FlagsField,
    XByteField,
    XShortField,
    ThreeBytesField,
    ConditionalField,
    ShortField,
    ByteEnumField,
    X3BytesField,
    LEIntField,
    ByteField,
    StrLenField,
    PacketListField,
    LEShortField,
    IntField,
    XIntField,
)

import sys


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


#
# A custom decoder for Scapy for PVTI packet format
#


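# Wire format, as modeled by the two classes below: a PVTI packet starts with
# a fixed header (seq, stream_index, chunk_count, reass_chunk_count,
# mandatory_flags_mask, flags_value, pad_bytes, pad), followed by chunk_count
# chunks; each chunk carries an 8-byte chunk header (2-byte total_chunk_length
# plus 6 pad bytes) and total_chunk_length - 8 bytes of data.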
class PVTIChunk(Packet):
    name = "PVTIChunk"
    fields_desc = [
        ShortField("total_chunk_length", None),
        XShortField("_pad0", 0),
        XIntField("_pad1", 0),
        StrLenField("data", "", length_from=lambda pkt: pkt.total_chunk_length - 8),
    ]

    # This prevents the first chunk from consuming the entire remaining
    # contents of the packet
    def extract_padding(self, s):
        return "", s

    def post_build(self, p, pay):
        if self.total_chunk_length is None and self.data:
            chunk_header_size = 8
            total_len = chunk_header_size + len(self.data)
            p = struct.pack("!H", total_len) + p[2:]
        return p + pay


class PVTI(Packet):
    name = "PVTI"
    PVTI_ALIGN_BYTES = 9
    fields_desc = [
        IntField("seq", 0x0),
        ByteField("stream_index", 0),
        ByteField("chunk_count", None),
        ByteField("reass_chunk_count", 0),
        ByteField("mandatory_flags_mask", 0),
        ByteField("flags_value", 0),
        ByteField("pad_bytes", PVTI_ALIGN_BYTES),
        StrLenField(
            "pad", b"\xca" * PVTI_ALIGN_BYTES, length_from=lambda pkt: pkt.pad_bytes
        ),
        PacketListField("chunks", [], PVTIChunk, count_from=lambda p: p.chunk_count),
    ]

    def mysummary(self):
        return self.sprintf("PVTI (len=%PVTI.total_len%)")

    def post_build(self, p, pay):
        if self.chunk_count is None:
            n_chunks = len(self.chunks)
            # offset of the chunk_count byte within the built header
            offset_of_chunk_count = 5
            p = (
                p[:offset_of_chunk_count]
                + struct.pack("B", n_chunks)
                + p[offset_of_chunk_count + 1 :]
            )
        return p + pay


bind_layers(UDP, PVTI, dport=12312)
# To bind the dissector to both the source and destination test ports, use:
# bind_layers(UDP, PVTI, sport=6192, dport=6192)

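
# A minimal illustrative sketch (a hypothetical helper, not used by the tests):
# build a PVTI packet with one chunk and re-parse it via the UDP port binding
# above. The addresses and payload sizes here are arbitrary assumptions.
def _pvti_scapy_example():
    inner = raw(
        IP(src="192.0.2.10", dst="192.0.2.20")
        / UDP(sport=1000, dport=2000)
        / Raw(b"x" * 40)
    )
    p = (
        IP(src="192.0.2.1", dst="192.0.2.2")
        / UDP(sport=12312, dport=12312)
        / PVTI(seq=1, chunks=[PVTIChunk(data=inner)])
    )
    # post_build() fills in chunk_count and total_chunk_length; re-parsing the
    # built bytes dissects the PVTI layer because of the dport 12312 binding.
    reparsed = IP(raw(p))
    assert reparsed[PVTI].chunk_count == 1
    assert reparsed[PVTI].chunks[0].data == inner
    return reparsed
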

# PVTI ENcapsulator/DEcapsulator
class PvtiEnDe(object):
    """
    PVTI encapsulator/decapsulator
    """

    def __init__(
        self,
        local_ip,
        local_port,
        remote_ip,
        remote_port,
        underlay_mtu=1500,
        for_rx_test=False,
    ):
        self.for_rx_test = for_rx_test
        self.local_ip = local_ip
        self.local_port = local_port
        self.remote_ip = remote_ip
        self.remote_port = remote_port
        self.underlay_mtu = underlay_mtu
        self.stream_index = 0
        self.tx_chunks = []
        self.tx_n_reass_chunks = 0
        self.tx_seq = 42
        # payload = chunk headers + data
        self.max_payload_len = underlay_mtu - len(raw(IP() / UDP() / PVTI()))
        self.pvti_header_len = len(raw(PVTI()))
        self.chunk_header_len = len(raw(PVTIChunk()))
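        # For orientation (derived from the defaults above, not an additional
        # constraint): the encap overhead is a 20-byte IPv4 header, an 8-byte
        # UDP header and a 19-byte PVTI header (including the default 9
        # alignment pad bytes), so with underlay_mtu=1500 max_payload_len
        # works out to 1500 - 47 = 1453 bytes of chunk headers plus data,
        # with each chunk adding an 8-byte chunk header.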

    def get_curr_payload_len(self):
        tx_len = 0
        for c in self.tx_chunks:
            tx_len = tx_len + len(c.data) + self.chunk_header_len
        return tx_len

    def get_payload_room(self):
        return self.max_payload_len - self.get_curr_payload_len()

    def flush_tx_chunks(self, more_frags=False):
        if self.for_rx_test:
            ip_dst = self.local_ip
            ip_src = self.remote_ip
        else:
            ip_src = self.local_ip
            ip_dst = self.remote_ip
        p = (
            IP(
                src=ip_src,
                dst=ip_dst,
                ttl=127,
                frag=0,
                flags=0,
                id=self.tx_seq,
            )
            / UDP(sport=self.local_port, dport=self.remote_port, chksum=0)
            / PVTI(
                reass_chunk_count=self.tx_n_reass_chunks,
                seq=self.tx_seq,
                stream_index=self.stream_index,
                chunks=self.tx_chunks,
            )
        )

        p = IP(raw(p))

        self.tx_n_reass_chunks = 0
        self.tx_chunks = []
        self.tx_seq = self.tx_seq + 1
        return p

    def encap_pkt(self, p):
        out = []
        if IP in p:
            p[IP].ttl = p[IP].ttl - 1
            payload_wip = p[IP].build()
        elif IPv6 in p:
            p[IPv6].hlim = p[IPv6].hlim - 1
            payload_wip = p[IPv6].build()

        split_chunks = False
        huge_solo_packet = (
            len(payload_wip) + self.chunk_header_len > self.get_payload_room()
        ) and len(self.tx_chunks) == 0

        while True:
            available_room = self.get_payload_room()
            chunk_wip_len = len(payload_wip) + self.chunk_header_len
            xpad0 = 0xABAB
            xpad1 = 0xABABABAB

            if chunk_wip_len <= available_room:
                # happy case - there is enough space to fit the entire chunk
                if split_chunks:
                    self.tx_n_reass_chunks = self.tx_n_reass_chunks + 1
                tx = PVTIChunk(data=payload_wip, _pad0=xpad0, _pad1=xpad1)
                self.tx_chunks.append(tx)
                if chunk_wip_len == available_room:
                    # an unlikely perfect fit - send this packet.
                    out.append(self.flush_tx_chunks())
                break
            elif available_room < self.chunk_header_len + 1:
                # Can not fit even a chunk header + 1 byte of data
                # Flush and retry
                out.append(self.flush_tx_chunks())
                continue
            else:
                # Chop as much as we can from the packet
                chop_len = available_room - self.chunk_header_len
                if split_chunks:
                    self.tx_n_reass_chunks = self.tx_n_reass_chunks + 1
                tx = PVTIChunk(data=payload_wip[:chop_len], _pad0=xpad0, _pad1=xpad1)
                self.tx_chunks.append(tx)
                out.append(self.flush_tx_chunks())
                split_chunks = True
                payload_wip = payload_wip[chop_len:]
                continue
        return out

    def encap_packets(self, pkts):
        out = []
        self.start_encap()
        for p in pkts:
            out.extend(self.encap_pkt(p))
        last_pkt = self.finish_encap()
        if last_pkt is not None:
            out.append(last_pkt)
        return out

    def start_encap(self):
        return None

    def finish_encap(self):
        out = None
        if len(self.tx_chunks) > 0:
            out = self.flush_tx_chunks()
        return out

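
# A minimal usage sketch of the encapsulator above (a hypothetical helper, not
# called by the tests): two small inner packets are packed as chunks into PVTI
# packets that fit a 400-byte underlay MTU. Addresses and sizes are arbitrary
# assumptions.
def _pvti_ende_example():
    ende = PvtiEnDe("192.0.2.1", 12312, "192.0.2.2", 12312, underlay_mtu=400)
    inner_pkts = []
    for i in range(2):
        inner_pkts.append(
            IP(src="10.0.0.1", dst="10.0.0.2")
            / UDP(sport=1000, dport=2000 + i)
            / Raw(b"x" * 100)
        )
    # encap_pkt() queues chunks and flushes full underlay packets;
    # finish_encap() flushes whatever is still queued.
    encap_pkts = ende.encap_packets(inner_pkts)
    for p in encap_pkts:
        assert len(raw(p)) <= 400
    return encap_pkts
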

""" TestPvti is a subclass of the VppTestCase class.

PVTI test.

"""


def get_field_bytes(pkt, name):
    fld, val = pkt.getfield_and_val(name)
    return fld.i2m(pkt, val)


class VppPvtiInterface(VppInterface):
    """
    VPP PVTI interface
    """

    def __init__(
        self, test, local_ip, local_port, remote_ip, remote_port, underlay_mtu=1500
    ):
        super(VppPvtiInterface, self).__init__(test)

        self.local_ip = local_ip
        self.local_port = local_port
        self.remote_ip = remote_ip
        self.remote_port = remote_port
        self.underlay_mtu = underlay_mtu

    def get_ende(self, for_rx_test=False):
        return PvtiEnDe(
            self.local_ip,
            self.local_port,
            self.remote_ip,
            self.remote_port,
            self.underlay_mtu,
            for_rx_test,
        )

    def verify_encap_packets(self, orig_pkts, recv_pkts):
        ende = self.get_ende()
        recv2_pkts = ende.encap_packets(orig_pkts)
        out1 = []
        out2 = []
        for i, pkt in enumerate(recv_pkts):
            if IP in pkt:
                rx_pkt = pkt[IP]
            elif IPv6 in pkt:
                rx_pkt = pkt[IPv6]
            else:
                raise Exception("Neither IPv4 nor IPv6")
            py_pkt = recv2_pkts[i]
            if rx_pkt != py_pkt:
                eprint("received packet:")
                rx_pkt.show()
                eprint("python packet:")
                py_pkt.show()
            out1.append(rx_pkt)
            out2.append(py_pkt)
        return (out1, out2)

    def add_vpp_config(self):
        r = self.test.vapi.pvti_interface_create(
            interface={
                "local_ip": self.local_ip,
                "local_port": self.local_port,
                "remote_ip": self.remote_ip,
                "remote_port": self.remote_port,
                "underlay_mtu": self.underlay_mtu,
            }
        )
        self.set_sw_if_index(r.sw_if_index)
        self.test.registry.register(self, self.test.logger)
        return self

    def remove_vpp_config(self):
        self.test.vapi.pvti_interface_delete(sw_if_index=self._sw_if_index)

    def query_vpp_config(self):
        ts = self.test.vapi.pvti_interface_dump(sw_if_index=0xFFFFFFFF)
        for t in ts:
            if (
                t.interface.sw_if_index == self._sw_if_index
                and str(t.interface.local_ip) == self.local_ip
                and t.interface.local_port == self.local_port
                and t.interface.remote_port == self.remote_port
                and str(t.interface.remote_ip) == self.remote_ip
            ):
                self.test.logger.info("QUERY AYXX: true")
                return True
        return False

    def __str__(self):
        return self.object_id()

    def object_id(self):
        return "pvti-%d" % self._sw_if_index


@unittest.skipIf("pvti" in config.excluded_plugins, "Exclude PVTI plugin tests")
# @tag_run_solo
class TestPvti(VppTestCase):
    """Packet Vector Tunnel Interface (PVTI) Test Case"""

    error_str = compile(r"Error")

    # maxDiff = None

    wg4_output_node_name = "/err/wg4-output-tun/"
    wg4_input_node_name = "/err/wg4-input/"
    wg6_output_node_name = "/err/wg6-output-tun/"
    wg6_input_node_name = "/err/wg6-input/"
    kp4_error = wg4_output_node_name + "Keypair error"
    mac4_error = wg4_input_node_name + "Invalid MAC handshake"
    peer4_in_err = wg4_input_node_name + "Peer error"
    peer4_out_err = wg4_output_node_name + "Peer error"
    kp6_error = wg6_output_node_name + "Keypair error"
    mac6_error = wg6_input_node_name + "Invalid MAC handshake"
    peer6_in_err = wg6_input_node_name + "Peer error"
    peer6_out_err = wg6_output_node_name + "Peer error"
    cookie_dec4_err = wg4_input_node_name + "Failed during Cookie decryption"
    cookie_dec6_err = wg6_input_node_name + "Failed during Cookie decryption"
    ratelimited4_err = wg4_input_node_name + "Handshake ratelimited"
    ratelimited6_err = wg6_input_node_name + "Handshake ratelimited"

    @classmethod
    def setUpClass(cls):
        super(TestPvti, cls).setUpClass()
        try:
            cls.create_pg_interfaces(range(2))
            for i in cls.pg_interfaces:
                i.admin_up()
                i.config_ip4()
                i.config_ip6()
                i.resolve_arp()
                i.resolve_ndp()

        except Exception:
            super(TestPvti, cls).tearDownClass()
            raise

    @classmethod
    def tearDownClass(cls):
        super(TestPvti, cls).tearDownClass()

    def setUp(self):
        super(TestPvti, self).setUp()
        self.base_kp4_err = self.statistics.get_err_counter(self.kp4_error)
        self.base_mac4_err = self.statistics.get_err_counter(self.mac4_error)
        self.base_peer4_in_err = self.statistics.get_err_counter(self.peer4_in_err)
        self.base_peer4_out_err = self.statistics.get_err_counter(self.peer4_out_err)
        self.base_kp6_err = self.statistics.get_err_counter(self.kp6_error)
        self.base_mac6_err = self.statistics.get_err_counter(self.mac6_error)
        self.base_peer6_in_err = self.statistics.get_err_counter(self.peer6_in_err)
        self.base_peer6_out_err = self.statistics.get_err_counter(self.peer6_out_err)
        self.base_cookie_dec4_err = self.statistics.get_err_counter(
            self.cookie_dec4_err
        )
        self.base_cookie_dec6_err = self.statistics.get_err_counter(
            self.cookie_dec6_err
        )
        self.base_ratelimited4_err = self.statistics.get_err_counter(
            self.ratelimited4_err
        )
        self.base_ratelimited6_err = self.statistics.get_err_counter(
            self.ratelimited6_err
        )

    def create_packets(
        self, src_ip_if, count=1, size=150, for_rx=False, is_ip6=False, af_mix=False
    ):
        pkts = []
        total_packet_count = count
        padstr0 = ""
        padstr1 = ""
        for i in range(0, 2000):
            padstr0 = padstr0 + (".%03x" % i)
            padstr1 = padstr1 + ("+%03x" % i)

        for i in range(0, total_packet_count):
            if af_mix:
                is_ip6 = i % 2 == 1

            dst_mac = src_ip_if.local_mac
            src_mac = src_ip_if.remote_mac
            if for_rx:
                dst_ip4 = src_ip_if.remote_ip4
                dst_ip6 = src_ip_if.remote_ip6
                src_ip4 = "10.0.%d.4" % i
                src_ip6 = "2001:db8::%x" % i
            else:
                src_ip4 = src_ip_if.remote_ip4
                src_ip6 = src_ip_if.remote_ip6
                dst_ip4 = "10.0.%d.4" % i
                dst_ip6 = "2001:db8::%x" % i
            src_l4 = 1234 + i
            dst_l4 = 4321 + i

            ulp = UDP(sport=src_l4, dport=dst_l4)
            payload = "test pkt #%d" % i
            if i % 2 == 1:
                padstr = padstr1
            else:
                padstr = padstr0

            p = Ether(dst=dst_mac, src=src_mac)
            if is_ip6:
                p /= IPv6(src=src_ip6, dst=dst_ip6)
            else:
                p /= IP(src=src_ip4, dst=dst_ip4, frag=0, flags=0)

            p /= ulp / Raw(payload)

            if i % 2 == 1 or total_packet_count == 1:
                self.extend_packet(p, size, padstr)
            else:
                self.extend_packet(p, 150, padstr)
            pkts.append(p)
        return pkts

    def add_rx_ether_header(self, in_pkts, rx_intf=None):
        out = []
        if rx_intf is None:
            rx_intf = self.pg0
        dst_mac = rx_intf.local_mac
        src_mac = rx_intf.remote_mac
        for p in in_pkts:
            p0 = Ether(dst=dst_mac, src=src_mac) / p[IP]
            out.append(p0)
        return out

    def encap_for_rx_test(self, pkts, rx_intf=None):
        ende = self.pvti0.get_ende(for_rx_test=True)
        encap_pkts = ende.encap_packets(pkts)
        return self.add_rx_ether_header(encap_pkts, rx_intf)

    def decrement_ttl_and_build(self, send_pkts):
        out = []
        pkts = copy.deepcopy(send_pkts)
        for p in pkts:
            p[IP].ttl = p[IP].ttl - 1
            out.append(Ether(p.build()))
        return out

    def create_rx_packets(self, dst_ip_if, rx_intf=None, count=1, size=150):
        pkts = []
        total_packet_count = count
        padstr = ""
        if rx_intf is None:
            rx_intf = self.pg0
        for i in range(0, 2000):
            padstr = padstr + (".%03x" % i)

        dst_mac = rx_intf.local_mac
        src_mac = rx_intf.remote_mac

        for i in range(0, total_packet_count):
            dst_ip4 = dst_ip_if.remote_ip4
            src_ip4 = "10.0.%d.4" % i
            src_l4 = 1234 + i
            dst_l4 = 4321 + i

            ulp = UDP(sport=src_l4, dport=dst_l4)
            payload = "test"

            # if i % 2 == 1 or total_packet_count == 1:
            #     self.extend_packet(p, size, padstr)
            # else:
            #     self.extend_packet(p, 150, padstr)

            pvti = PVTI(seq=42 + i, chunks=[])
            for j in range(0, 32):
                p = (
                    IP(src=src_ip4, dst=dst_ip4, frag=0, flags=0, id=j + 0x4000)
                    / ulp
                    / Raw(payload)
                )
                chunk0 = PVTIChunk(data=raw(p))
                pvti.chunks.append(chunk0)

            p = (
                Ether(dst=dst_mac, src=src_mac)
                / IP(src="192.0.2.1", dst=rx_intf.local_ip4, id=0x3000 + i)
                / UDP(sport=12312, dport=12312)
                / pvti
            )
            # p.show()
            # Ether(raw(p)).show()

            pkts.append(p)
        return pkts

    def send_and_assert_no_replies_ignoring_init(
        self, intf, pkts, remark="", timeout=None
    ):
        self.pg_send(intf, pkts)

        def _filter_out_fn(p):
            return is_ipv6_misc(p) or is_handshake_init(p)

        try:
            if not timeout:
                timeout = 1
            for i in self.pg_interfaces:
                i.assert_nothing_captured(
                    timeout=timeout, remark=remark, filter_out_fn=_filter_out_fn
                )
                timeout = 0.1
        finally:
            pass

    def test_0000_pvti_interface(self):
        """Simple interface creation"""
        local_port = 12312
        peer_addr = self.pg0.remote_ip4  # "192.0.2.1"
        peer_port = 31234
        peer_port = 12312

        # Create interface
        pvti0 = VppPvtiInterface(
            self, self.pg1.local_ip4, local_port, peer_addr, peer_port
        ).add_vpp_config()

        self.logger.info(self.vapi.cli("sh int"))
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show pvti tx peers"))
        self.logger.info(self.vapi.cli("show pvti rx peers"))

        # delete interface
        pvti0.remove_vpp_config()
        # self.logger.info(self.vapi.cli("show pvti interface"))
        # pvti0.add_vpp_config()

    def test_0001_pvti_send_simple_1pkt(self):
        """v4o4 TX: Simple packet: 1 -> 1"""

        self.prepare_for_test("v4o4_1pkt_simple")
        pkts = self.create_packets(self.pg1)

        recv_pkts = self.send_and_expect(self.pg1, pkts, self.pg0)
        for p in recv_pkts:
            self.logger.info(p)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(pkts, recv_pkts)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0101_pvti_send_simple_1pkt(self):
        """v6o4 TX: Simple packet: 1 -> 1"""

        self.prepare_for_test("v6o4_1pkt_simple")
        pkts = self.create_packets(self.pg1, is_ip6=True)

        recv_pkts = self.send_and_expect(self.pg1, pkts, self.pg0, n_rx=1)
        for p in recv_pkts:
            self.logger.info(p)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(pkts, recv_pkts)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0002_pvti_send_simple_2pkt(self):
        """TX: Simple packet: 2 -> 1"""
        self.prepare_for_test("2pkt_simple")

        send_pkts = self.create_packets(self.pg1, count=2)
        pkts = copy.deepcopy(send_pkts)
        rx = self.send_and_expect(self.pg1, pkts, self.pg0, n_rx=1)
        for p in rx:
            self.logger.info(p)
            # p.show()

        payload0 = rx[0][PVTI].chunks[0].data
        payload1 = rx[0][PVTI].chunks[1].data

        pktA0 = IP(payload0)
        pktA1 = IP(payload1)

        p0 = pkts[0][IP]
        p0.ttl = p0.ttl - 1
        pktB0 = IP(p0.build())

        p1 = pkts[1][IP]
        p1.ttl = p1.ttl - 1
        pktB1 = IP(p1.build())

        self.assertEqual(pktA0, pktB0)
        self.assertEqual(pktA1, pktB1)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def prepare_for_test(self, test_name, underlay_mtu=1500, is_ip6=False):
        local_port = 12312
        peer_ip4_addr = "192.0.2.1"
        peer_ip6_addr = "2001:db8:dead::1"
        peer_port = 31234
        peer_port = 12312
        for i in self.pg_interfaces:
            i.test_name = test_name
        if is_ip6:
            self.pvti0 = VppPvtiInterface(
                self,
                self.pg1.local_ip6,
                local_port,
                peer_ip6_addr,
                peer_port,
                underlay_mtu,
            ).add_vpp_config()
        else:
            self.pvti0 = VppPvtiInterface(
                self,
                self.pg1.local_ip4,
                local_port,
                peer_ip4_addr,
                peer_port,
                underlay_mtu,
            ).add_vpp_config()
        self.pvti0.config_ip4()
        self.pvti0.config_ip6()
        self.pvti0.admin_up()

        self.logger.info(self.vapi.cli("ip route add 0.0.0.0/0 via 172.16.3.3"))
        ## FIXME: pointing the route directly at the interface below results in intermittent blackouts.
        # self.logger.info(self.vapi.cli("ip route 0.0.0.0/0 via pvti0"))
        self.logger.info(self.vapi.cli("ip route add ::/0 via pvti0"))
        self.logger.info(self.vapi.cli("ip route add 192.0.2.1/32 via pg0"))
        self.logger.info(self.vapi.cli("ip neighbor pg0 192.0.2.1 000c.0102.0304"))
        self.logger.info(self.vapi.cli("ip route 2001:db8:dead::1/128 via pg0"))
        self.logger.info(
            self.vapi.cli("ip neighbor pg0 2001:db8:dead::1 000c.0102.0304")
        )
        self.logger.info(self.vapi.cli("ip neighbor pg1 172.16.2.2 000c.0102.0304"))
        self.logger.info(self.vapi.cli("sh int"))
        self.logger.info(self.vapi.cli("sh ip fib"))
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("set interface ip pvti-bypass pg0"))

    def cleanup_after_test(self):
        self.logger.info(self.vapi.cli("ip neighbor del pg0 192.0.2.1 000c.0102.0304"))
        self.logger.info(self.vapi.cli("ip neighbor del pg1 172.16.2.2 000c.0102.0304"))
        self.logger.info(self.vapi.cli("ip route del 192.0.2.1/32 via pg0"))
        # self.logger.info(self.vapi.cli("ip route del 0.0.0.0/0 via pvti0"))
        self.logger.info(self.vapi.cli("ip route del ::/0 via pvti0"))
        self.logger.info(self.vapi.cli("sh int"))
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.pvti0.remove_vpp_config()

    def test_0003_pvti_send_simple_1pkt_big(self):
        """TX: Simple big packet: 1 -> 2"""
        self.prepare_for_test("1big_pkt")

        send_pkts = self.create_packets(self.pg1, count=1, size=1900)
        pkts = copy.deepcopy(send_pkts)
        self.logger.info("count: ")
        self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, pkts, self.pg0, n_rx=2)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))
            # p.show()
        payload = rx[0][PVTI].chunks[0].data + rx[1][PVTI].chunks[0].data

        pkt1 = IP(payload)
        p0 = pkts[0][IP]
        p0.ttl = p0.ttl - 1

        pkt0 = IP(p0.build())

        self.assertEqual(pkt0, pkt1)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0004_pvti_send_simple_5pkt_big(self):
        """v4o4 TX: Simple big packets: 5 -> 2"""
        self.prepare_for_test("v4o4_5big_pkt")

        send_pkts = self.create_packets(self.pg1, count=5, size=1050)
        self.logger.info("count: %d " % len(send_pkts))
        # self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=2)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))
            # p.show()

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0104_pvti_send_simple_5pkt_big(self):
        """v6o4 TX: Simple big packets: 5 -> 2"""
        self.prepare_for_test("v6o4_5big_pkt")

        send_pkts = self.create_packets(self.pg1, count=5, size=1050, is_ip6=True)
        self.logger.info("count: %d " % len(send_pkts))
        # self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=2)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))
            # p.show()

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def Xtest_0204_pvti_send_simple_5pkt_mix(self):
        """vXo4 TX: Simple packets mix: 5 -> 2"""
        # FIXME: This test is disabled for now, but left here for the sake of this comment.
        # A mix of IPv4 and IPv6 packets is forwarded by VPP through two
        # different graphs, so after encapsulation it results in two
        # PVTI packets: one with IPv4 chunks and one with IPv6 chunks.
        # The python test encapsulator does not do this, and it is probably
        # not worth trying to mimic this behavior, because one can not
        # expect orderly scheduling of IPv4 vs IPv6 graph processing anyway.
        self.prepare_for_test("vXo4_5big_pkt")

        send_pkts = self.create_packets(self.pg1, count=5, size=1050, af_mix=True)
        # self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=2)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0005_pvti_send_mix_3pkt_medium_mtu(self):
        """TX: small+big+small packets over medium mtu: 3 -> 3"""
        self.prepare_for_test("3pkt_small_mtu", underlay_mtu=400)

        send_pkts = self.create_packets(self.pg1, count=3, size=500)
        pkts = copy.deepcopy(send_pkts)
        self.logger.info("count: %d " % len(send_pkts))
        # self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=3)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))
            # p.show()

        # check the middle chunk which is spread across two packets
        payload = rx[0][PVTI].chunks[1].data + rx[1][PVTI].chunks[0].data

        pkt1 = IP(payload)

        p0 = pkts[1][IP]
        p0.ttl = p0.ttl - 1

        pkt0 = IP(p0.build())
        self.assertEqual(pkt0, pkt1)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0006_pvti_send_mix_4pkt_medium_mtu(self):
        """TX: small+big+small packets over 600 mtu: 4 -> 3"""
        self.prepare_for_test("6pkt_small_mtu", underlay_mtu=600)

        send_pkts = self.create_packets(self.pg1, count=4, size=500)
        pkts = copy.deepcopy(send_pkts)
        # self.logger.info(len(pkts))
        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=3)
        for p in rx:
            self.logger.info(p)
            self.logger.info(len(p[PVTI].chunks[0].data))
            # p.show()

        # check the middle chunk which is spread across two packets
        payload = rx[0][PVTI].chunks[1].data + rx[1][PVTI].chunks[0].data

        pkt1 = IP(payload)

        p0 = pkts[1][IP]
        p0.ttl = p0.ttl - 1

        pkt0 = IP(p0.build())
        self.assertEqual(pkt0, pkt1)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0007_pvti_send_simple_1_3_pkt(self):
        """TX: Simple packet: 1 -> 3, small mtu"""

        self.prepare_for_test("1_3_pkt_simple", underlay_mtu=520)
        send_pkts = self.create_packets(self.pg1, count=1, size=1400)
        pkts = copy.deepcopy(send_pkts)

        rx = self.send_and_expect(self.pg1, pkts, self.pg0, n_rx=3)
        for p in rx:
            self.logger.info(p)

        c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_0008_pvti_chained_1_3_pkt(self):
        """TX: Chained packet: 2700 byte 1 -> 3, mtu 1000"""

        self.prepare_for_test("1_3_pkt_simple", underlay_mtu=1000)
        send_pkts = self.create_packets(self.pg1, count=1, size=2700)
        pkts = copy.deepcopy(send_pkts)

        pkt0 = Ether(raw(pkts[0]))[IP]

        rx = self.send_and_expect(self.pg1, send_pkts, self.pg0, n_rx=3)
        for p in rx:
            self.logger.info(p)

        p0 = pkts[0][IP]
        p0.ttl = p0.ttl - 1
        pkt0 = IP(p0.build())

        payload = (
            rx[0][PVTI].chunks[0].data
            + rx[1][PVTI].chunks[0].data
            + rx[2][PVTI].chunks[0].data
            # + rx[2][PVTI].chunks[1].data
        )
        pkt1 = IP(payload)

        self.assertEqual(pkt0, pkt1)

        # FIXME: this will fail because the send path
        # does not combine the data from two chained blocks.
        # when this succeeds, the above checks in this testcase will need to be redone
        # c_pkts, py_pkts = self.pvti0.verify_encap_packets(send_pkts, rx)
        # self.assertEqual(c_pkts, py_pkts)

        self.cleanup_after_test()

    def test_1001_pvti_rx_simple_1pkt(self):
        """RX: Simple packet: 1 -> 32"""

        self.prepare_for_test("1pkt_rx_simple")
        pkts = self.create_rx_packets(self.pg1, rx_intf=self.pg0)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=32)
        for p in recv_pkts:
            self.logger.info(p)

        self.cleanup_after_test()

    def test_1002_pvti_rx_big_1buf(self):
        """RX: Orig Big packet, single buf: 2 -> 1"""

        self.prepare_for_test("1buf_rx_big")

        pkts_orig = self.create_packets(self.pg1, count=1, size=1900, for_rx=True)
        pkts = self.encap_for_rx_test(pkts_orig, rx_intf=self.pg0)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        known_good_pkts = self.decrement_ttl_and_build(pkts_orig)

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=1)
        for i, p in enumerate(recv_pkts):
            self.logger.info(p)
            self.assertEqual(p[IP], known_good_pkts[i][IP])

        self.cleanup_after_test()

    def test_1003_pvti_rx_big_2buf(self):
        """RX: Very Big packet, chained buf: 3 -> 1"""

        self.prepare_for_test("2buf_rx_big")

        pkts_orig = self.create_packets(self.pg1, count=1, size=3000, for_rx=True)

        pkts = self.encap_for_rx_test(pkts_orig, rx_intf=self.pg0)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        known_good_pkts = self.decrement_ttl_and_build(pkts_orig)

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=1)
        for i, p in enumerate(recv_pkts):
            self.logger.info(p)
            if p[IP] != known_good_pkts[i][IP]:
                p[IP].show()
                known_good_pkts[i][IP].show()
            self.assertEqual(p[IP], known_good_pkts[i][IP])

        self.cleanup_after_test()

    def test_1004_pvti_rx_big_2buf_and_small(self):
        """RX: Very Big packet, chained buf: 3 -> 1 + small pkt"""

        self.prepare_for_test("2buf_rx_big_and_small")

        pkts_orig = self.create_packets(self.pg1, count=2, size=3000, for_rx=True)

        pkts = self.encap_for_rx_test(pkts_orig, rx_intf=self.pg0)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        known_good_pkts = self.decrement_ttl_and_build(pkts_orig)

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=2)
        for i, p in enumerate(recv_pkts):
            self.logger.info(p)
            if p[IP] != known_good_pkts[i][IP]:
                p[IP].show()
                known_good_pkts[i][IP].show()
            self.assertEqual(p[IP], known_good_pkts[i][IP])

        self.cleanup_after_test()

    def test_1005_pvti_rx_big_2buf_and_small_drop(self):
        """RX: Very Big packet, chained buf: 3 -> 1 + small pkt, encap pkt lost"""

        self.prepare_for_test("2buf_rx_big_and_small_drop")

        pkts_orig = self.create_packets(self.pg1, count=3, size=3000, for_rx=True)

        pkts = self.encap_for_rx_test(pkts_orig, rx_intf=self.pg0)
        # drop the second packet after encapsulation (the one with the second frag of the large packet)
        pkts.pop(1)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        known_good_pkts = self.decrement_ttl_and_build(pkts_orig)

        # drop the large original packet, leaving just two small ones
        known_good_pkts.pop(1)

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=2)
        for i, p in enumerate(recv_pkts):
            self.logger.info(p)
            if p[IP] != known_good_pkts[i][IP]:
                p[IP].show()
                known_good_pkts[i][IP].show()
            self.assertEqual(p[IP], known_good_pkts[i][IP])

        self.cleanup_after_test()

    def test_1006_pvti_rx_big_2buf_and_small_drop2(self):
        """RX: Very Big packet, chained buf: 3 -> 1 + small pkt, non-initial frag pkt lost"""

        self.prepare_for_test("2buf_rx_big_and_small_drop2")

        pkts_orig = self.create_packets(self.pg1, count=3, size=6000, for_rx=True)

        pkts = self.encap_for_rx_test(pkts_orig, rx_intf=self.pg0)
        # drop the third packet after encapsulation (the one with a non-initial frag of the large packet)
        pkts.pop(2)
        self.logger.info(self.vapi.cli("show pvti interface"))
        self.logger.info(self.vapi.cli("show udp ports"))

        known_good_pkts = self.decrement_ttl_and_build(pkts_orig)
        # drop the large original packet, leaving just two small ones
        known_good_pkts.pop(1)

        recv_pkts = self.send_and_expect(self.pg0, pkts, self.pg1, n_rx=2)
        for i, p in enumerate(recv_pkts):
            self.logger.info(p)
            if p[IP] != known_good_pkts[i][IP]:
                p[IP].show()
                known_good_pkts[i][IP].show()
            self.assertEqual(p[IP], known_good_pkts[i][IP])

        self.cleanup_after_test()


class PvtiHandoffTests(TestPvti):
    """Pvti Tests in multi worker setup"""

    vpp_worker_count = 2

    def xtest_wg_peer_init(self):
        """Handoff"""

        port = 12383

        # Create interfaces
        wg0 = VppWgInterface(self, self.pg1.local_ip4, port).add_vpp_config()
        wg0.admin_up()
        wg0.config_ip4()

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        peer_1 = VppWgPeer(
            self, wg0, self.pg1.remote_ip4, port + 1, ["10.11.2.0/24", "10.11.3.0/24"]
        ).add_vpp_config()
        self.assertEqual(len(self.vapi.wireguard_peers_dump()), 1)

        r1 = VppIpRoute(
            self, "10.11.3.0", 24, [VppRoutePath("10.11.3.1", wg0.sw_if_index)]
        ).add_vpp_config()

        # skip the first automatic handshake
        self.pg1.get_capture(1, timeout=HANDSHAKE_JITTER)

        # send a valid handshake init for which we expect a response
        p = peer_1.mk_handshake(self.pg1)

        rx = self.send_and_expect(self.pg1, [p], self.pg1)

        peer_1.consume_response(rx[0])

        # send a data packet from the peer through the tunnel
        # this completes the handshake and pins the peer to worker 0
        p = (
            IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20)
            / UDP(sport=222, dport=223)
            / Raw()
        )
        d = peer_1.encrypt_transport(p)
        p = peer_1.mk_tunnel_header(self.pg1) / (
            Pvti(message_type=4, reserved_zero=0)
            / PvtiTransport(
                receiver_index=peer_1.sender, counter=0, encrypted_encapsulated_packet=d
            )
        )
        rxs = self.send_and_expect(self.pg1, [p], self.pg0, worker=0)

        for rx in rxs:
            self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
            self.assertEqual(rx[IP].ttl, 19)

        # send packets that are routed into the tunnel
        # and pin the peer to worker 1
        pe = (
            Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
            / IP(src=self.pg0.remote_ip4, dst="10.11.3.2")
            / UDP(sport=555, dport=556)
            / Raw(b"\x00" * 80)
        )
        rxs = self.send_and_expect(self.pg0, pe * 255, self.pg1, worker=1)
        peer_1.validate_encapped(rxs, pe)

        # send packets into the tunnel, from the other worker
        p = [
            (
                peer_1.mk_tunnel_header(self.pg1)
                / Pvti(message_type=4, reserved_zero=0)
                / PvtiTransport(
                    receiver_index=peer_1.sender,
                    counter=ii + 1,
                    encrypted_encapsulated_packet=peer_1.encrypt_transport(
                        (
                            IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20)
                            / UDP(sport=222, dport=223)
                            / Raw()
                        )
                    ),
                )
            )
            for ii in range(255)
        ]

        rxs = self.send_and_expect(self.pg1, p, self.pg0, worker=1)

        for rx in rxs:
            self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
            self.assertEqual(rx[IP].ttl, 19)

        # send packets that are routed into the tunnel
        # from worker 0
        rxs = self.send_and_expect(self.pg0, pe * 255, self.pg1, worker=0)

        peer_1.validate_encapped(rxs, pe)

        r1.remove_vpp_config()
        peer_1.remove_vpp_config()
        wg0.remove_vpp_config()