/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_ip_vtep_h
#define included_ip_vtep_h

#include <vppinfra/hash.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip46_address.h>

/**
 * @brief Tunnel endpoint key (IPv4)
 *
 * Tunnel modules maintain a set of vtep4_key_t-s to track the local IP
 * addresses that have tunnels established. The bypass node consults the
 * corresponding set to decide whether a packet should bypass normal
 * processing and go directly to the tunnel protocol handler node.
 */

/* *INDENT-OFF* */
typedef CLIB_PACKED
(struct {
  union {
    struct {
      ip4_address_t addr;
      u32 fib_index;
    };
    u64 as_u64;
  };
}) vtep4_key_t;
/* *INDENT-ON* */
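
/*
 * Because the struct is packed, the 4-byte address and the 4-byte
 * fib_index overlay exactly onto as_u64, so a key can be built, hashed
 * and compared as a single scalar.  Illustrative sketch only (this is
 * what vtep4_check below does):
 *
 *   vtep4_key_t k;
 *   k.addr.as_u32 = ip4_hdr->dst_address.as_u32;
 *   k.fib_index = vlib_buffer_get_ip4_fib_index (b);
 *   p = hash_get (t->vtep4, k.as_u64);
 */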

/**
 * @brief Tunnel endpoint key (IPv6)
 *
 * Tunnel modules maintain a set of vtep6_key_t-s to track the local IP
 * addresses that have tunnels established. The bypass node consults the
 * corresponding set to decide whether a packet should bypass normal
 * processing and go directly to the tunnel protocol handler node.
 */

/* *INDENT-OFF* */
typedef CLIB_PACKED
(struct {
  ip6_address_t addr;
  u32 fib_index;
}) vtep6_key_t;
/* *INDENT-ON* */
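
/*
 * Unlike the IPv4 key, a vtep6_key_t does not fit in a single uword, so
 * the IPv6 set below is a memory-keyed hash (hash_create_mem /
 * hash_get_mem) while the IPv4 set keys directly on the 64-bit value.
 */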

typedef struct
{
  uword *vtep4;		/* local ip4 VTEPs keyed on their ip4 addr + fib_index */
  uword *vtep6;		/* local ip6 VTEPs keyed on their ip6 addr + fib_index */
} vtep_table_t;

always_inline vtep_table_t
vtep_table_create ()
{
  vtep_table_t t = { };
  t.vtep6 = hash_create_mem (0, sizeof (vtep6_key_t), sizeof (uword));
  return t;
}
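
/*
 * The ip6 hash is created up front so that its key size is set before any
 * insertions.  The ip4 hash is left NULL and is populated lazily when the
 * first VTEP address is added (see vtep_addr_ref in vnet/ip/vtep.c); a
 * lookup against the still-empty table is simply a miss.
 */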

uword vtep_addr_ref (vtep_table_t * t, u32 fib_index, ip46_address_t * ip);
uword vtep_addr_unref (vtep_table_t * t, u32 fib_index, ip46_address_t * ip);
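
/*
 * Reference-counted add/remove of a local VTEP address, implemented in
 * vnet/ip/vtep.c.  Tunnel creation refs the local endpoint address;
 * tunnel deletion unrefs it, and the entry is removed from the table
 * once the last tunnel using that address goes away.
 */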

always_inline void
vtep4_key_init (vtep4_key_t * k4)
{
  k4->as_u64 = ~((u64) 0);
}

always_inline void
vtep6_key_init (vtep6_key_t * k6)
{
  ip6_address_set_zero (&k6->addr);
  k6->fib_index = (u32) ~ 0;
}

enum
{
  VTEP_CHECK_FAIL = 0,
  VTEP_CHECK_PASS = 1,
  VTEP_CHECK_PASS_UNCHANGED = 2
};
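
/*
 * Return values of the vtep*_check functions: FAIL means the destination
 * address is not a local VTEP and the packet should take the normal path;
 * PASS means it matched and the cached key was updated; PASS_UNCHANGED
 * means it matched the key cached from a previous packet, so a caller can
 * reuse whatever per-key work it has already done.
 */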

always_inline u8
vtep4_check (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
	     vtep4_key_t * last_k4)
{
  vtep4_key_t k4;
  k4.addr.as_u32 = ip40->dst_address.as_u32;
  k4.fib_index = vlib_buffer_get_ip4_fib_index (b0);
  if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
    return VTEP_CHECK_PASS_UNCHANGED;
  if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
    return VTEP_CHECK_FAIL;
  last_k4->as_u64 = k4.as_u64;
  return VTEP_CHECK_PASS;
}
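
/*
 * Usage sketch for a bypass-node loop (hypothetical; the table name is
 * illustrative, not part of this API):
 *
 *   vtep4_key_t last_k4;
 *   vtep4_key_init (&last_k4);
 *   ... for each buffer b0 with ip4 header ip40 ...
 *   if (vtep4_check (&tunnel_main.vtep_table, b0, ip40, &last_k4)
 *       == VTEP_CHECK_FAIL)
 *     continue;   (not addressed to a local VTEP, take the normal path)
 *   otherwise steer the packet to the tunnel protocol handler node.
 */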

typedef struct
{
  vtep4_key_t vtep4_cache[8];
  int idx;
} vtep4_cache_t;

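/*
 * vtep4_cache_t holds the 8 most recently matched IPv4 keys; the 8 u64
 * keys fit in one 512-bit vector, so vtep4_check_vector below can splat
 * the candidate key across a u64x8, compare it against the whole cache in
 * one operation (u64x8_is_equal_mask), and skip the hash lookup on a hit
 * (count_trailing_zeros picks the matching slot).  On a hash hit the key
 * is inserted into the cache round-robin via idx.
 */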
#ifdef CLIB_HAVE_VEC512
always_inline u8
vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
		    vtep4_key_t * last_k4, vtep4_cache_t * vtep4_u512)
{
  vtep4_key_t k4;
  k4.addr.as_u32 = ip40->dst_address.as_u32;
  k4.fib_index = vlib_buffer_get_ip4_fib_index (b0);

  if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
    return VTEP_CHECK_PASS_UNCHANGED;

  u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
  u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
  u8 result = u64x8_is_equal_mask (cache, k4_u64x8);
  if (PREDICT_TRUE (result != 0))
    {
      last_k4->as_u64 =
	vtep4_u512->vtep4_cache[count_trailing_zeros (result)].as_u64;
      return VTEP_CHECK_PASS_UNCHANGED;
    }

  if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
    return VTEP_CHECK_FAIL;

  vtep4_u512->vtep4_cache[vtep4_u512->idx].as_u64 = k4.as_u64;
  vtep4_u512->idx = (vtep4_u512->idx + 1) & 0x7;

  last_k4->as_u64 = k4.as_u64;

  return VTEP_CHECK_PASS;
}
#endif
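/*
 * IPv6 variant of the VTEP check.  The key is wider than a machine word,
 * so the cached key is compared field by field and the set lookup uses
 * the mem-keyed hash (hash_get_mem) rather than a scalar hash_get; there
 * is no vectorized IPv6 variant in this file.
 */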
always_inline u8
vtep6_check (vtep_table_t * t, vlib_buffer_t * b0, ip6_header_t * ip60,
	     vtep6_key_t * last_k6)
{
  vtep6_key_t k6;
  k6.fib_index = vlib_buffer_get_ip6_fib_index (b0);
  if (PREDICT_TRUE (k6.fib_index == last_k6->fib_index
		    && ip60->dst_address.as_u64[0] == last_k6->addr.as_u64[0]
		    && ip60->dst_address.as_u64[1] ==
		    last_k6->addr.as_u64[1]))
    {
      return VTEP_CHECK_PASS_UNCHANGED;
    }
  k6.addr = ip60->dst_address;
  if (PREDICT_FALSE (!hash_get_mem (t->vtep6, &k6)))
    return VTEP_CHECK_FAIL;
  *last_k6 = k6;
  return VTEP_CHECK_PASS;
}
#endif /* included_ip_vtep_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */