blob: 0f4c47fe11a6809c5b58f3fc2701ebf90f430bf1 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * ip/ip4_fib.h: ip4 mtrie fib
17 *
18 * Copyright (c) 2012 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vnet/ip/ip.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070041#include <vnet/ip/ip4_mtrie.h>
42#include <vnet/fib/ip4_fib.h>
43
44
45/**
46 * Global pool of IPv4 8bit PLYs
47 */
Neale Ranns6bb2db02021-08-06 12:24:14 +000048ip4_mtrie_8_ply_t *ip4_ply_pool;
Ed Warnickecb9cada2015-12-08 15:45:58 -070049
Neale Ranns04a75e32017-03-23 06:46:01 -070050always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000051ip4_mtrie_leaf_is_non_empty (ip4_mtrie_8_ply_t *p, u8 dst_byte)
Ed Warnickecb9cada2015-12-08 15:45:58 -070052{
Neale Ranns04a75e32017-03-23 06:46:01 -070053 /*
54 * It's 'non-empty' if the length of the leaf stored is greater than the
55 * length of a leaf in the covering ply. i.e. the leaf is more specific
56 * than it's would be cover in the covering ply
57 */
58 if (p->dst_address_bits_of_leaves[dst_byte] > p->dst_address_bits_base)
59 return (1);
60 return (0);
61}
62
Neale Ranns6bb2db02021-08-06 12:24:14 +000063always_inline ip4_mtrie_leaf_t
64ip4_mtrie_leaf_set_adj_index (u32 adj_index)
Neale Ranns04a75e32017-03-23 06:46:01 -070065{
Neale Ranns6bb2db02021-08-06 12:24:14 +000066 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070067 l = 1 + 2 * adj_index;
Neale Ranns6bb2db02021-08-06 12:24:14 +000068 ASSERT (ip4_mtrie_leaf_get_adj_index (l) == adj_index);
Neale Ranns04a75e32017-03-23 06:46:01 -070069 return l;
70}
71
72always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000073ip4_mtrie_leaf_is_next_ply (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070074{
75 return (n & 1) == 0;
76}
77
78always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000079ip4_mtrie_leaf_get_next_ply_index (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070080{
Neale Ranns6bb2db02021-08-06 12:24:14 +000081 ASSERT (ip4_mtrie_leaf_is_next_ply (n));
Neale Ranns04a75e32017-03-23 06:46:01 -070082 return n >> 1;
83}
84
Neale Ranns6bb2db02021-08-06 12:24:14 +000085always_inline ip4_mtrie_leaf_t
86ip4_mtrie_leaf_set_next_ply_index (u32 i)
Neale Ranns04a75e32017-03-23 06:46:01 -070087{
Neale Ranns6bb2db02021-08-06 12:24:14 +000088 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070089 l = 0 + 2 * i;
Neale Ranns6bb2db02021-08-06 12:24:14 +000090 ASSERT (ip4_mtrie_leaf_get_next_ply_index (l) == i);
Neale Ranns04a75e32017-03-23 06:46:01 -070091 return l;
92}
93
/* Splat one 32-bit leaf value across a u32x4 vector. AltiVec builds the
 * vector through a union; everything else uses the generic splat. */
#ifndef __ALTIVEC__
#define PLY_X4_SPLAT_INIT(init_x4, init) \
  init_x4 = u32x4_splat (init);
#else
#define PLY_X4_SPLAT_INIT(init_x4, init) \
{ \
  u32x4_union_t y; \
  y.as_u32[0] = init; \
  y.as_u32[1] = init; \
  y.as_u32[2] = init; \
  y.as_u32[3] = init; \
  init_x4 = y.as_u32x4; \
}
#endif

/* Fill every leaf slot of ply 'p' with the leaf value 'init'.
 * NOTE: 'init' is captured from the expansion site (see PLY_INIT and
 * ply_16_init), not passed as a macro argument. With 128-bit vector
 * support the fill is done 16 leaves per iteration (4 x u32x4). */
#ifdef CLIB_HAVE_VEC128
#define PLY_INIT_LEAVES(p) \
{ \
  u32x4 *l, init_x4; \
  \
  PLY_X4_SPLAT_INIT(init_x4, init); \
  for (l = p->leaves_as_u32x4; \
       l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); \
       l += 4) \
    { \
      l[0] = init_x4; \
      l[1] = init_x4; \
      l[2] = init_x4; \
      l[3] = init_x4; \
    } \
}
#else
#define PLY_INIT_LEAVES(p) \
{ \
  u32 *l; \
  \
  for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4) \
    { \
      l[0] = init; \
      l[1] = init; \
      l[2] = init; \
      l[3] = init; \
    } \
}
#endif
139
/* Common initialization for an 8-bit ply: set the non-empty-leaf count,
 * record the covering prefix length in every slot, remember the ply's
 * base length and fill all leaves with 'init'. */
#define PLY_INIT(p, init, prefix_len, ply_base_len) \
{ \
  /* \
   * A leaf is 'empty' if it represents a leaf from the covering PLY \
   * i.e. if the prefix length of the leaf is less than or equal to \
   * the prefix length of the PLY \
   */ \
  p->n_non_empty_leafs = (prefix_len > ply_base_len ? \
			  ARRAY_LEN (p->leaves) : 0); \
  clib_memset (p->dst_address_bits_of_leaves, prefix_len, \
	       sizeof (p->dst_address_bits_of_leaves)); \
  p->dst_address_bits_base = ply_base_len; \
  \
  /* Initialize leaves. */ \
  PLY_INIT_LEAVES(p); \
}
156
/**
 * Initialize an 8-bit ply: all 256 slots get the leaf 'init' with
 * prefix length 'prefix_len'; 'ply_base_len' is the number of address
 * bits consumed before this ply.
 */
static void
ply_8_init (ip4_mtrie_8_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len,
	    u32 ply_base_len)
{
  PLY_INIT (p, init, prefix_len, ply_base_len);
}
163
/**
 * Initialize the 16-bit root ply: all 64K slots get the leaf 'init'
 * with prefix length 'prefix_len'. (PLY_INIT_LEAVES reads 'init' from
 * this scope.)
 */
static void
ply_16_init (ip4_mtrie_16_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len)
{
  clib_memset (p->dst_address_bits_of_leaves, prefix_len,
	       sizeof (p->dst_address_bits_of_leaves));
  PLY_INIT_LEAVES (p);
}
171
/**
 * Allocate a new 8-bit ply from the global pool, pre-filled with
 * 'init_leaf', and return the non-terminal leaf that points at it.
 * NOTE: pool_get_aligned may reallocate the pool, invalidating any
 * ply pointers held by the caller (callers refetch by index).
 */
static ip4_mtrie_leaf_t
ply_create (ip4_mtrie_leaf_t init_leaf, u32 leaf_prefix_len, u32 ply_base_len)
{
  ip4_mtrie_8_ply_t *p;
  /* Get cache aligned ply. */

  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);

  ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
  return ip4_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
}
183
Neale Ranns6bb2db02021-08-06 12:24:14 +0000184always_inline ip4_mtrie_8_ply_t *
Neale Ranns7244a702021-08-06 13:12:00 +0000185get_next_ply_for_leaf (ip4_mtrie_leaf_t l)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700186{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000187 uword n = ip4_mtrie_leaf_get_next_ply_index (l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700188
Neale Rannsa3af3372017-03-28 03:49:52 -0700189 return pool_elt_at_index (ip4_ply_pool, n);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700190}
191
/**
 * Free a 16-bit-root mtrie.
 * The root ply is embedded in the mtrie structure so there is nothing
 * to release here; the assumption is that the IP4 FIB table has emptied
 * the trie before deletion. Debug builds verify no sub-plies remain.
 */
void
ip4_mtrie_16_free (ip4_mtrie_16_t *m)
{
  /* the root ply is embedded so there is nothing to do,
   * the assumption being that the IP4 FIB table has emptied the trie
   * before deletion.
   */
#if CLIB_DEBUG > 0
  int i;
  for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
    {
      ASSERT (!ip4_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
    }
#endif
}
207
/**
 * Initialize a 16-bit-root mtrie: the embedded root ply starts with
 * every slot holding the EMPTY leaf at prefix length 0.
 */
void
ip4_mtrie_16_init (ip4_mtrie_16_t *m)
{
  ply_16_init (&m->root_ply, IP4_MTRIE_LEAF_EMPTY, 0);
}
213
/**
 * Free an 8-bit-root mtrie.
 * Unlike the 16-bit variant, the root ply lives in the global ply pool
 * (see ip4_mtrie_8_init), so it must be returned to the pool here. The
 * assumption is that the IP4 FIB table has emptied the trie before
 * deletion; debug builds verify no sub-plies remain.
 */
void
ip4_mtrie_8_free (ip4_mtrie_8_t *m)
{
  ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);

#if CLIB_DEBUG > 0
  int i;
  for (i = 0; i < ARRAY_LEN (root->leaves); i++)
    {
      ASSERT (!ip4_mtrie_leaf_is_next_ply (root->leaves[i]));
    }
#endif

  pool_put (ip4_ply_pool, root);
}
233
/**
 * Initialize an 8-bit-root mtrie: allocate the root ply from the global
 * pool (the mtrie stores only its pool index) and fill it with EMPTY
 * leaves at prefix length 0.
 */
void
ip4_mtrie_8_init (ip4_mtrie_8_t *m)
{
  ip4_mtrie_8_ply_t *root;

  pool_get (ip4_ply_pool, root);
  m->root_ply = root - ip4_ply_pool;

  ply_8_init (root, IP4_MTRIE_LEAF_EMPTY, 0, 0);
}
244
/**
 * Arguments for a single route add/delete on an mtrie, passed down
 * through the recursive set/unset leaf walks.
 */
typedef struct
{
  /* Destination prefix (pre-masked, network byte order) and its length */
  ip4_address_t dst_address;
  u32 dst_address_length;
  /* Adjacency index to install in (add) or remove from (del) the leaves */
  u32 adj_index;
  /* On delete: the covering prefix's length and adjacency, which backfill
   * the slots the deleted route owned */
  u32 cover_address_length;
  u32 cover_adj_index;
} ip4_mtrie_set_unset_leaf_args_t;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700253
/**
 * Recursively overwrite, throughout 'ply' and all its sub-plies, every
 * terminal leaf that is less specific than 'new_leaf_dst_address_bits'
 * with the terminal leaf 'new_leaf'. Used when a new route's slot is
 * already occupied by a sub-ply: the route must shadow all less-specific
 * entries beneath it.
 */
static void
set_ply_with_more_specific_leaf (ip4_mtrie_8_ply_t *ply,
				 ip4_mtrie_leaf_t new_leaf,
				 uword new_leaf_dst_address_bits)
{
  ip4_mtrie_leaf_t old_leaf;
  uword i;

  ASSERT (ip4_mtrie_leaf_is_terminal (new_leaf));

  for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
    {
      old_leaf = ply->leaves[i];

      /* Recurse into sub plies. */
      if (!ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  ip4_mtrie_8_ply_t *sub_ply = get_next_ply_for_leaf (old_leaf);
	  set_ply_with_more_specific_leaf (sub_ply, new_leaf,
					   new_leaf_dst_address_bits);
	}

      /* Replace less specific terminal leaves with new leaf. */
      else if (new_leaf_dst_address_bits >=
	       ply->dst_address_bits_of_leaves[i])
	{
	  /* Release-store so concurrent lookups never observe a
	   * half-updated slot. */
	  clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
	  ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
	  ply->n_non_empty_leafs += ip4_mtrie_leaf_is_non_empty (ply, i);
	}
    }
}
286
/**
 * Insert the route described by 'a' into the 8-bit ply at
 * 'old_ply_index', which covers byte 'dst_address_byte_index' of the
 * destination address. If the prefix ends within this ply the matching
 * range of slots is filled; otherwise the walk recurses into (creating
 * if necessary) the next-level ply.
 * NOTE: the ply is addressed by pool index, not pointer, because
 * ply_create below may reallocate the pool.
 */
static void
set_leaf (const ip4_mtrie_set_unset_leaf_args_t *a, u32 old_ply_index,
	  u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  i32 n_dst_bits_next_plies;
  u8 dst_byte;
  ip4_mtrie_8_ply_t *old_ply;

  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
      ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply */
      for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;

	  old_leaf = old_ply->leaves[i];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
	    {
	      /* The new leaf is more or equally specific than the one currently
	       * occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one */
		  old_ply->n_non_empty_leafs -=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);

		  old_ply->dst_address_bits_of_leaves[i] =
		    a->dst_address_length;
		  /* Release-store: lookups may run concurrently. */
		  clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);

		  old_ply->n_non_empty_leafs +=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);
		  ASSERT (old_ply->n_non_empty_leafs <=
			  ARRAY_LEN (old_ply->leaves));
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not termial (i.e. a ply),
	       * recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf currently
	   *  occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 8 * (dst_address_byte_index + 1);

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply */
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);

	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  /* Refetch since ply_create may move pool. */
	  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);
	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
    }
}
411
/**
 * Insert the route described by 'a' into the 16-bit root ply of the
 * mtrie. Prefixes of length <= 16 fill a range of root slots directly;
 * longer prefixes recurse into 8-bit sub-plies via set_leaf().
 * Slot indices are u16 values kept in network byte order, hence the
 * host/net swaps when stepping through a range.
 */
static void
set_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  ip4_mtrie_16_ply_t *old_ply;
  i32 n_dst_bits_next_plies;
  u16 dst_byte;

  old_ply = &m->root_ply;

  ASSERT (a->dst_address_length <= 32);

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = 16 - a->dst_address_length;
      ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply */
      for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;
	  u16 slot;

	  slot = clib_net_to_host_u16 (dst_byte);
	  slot += i;
	  slot = clib_host_to_net_u16 (slot);

	  old_leaf = old_ply->leaves[slot];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >=
	      old_ply->dst_address_bits_of_leaves[slot])
	    {
	      /* The new leaf is more or equally specific than the one currently
	       * occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one */
		  old_ply->dst_address_bits_of_leaves[slot] =
		    a->dst_address_length;
		  /* Release-store: lookups may run concurrently. */
		  clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not termial (i.e. a ply),
	       * recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, 2);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf currently
	   *  occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 16;

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply */
	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, 2);
    }
}
521
/**
 * Remove the route described by 'a' from the 8-bit ply 'old_ply',
 * replacing each matching leaf with the covering prefix's leaf
 * (a->cover_adj_index / a->cover_address_length). Recurses into
 * sub-plies; a sub-ply that becomes empty is freed back to the pool.
 *
 * @return 1 if 'old_ply' itself was freed (caller must then drop its
 *         reference to it), 0 otherwise.
 */
static uword
unset_leaf (const ip4_mtrie_set_unset_leaf_args_t *a,
	    ip4_mtrie_8_ply_t *old_ply, u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u8 dst_byte;

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];
  if (n_dst_bits_next_plies < 0)
    dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);

  n_dst_bits_this_ply =
    n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
  n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);

  /* The terminal leaf the route being deleted would have installed. */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
    {
      old_leaf = old_ply->leaves[i];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Backfill this slot if it holds the deleted leaf itself, or if
       * the recursive unset emptied (and freed) the sub-ply below it. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf),
		       dst_address_byte_index + 1)))
	{
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  clib_atomic_store_rel_n (
	    &old_ply->leaves[i],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	  if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
	    {
	      pool_put (ip4_ply_pool, old_ply);
	      /* Old ply was deleted. */
	      return 1;
	    }
#if CLIB_DEBUG > 0
	  else if (dst_address_byte_index)
	    {
	      int ii, count = 0;
	      for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
		{
		  count += ip4_mtrie_leaf_is_non_empty (old_ply, ii);
		}
	      ASSERT (count);
	    }
#endif
	}
    }

  /* Old ply was not deleted. */
  return 0;
}
592
/**
 * Remove the route described by 'a' from the 16-bit root ply,
 * backfilling affected slots with the covering prefix's leaf. The root
 * ply is embedded in the mtrie and is never freed, so unlike
 * unset_leaf() this returns nothing.
 */
static void
unset_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u16 dst_byte;
  ip4_mtrie_16_ply_t *old_ply;

  ASSERT (a->dst_address_length <= 32);

  old_ply = &m->root_ply;
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
			 (16 - a->dst_address_length) : 0);

  /* The terminal leaf the route being deleted would have installed. */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  /* Starting at the value of the byte at this section of the v4 address
   * fill the buckets/slots of the ply */
  for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
    {
      u16 slot;

      slot = clib_net_to_host_u16 (dst_byte);
      slot += i;
      slot = clib_host_to_net_u16 (slot);

      old_leaf = old_ply->leaves[slot];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Backfill if the slot holds the deleted leaf, or if the recursive
       * unset emptied (and freed) the sub-ply below it. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf), 2)))
	{
	  clib_atomic_store_rel_n (
	    &old_ply->leaves[slot],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
	}
    }
}
638
639void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000640ip4_mtrie_16_route_add (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
641 u32 dst_address_length, u32 adj_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700642{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000643 ip4_mtrie_set_unset_leaf_args_t a;
Dave Barachd7cb1b52016-12-09 09:52:16 -0500644 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700645
Ed Warnickecb9cada2015-12-08 15:45:58 -0700646 /* Honor dst_address_length. Fib masks are in network byte order */
Neale Rannsa3af3372017-03-28 03:49:52 -0700647 a.dst_address.as_u32 = (dst_address->as_u32 &
648 im->fib_masks[dst_address_length]);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700649 a.dst_address_length = dst_address_length;
650 a.adj_index = adj_index;
651
Neale Rannsa3af3372017-03-28 03:49:52 -0700652 set_root_leaf (m, &a);
653}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700654
Neale Rannsa3af3372017-03-28 03:49:52 -0700655void
Neale Ranns7244a702021-08-06 13:12:00 +0000656ip4_mtrie_8_route_add (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
657 u32 dst_address_length, u32 adj_index)
658{
659 ip4_mtrie_set_unset_leaf_args_t a;
660 ip4_main_t *im = &ip4_main;
661
662 /* Honor dst_address_length. Fib masks are in network byte order */
663 a.dst_address.as_u32 =
664 (dst_address->as_u32 & im->fib_masks[dst_address_length]);
665 a.dst_address_length = dst_address_length;
666 a.adj_index = adj_index;
667
668 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
669
670 set_leaf (&a, root - ip4_ply_pool, 0);
671}
672
673void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000674ip4_mtrie_16_route_del (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
675 u32 dst_address_length, u32 adj_index,
676 u32 cover_address_length, u32 cover_adj_index)
Neale Rannsa3af3372017-03-28 03:49:52 -0700677{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000678 ip4_mtrie_set_unset_leaf_args_t a;
Neale Rannsa3af3372017-03-28 03:49:52 -0700679 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700680
Neale Rannsa3af3372017-03-28 03:49:52 -0700681 /* Honor dst_address_length. Fib masks are in network byte order */
682 a.dst_address.as_u32 = (dst_address->as_u32 &
683 im->fib_masks[dst_address_length]);
684 a.dst_address_length = dst_address_length;
685 a.adj_index = adj_index;
686 a.cover_adj_index = cover_adj_index;
687 a.cover_address_length = cover_address_length;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700688
Neale Rannsa3af3372017-03-28 03:49:52 -0700689 /* the top level ply is never removed */
690 unset_root_leaf (m, &a);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700691}
692
Neale Ranns7244a702021-08-06 13:12:00 +0000693void
694ip4_mtrie_8_route_del (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
695 u32 dst_address_length, u32 adj_index,
696 u32 cover_address_length, u32 cover_adj_index)
697{
698 ip4_main_t *im = &ip4_main;
699
700 /* Honor dst_address_length. Fib masks are in network byte order */
701 ip4_mtrie_set_unset_leaf_args_t a = {
702 .dst_address.as_u32 =
703 (dst_address->as_u32 & im->fib_masks[dst_address_length]),
704 .dst_address_length = dst_address_length,
705 .adj_index = adj_index,
706 .cover_adj_index = cover_adj_index,
707 .cover_address_length = cover_address_length,
708 };
709
710 /* the top level ply is never removed */
711 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
712
713 unset_leaf (&a, root, 0);
714}
715
Ed Warnickecb9cada2015-12-08 15:45:58 -0700716/* Returns number of bytes of memory used by mtrie. */
Dave Barachd7cb1b52016-12-09 09:52:16 -0500717static uword
Neale Ranns7244a702021-08-06 13:12:00 +0000718mtrie_ply_memory_usage (ip4_mtrie_8_ply_t *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700719{
720 uword bytes, i;
721
Ed Warnickecb9cada2015-12-08 15:45:58 -0700722 bytes = sizeof (p[0]);
Dave Barachd7cb1b52016-12-09 09:52:16 -0500723 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700724 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000725 ip4_mtrie_leaf_t l = p->leaves[i];
726 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000727 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Neale Rannsa3af3372017-03-28 03:49:52 -0700728 }
729
730 return bytes;
731}
732
733/* Returns number of bytes of memory used by mtrie. */
Neale Rannsc87aafa2017-11-29 00:59:31 -0800734uword
Neale Ranns6bb2db02021-08-06 12:24:14 +0000735ip4_mtrie_16_memory_usage (ip4_mtrie_16_t *m)
Neale Rannsa3af3372017-03-28 03:49:52 -0700736{
737 uword bytes, i;
738
739 bytes = sizeof (*m);
740 for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
741 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000742 ip4_mtrie_leaf_t l = m->root_ply.leaves[i];
743 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000744 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
745 }
746
747 return bytes;
748}
749uword
750ip4_mtrie_8_memory_usage (ip4_mtrie_8_t *m)
751{
752 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
753 uword bytes, i;
754
755 bytes = sizeof (*m);
756 for (i = 0; i < ARRAY_LEN (root->leaves); i++)
757 {
758 ip4_mtrie_leaf_t l = root->leaves[i];
759 if (ip4_mtrie_leaf_is_next_ply (l))
760 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700761 }
762
763 return bytes;
764}
765
Dave Barachd7cb1b52016-12-09 09:52:16 -0500766static u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000767format_ip4_mtrie_leaf (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700768{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000769 ip4_mtrie_leaf_t l = va_arg (*va, ip4_mtrie_leaf_t);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700770
Neale Ranns6bb2db02021-08-06 12:24:14 +0000771 if (ip4_mtrie_leaf_is_terminal (l))
772 s = format (s, "lb-index %d", ip4_mtrie_leaf_get_adj_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700773 else
Neale Ranns6bb2db02021-08-06 12:24:14 +0000774 s = format (s, "next ply %d", ip4_mtrie_leaf_get_next_ply_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700775 return s;
776}
777
/*
 * Format the leaf at slot _i of ply _p, then recurse into its child ply
 * (if any).
 *   _a            - address bits accumulated for this slot (host order)
 *   _base_address - address bits accumulated above this ply (host order)
 *   _ply_max_len  - prefix length covered once this ply is consumed
 *   _indent       - output indentation, grows by 4/8 per level
 * NOTE(review): this is a GCC statement-expression macro and it expands
 * `format_ip4_mtrie_ply` with an identifier `m` captured from the
 * CALLER's scope - every user must have a suitable `m` in scope.
 */
#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent)       \
  ({                                                                          \
    u32 a, ia_length;                                                         \
    ip4_address_t ia;                                                         \
    ip4_mtrie_leaf_t _l = (_p)->leaves[(_i)];                                 \
                                                                              \
    a = (_base_address) + ((_a) << (32 - (_ply_max_len)));                    \
    ia.as_u32 = clib_host_to_net_u32 (a);                                     \
    ia_length = (_p)->dst_address_bits_of_leaves[(_i)];                       \
    s = format (s, "\n%U%U %U", format_white_space, (_indent) + 4,            \
		format_ip4_address_and_length, &ia, ia_length,                \
		format_ip4_mtrie_leaf, _l);                                   \
                                                                              \
    if (ip4_mtrie_leaf_is_next_ply (_l))                                      \
      s = format (s, "\n%U", format_ip4_mtrie_ply, m, a, (_indent) + 8,       \
		  ip4_mtrie_leaf_get_next_ply_index (_l));                    \
    s;                                                                        \
  })
Neale Rannsa3af3372017-03-28 03:49:52 -0700796
/* Format one 8-bit ply of a mtrie.
 * va_args: ip4_mtrie_16_t *m, u32 base_address, u32 indent, u32 ply_index.
 * Recurses into child plies indirectly through FORMAT_PLY, which also
 * captures the local `m` by name - do not rename it. */
static u8 *
format_ip4_mtrie_ply (u8 *s, va_list *va)
{
  ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
  u32 base_address = va_arg (*va, u32);
  u32 indent = va_arg (*va, u32);
  u32 ply_index = va_arg (*va, u32);
  ip4_mtrie_8_ply_t *p;
  int i;

  p = pool_elt_at_index (ip4_ply_pool, ply_index);
  s = format (s, "%Uply index %d, %d non-empty leaves",
	      format_white_space, indent, ply_index, p->n_non_empty_leafs);

  for (i = 0; i < ARRAY_LEN (p->leaves); i++)
    {
      /* Skip slots still covered only by the parent's prefix. */
      if (ip4_mtrie_leaf_is_non_empty (p, i))
	{
	  s = FORMAT_PLY (s, p, i, i, base_address,
			  p->dst_address_bits_base + 8, indent);
	}
    }

  return s;
}
822
Dave Barachd7cb1b52016-12-09 09:52:16 -0500823u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000824format_ip4_mtrie_16 (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700825{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000826 ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
Neale Ranns39194252017-11-27 01:03:25 -0800827 int verbose = va_arg (*va, int);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000828 ip4_mtrie_16_ply_t *p;
Neale Rannsa3af3372017-03-28 03:49:52 -0700829 u32 base_address = 0;
830 int i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700831
Neale Ranns7244a702021-08-06 13:12:00 +0000832 s =
833 format (s, "16-8-8: %d plies, memory usage %U\n", pool_elts (ip4_ply_pool),
834 format_memory_size, ip4_mtrie_16_memory_usage (m));
Neale Rannsc87aafa2017-11-29 00:59:31 -0800835 p = &m->root_ply;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700836
Neale Ranns39194252017-11-27 01:03:25 -0800837 if (verbose)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700838 {
Neale Ranns39194252017-11-27 01:03:25 -0800839 s = format (s, "root-ply");
840 p = &m->root_ply;
Neale Rannsa3af3372017-03-28 03:49:52 -0700841
Neale Ranns39194252017-11-27 01:03:25 -0800842 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Neale Rannsa3af3372017-03-28 03:49:52 -0700843 {
Neale Ranns39194252017-11-27 01:03:25 -0800844 u16 slot;
845
846 slot = clib_host_to_net_u16 (i);
847
848 if (p->dst_address_bits_of_leaves[slot] > 0)
849 {
mu.duojiao59a82952018-10-11 14:27:30 +0800850 s = FORMAT_PLY (s, p, i, slot, base_address, 16, 0);
Neale Ranns39194252017-11-27 01:03:25 -0800851 }
Neale Rannsa3af3372017-03-28 03:49:52 -0700852 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700853 }
854
855 return s;
856}
Dave Barachd7cb1b52016-12-09 09:52:16 -0500857
/* Format an 8-8-8-8 mtrie: ply count and memory footprint, plus - when
 * verbose - the full root-ply contents.
 * va_args: ip4_mtrie_8_t *m, int verbose.
 * NOTE(review): FORMAT_PLY captures the local `m` (an ip4_mtrie_8_t *)
 * and forwards it to format_ip4_mtrie_ply, which pops it as an
 * ip4_mtrie_16_t *; that function only passes it back into FORMAT_PLY
 * without dereferencing it here, but confirm the intended types. */
u8 *
format_ip4_mtrie_8 (u8 *s, va_list *va)
{
  ip4_mtrie_8_t *m = va_arg (*va, ip4_mtrie_8_t *);
  int verbose = va_arg (*va, int);
  ip4_mtrie_8_ply_t *root;
  u32 base_address = 0;
  u16 slot;

  root = pool_elt_at_index (ip4_ply_pool, m->root_ply);

  s = format (s, "8-8-8-8; %d plies, memory usage %U\n",
	      pool_elts (ip4_ply_pool), format_memory_size,
	      ip4_mtrie_8_memory_usage (m));

  if (verbose)
    {
      s = format (s, "root-ply");

      for (slot = 0; slot < ARRAY_LEN (root->leaves); slot++)
	{
	  /* Zero length means the slot is empty (no route covers it). */
	  if (root->dst_address_bits_of_leaves[slot] > 0)
	    {
	      s = FORMAT_PLY (s, root, slot, slot, base_address, 8, 0);
	    }
	}
    }

  return s;
}
888
/** Default heap size for the IPv4 mtries.
 * NOTE(review): not referenced in this part of the file - confirm it is
 * still consumed elsewhere before removing. */
#define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
/* Fallback for libc headers that predate MAP_HUGE_SHIFT; 26 matches the
 * Linux uapi value so huge-page mmap flags can still be constructed. */
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
Neale Ranns1ec36522017-11-29 05:20:37 -0800894
Neale Rannsa3af3372017-03-28 03:49:52 -0700895static clib_error_t *
896ip4_mtrie_module_init (vlib_main_t * vm)
897{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000898 CLIB_UNUSED (ip4_mtrie_8_ply_t * p);
Neale Ranns1ec36522017-11-29 05:20:37 -0800899 clib_error_t *error = NULL;
Neale Ranns1ec36522017-11-29 05:20:37 -0800900
901 /* Burn one ply so index 0 is taken */
Neale Rannsa3af3372017-03-28 03:49:52 -0700902 pool_get (ip4_ply_pool, p);
903
Neale Ranns1ec36522017-11-29 05:20:37 -0800904 return (error);
Neale Rannsa3af3372017-03-28 03:49:52 -0700905}
906
907VLIB_INIT_FUNCTION (ip4_mtrie_module_init);
908
Dave Barachd7cb1b52016-12-09 09:52:16 -0500909/*
910 * fd.io coding-style-patch-verification: ON
911 *
912 * Local Variables:
913 * eval: (c-set-style "gnu")
914 * End:
915 */