blob: 00855f7db43b9351bec129d449edf8c9ea63f930 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * ip/ip4_fib.h: ip4 mtrie fib
17 *
18 * Copyright (c) 2012 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vnet/ip/ip.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070041#include <vnet/ip/ip4_mtrie.h>
42#include <vnet/fib/ip4_fib.h>
43
44
45/**
46 * Global pool of IPv4 8bit PLYs
47 */
Neale Ranns6bb2db02021-08-06 12:24:14 +000048ip4_mtrie_8_ply_t *ip4_ply_pool;
Ed Warnickecb9cada2015-12-08 15:45:58 -070049
Neale Ranns04a75e32017-03-23 06:46:01 -070050always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000051ip4_mtrie_leaf_is_non_empty (ip4_mtrie_8_ply_t *p, u8 dst_byte)
Ed Warnickecb9cada2015-12-08 15:45:58 -070052{
Neale Ranns04a75e32017-03-23 06:46:01 -070053 /*
54 * It's 'non-empty' if the length of the leaf stored is greater than the
55 * length of a leaf in the covering ply. i.e. the leaf is more specific
56 * than it's would be cover in the covering ply
57 */
58 if (p->dst_address_bits_of_leaves[dst_byte] > p->dst_address_bits_base)
59 return (1);
60 return (0);
61}
62
Neale Ranns6bb2db02021-08-06 12:24:14 +000063always_inline ip4_mtrie_leaf_t
64ip4_mtrie_leaf_set_adj_index (u32 adj_index)
Neale Ranns04a75e32017-03-23 06:46:01 -070065{
Neale Ranns6bb2db02021-08-06 12:24:14 +000066 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070067 l = 1 + 2 * adj_index;
Neale Ranns6bb2db02021-08-06 12:24:14 +000068 ASSERT (ip4_mtrie_leaf_get_adj_index (l) == adj_index);
Neale Ranns04a75e32017-03-23 06:46:01 -070069 return l;
70}
71
72always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000073ip4_mtrie_leaf_is_next_ply (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070074{
75 return (n & 1) == 0;
76}
77
78always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000079ip4_mtrie_leaf_get_next_ply_index (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070080{
Neale Ranns6bb2db02021-08-06 12:24:14 +000081 ASSERT (ip4_mtrie_leaf_is_next_ply (n));
Neale Ranns04a75e32017-03-23 06:46:01 -070082 return n >> 1;
83}
84
Neale Ranns6bb2db02021-08-06 12:24:14 +000085always_inline ip4_mtrie_leaf_t
86ip4_mtrie_leaf_set_next_ply_index (u32 i)
Neale Ranns04a75e32017-03-23 06:46:01 -070087{
Neale Ranns6bb2db02021-08-06 12:24:14 +000088 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070089 l = 0 + 2 * i;
Neale Ranns6bb2db02021-08-06 12:24:14 +000090 ASSERT (ip4_mtrie_leaf_get_next_ply_index (l) == i);
Neale Ranns04a75e32017-03-23 06:46:01 -070091 return l;
92}
93
94static void
Neale Ranns6bb2db02021-08-06 12:24:14 +000095ply_8_init (ip4_mtrie_8_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len,
96 u32 ply_base_len)
Neale Ranns04a75e32017-03-23 06:46:01 -070097{
Damjan Marion9ff617c2021-12-23 13:19:15 +010098 p->n_non_empty_leafs = prefix_len > ply_base_len ? ARRAY_LEN (p->leaves) : 0;
99 clib_memset_u8 (p->dst_address_bits_of_leaves, prefix_len,
100 sizeof (p->dst_address_bits_of_leaves));
101 p->dst_address_bits_base = ply_base_len;
102
103 clib_memset_u32 (p->leaves, init, ARRAY_LEN (p->leaves));
Neale Rannsa3af3372017-03-28 03:49:52 -0700104}
105
106static void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000107ply_16_init (ip4_mtrie_16_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len)
Neale Rannsa3af3372017-03-28 03:49:52 -0700108{
Damjan Marion9ff617c2021-12-23 13:19:15 +0100109 clib_memset_u8 (p->dst_address_bits_of_leaves, prefix_len,
110 sizeof (p->dst_address_bits_of_leaves));
111 clib_memset_u32 (p->leaves, init, ARRAY_LEN (p->leaves));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700112}
113
/*
 * Allocate and initialise a new 8-bit ply from the global pool.
 *
 * @param init_leaf       leaf copied into every slot of the new ply
 * @param leaf_prefix_len prefix length recorded for each initial leaf
 * @param ply_base_len    number of address bits consumed above this ply
 * @return a non-terminal leaf encoding the new ply's pool index
 *
 * Must run on the main thread (thread_index 0).  If the pool must
 * expand (and therefore may move), workers are held at the barrier
 * for the duration so data-plane readers never see a moving pool.
 */
static ip4_mtrie_leaf_t
ply_create (ip4_mtrie_leaf_t init_leaf, u32 leaf_prefix_len, u32 ply_base_len)
{
  ip4_mtrie_8_ply_t *p;
  ip4_mtrie_leaf_t l;
  u8 need_barrier_sync = pool_get_will_expand (ip4_ply_pool);
  vlib_main_t *vm = vlib_get_main ();
  ASSERT (vm->thread_index == 0);

  if (need_barrier_sync)
    vlib_worker_thread_barrier_sync (vm);

  /* Get cache aligned ply. */
  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);

  ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
  l = ip4_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);

  if (need_barrier_sync)
    vlib_worker_thread_barrier_release (vm);

  return l;
}
137
Neale Ranns6bb2db02021-08-06 12:24:14 +0000138always_inline ip4_mtrie_8_ply_t *
Neale Ranns7244a702021-08-06 13:12:00 +0000139get_next_ply_for_leaf (ip4_mtrie_leaf_t l)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700140{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000141 uword n = ip4_mtrie_leaf_get_next_ply_index (l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700142
Neale Rannsa3af3372017-03-28 03:49:52 -0700143 return pool_elt_at_index (ip4_ply_pool, n);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700144}
145
/*
 * Free a 16-8-8 mtrie.  The root ply is embedded so there is nothing
 * to do, the assumption being that the IP4 FIB table has emptied the
 * trie before deletion.
 */
void
ip4_mtrie_16_free (ip4_mtrie_16_t *m)
{
  /* the root ply is embedded so there is nothing to do,
   * the assumption being that the IP4 FIB table has emptied the trie
   * before deletion.
   */
#if CLIB_DEBUG > 0
  /* Debug builds verify the assumption: no root slot may still point
   * at a child ply (that would leak pool elements). */
  int i;
  for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
    {
      ASSERT (!ip4_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
    }
#endif
}
161
162void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000163ip4_mtrie_16_init (ip4_mtrie_16_t *m)
Neale Rannsa3af3372017-03-28 03:49:52 -0700164{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000165 ply_16_init (&m->root_ply, IP4_MTRIE_LEAF_EMPTY, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700166}
167
/*
 * Free an 8-8-8-8 mtrie.  Only the pool-allocated root ply is
 * released; the assumption is that the IP4 FIB table has emptied the
 * trie before deletion, so no child plies remain.
 */
void
ip4_mtrie_8_free (ip4_mtrie_8_t *m)
{
  /* the root ply is embedded so there is nothing to do,
   * the assumption being that the IP4 FIB table has emptied the trie
   * before deletion.
   */
  ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);

#if CLIB_DEBUG > 0
  /* Debug builds verify the assumption: no root slot may still point
   * at a child ply (that would leak pool elements). */
  int i;
  for (i = 0; i < ARRAY_LEN (root->leaves); i++)
    {
      ASSERT (!ip4_mtrie_leaf_is_next_ply (root->leaves[i]));
    }
#endif

  pool_put (ip4_ply_pool, root);
}
187
188void
189ip4_mtrie_8_init (ip4_mtrie_8_t *m)
190{
191 ip4_mtrie_8_ply_t *root;
192
193 pool_get (ip4_ply_pool, root);
194 m->root_ply = root - ip4_ply_pool;
195
196 ply_8_init (root, IP4_MTRIE_LEAF_EMPTY, 0, 0);
197}
198
/**
 * Arguments carried down the recursive set/unset (route add/del) walks.
 */
typedef struct
{
  /* Destination prefix, pre-masked to dst_address_length bits */
  ip4_address_t dst_address;
  /* Prefix length of the route being added/removed (<= 32) */
  u32 dst_address_length;
  /* Adjacency index of the route being added/removed */
  u32 adj_index;
  /* On delete only: the covering prefix's length and adjacency,
   * written into slots the deleted leaf vacates */
  u32 cover_address_length;
  u32 cover_adj_index;
} ip4_mtrie_set_unset_leaf_args_t;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700207
/*
 * Recursively overwrite, throughout the sub-trie rooted at 'ply',
 * every terminal leaf that is less specific than (or equally specific
 * as) 'new_leaf'.  Used when a new route's leaf must shadow the
 * contents of an existing child ply.
 *
 * @param ply                       sub-trie root to walk
 * @param new_leaf                  terminal leaf to install
 * @param new_leaf_dst_address_bits prefix length of new_leaf
 */
static void
set_ply_with_more_specific_leaf (ip4_mtrie_8_ply_t *ply,
				 ip4_mtrie_leaf_t new_leaf,
				 uword new_leaf_dst_address_bits)
{
  ip4_mtrie_leaf_t old_leaf;
  uword i;

  ASSERT (ip4_mtrie_leaf_is_terminal (new_leaf));

  for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
    {
      old_leaf = ply->leaves[i];

      /* Recurse into sub plies. */
      if (!ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  ip4_mtrie_8_ply_t *sub_ply = get_next_ply_for_leaf (old_leaf);
	  set_ply_with_more_specific_leaf (sub_ply, new_leaf,
					   new_leaf_dst_address_bits);
	}

      /* Replace less specific terminal leaves with new leaf.
       * Release-store so concurrent readers see a complete leaf. */
      else if (new_leaf_dst_address_bits >=
	       ply->dst_address_bits_of_leaves[i])
	{
	  clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
	  ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
	  ply->n_non_empty_leafs += ip4_mtrie_leaf_is_non_empty (ply, i);
	}
    }
}
240
/*
 * Insert a route's leaf into the 8-bit ply addressed by the byte at
 * 'dst_address_byte_index' of the destination, recursing into (or
 * creating) child plies when the prefix extends beyond this ply.
 *
 * The ply is passed by pool index, not pointer, because ply_create
 * below may expand (and move) the pool; the pointer is refetched
 * after any allocation.
 *
 * @param a                      set/unset arguments (prefix, length, adj)
 * @param old_ply_index          pool index of the ply to update
 * @param dst_address_byte_index which byte of the address this ply covers
 */
static void
set_leaf (const ip4_mtrie_set_unset_leaf_args_t *a, u32 old_ply_index,
	  u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  i32 n_dst_bits_next_plies;
  u8 dst_byte;
  ip4_mtrie_8_ply_t *old_ply;

  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
      ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply */
      for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;

	  old_leaf = old_ply->leaves[i];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
	    {
	      /* The new leaf is more or equally specific than the one
	       * currently occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one.  Adjust the non-empty count around the
		   * release-store so it stays consistent. */
		  old_ply->n_non_empty_leafs -=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);

		  old_ply->dst_address_bits_of_leaves[i] =
		    a->dst_address_length;
		  clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);

		  old_ply->n_non_empty_leafs +=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);
		  ASSERT (old_ply->n_non_empty_leafs <=
			  ARRAY_LEN (old_ply->leaves));
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not termial (i.e. a
	       * ply), recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf
	   *  currently occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 8 * (dst_address_byte_index + 1);

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply */
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);

	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  /* Refetch since ply_create may move pool. */
	  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);
	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
    }
}
365
/*
 * Insert a route's leaf starting at the embedded 16-bit root ply of a
 * 16-8-8 mtrie.  Prefixes of length <= 16 are spread across the root
 * slots directly; longer prefixes recurse into 8-bit child plies via
 * set_leaf.
 *
 * @param a set/unset arguments (prefix, length, adjacency)
 */
static void
set_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  ip4_mtrie_16_ply_t *old_ply;
  i32 n_dst_bits_next_plies;
  u16 dst_byte;

  old_ply = &m->root_ply;

  ASSERT (a->dst_address_length <= 32);

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = 16 - a->dst_address_length;
      ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply.  Slots are in network byte
       * order, so convert to host order to step, then back. */
      for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;
	  u16 slot;

	  slot = clib_net_to_host_u16 (dst_byte);
	  slot += i;
	  slot = clib_host_to_net_u16 (slot);

	  old_leaf = old_ply->leaves[slot];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >=
	      old_ply->dst_address_bits_of_leaves[slot])
	    {
	      /* The new leaf is more or equally specific than the one
	       * currently occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one.  Release-store so readers never see a
		   * torn leaf. */
		  old_ply->dst_address_bits_of_leaves[slot] =
		    a->dst_address_length;
		  clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not termial (i.e. a
	       * ply), recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, 2);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf
	   *  currently occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 16;

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply */
	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, 2);
    }
}
475
/*
 * Remove a route's leaf from an 8-bit ply, restoring the covering
 * prefix's leaf into every slot the deleted route occupied, and
 * recursing into child plies where needed.
 *
 * @param a        set/unset arguments (prefix, adj, cover prefix/adj)
 * @param old_ply  the ply to update (pointer is stable: delete never
 *                 grows the pool)
 * @param dst_address_byte_index which address byte this ply covers
 * @return 1 if this ply became empty and was returned to the pool
 *         (the caller must then drop its reference), 0 otherwise.
 */
static uword
unset_leaf (const ip4_mtrie_set_unset_leaf_args_t *a,
	    ip4_mtrie_8_ply_t *old_ply, u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u8 dst_byte;

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];
  if (n_dst_bits_next_plies < 0)
    dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);

  n_dst_bits_this_ply =
    n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
  n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);

  /* the terminal leaf this delete is looking for */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
    {
      old_leaf = old_ply->leaves[i];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Replace the slot if it holds the deleted leaf directly, or if
       * the recursive delete below emptied (and freed) the child ply. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf),
		       dst_address_byte_index + 1)))
	{
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  /* Install the covering prefix's leaf in place of the deleted
	   * one; release-store for concurrent readers. */
	  clib_atomic_store_rel_n (
	    &old_ply->leaves[i],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	  if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
	    {
	      pool_put (ip4_ply_pool, old_ply);
	      /* Old ply was deleted. */
	      return 1;
	    }
#if CLIB_DEBUG > 0
	  /* Debug cross-check: a surviving non-root ply must still have
	   * at least one non-empty leaf. */
	  else if (dst_address_byte_index)
	    {
	      int ii, count = 0;
	      for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
		{
		  count += ip4_mtrie_leaf_is_non_empty (old_ply, ii);
		}
	      ASSERT (count);
	    }
#endif
	}
    }

  /* Old ply was not deleted. */
  return 0;
}
546
/*
 * Remove a route's leaf starting at the embedded 16-bit root ply,
 * restoring the covering prefix's leaf into every slot the route
 * occupied.  The root ply itself is never freed.
 *
 * @param a set/unset arguments (prefix, adj, cover prefix/adj)
 */
static void
unset_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u16 dst_byte;
  ip4_mtrie_16_ply_t *old_ply;

  ASSERT (a->dst_address_length <= 32);

  old_ply = &m->root_ply;
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
			 (16 - a->dst_address_length) : 0);

  /* the terminal leaf this delete is looking for */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  /* Starting at the value of the byte at this section of the v4 address
   * fill the buckets/slots of the ply.  Slots are kept in network byte
   * order, so step in host order and convert back. */
  for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
    {
      u16 slot;

      slot = clib_net_to_host_u16 (dst_byte);
      slot += i;
      slot = clib_host_to_net_u16 (slot);

      old_leaf = old_ply->leaves[slot];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Replace the slot if it holds the deleted leaf directly, or if
       * the recursive delete emptied (and freed) the child ply. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf), 2)))
	{
	  clib_atomic_store_rel_n (
	    &old_ply->leaves[slot],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
	}
    }
}
592
593void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000594ip4_mtrie_16_route_add (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
595 u32 dst_address_length, u32 adj_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700596{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000597 ip4_mtrie_set_unset_leaf_args_t a;
Dave Barachd7cb1b52016-12-09 09:52:16 -0500598 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700599
Ed Warnickecb9cada2015-12-08 15:45:58 -0700600 /* Honor dst_address_length. Fib masks are in network byte order */
Neale Rannsa3af3372017-03-28 03:49:52 -0700601 a.dst_address.as_u32 = (dst_address->as_u32 &
602 im->fib_masks[dst_address_length]);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700603 a.dst_address_length = dst_address_length;
604 a.adj_index = adj_index;
605
Neale Rannsa3af3372017-03-28 03:49:52 -0700606 set_root_leaf (m, &a);
607}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700608
Neale Rannsa3af3372017-03-28 03:49:52 -0700609void
Neale Ranns7244a702021-08-06 13:12:00 +0000610ip4_mtrie_8_route_add (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
611 u32 dst_address_length, u32 adj_index)
612{
613 ip4_mtrie_set_unset_leaf_args_t a;
614 ip4_main_t *im = &ip4_main;
615
616 /* Honor dst_address_length. Fib masks are in network byte order */
617 a.dst_address.as_u32 =
618 (dst_address->as_u32 & im->fib_masks[dst_address_length]);
619 a.dst_address_length = dst_address_length;
620 a.adj_index = adj_index;
621
622 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
623
624 set_leaf (&a, root - ip4_ply_pool, 0);
625}
626
627void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000628ip4_mtrie_16_route_del (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
629 u32 dst_address_length, u32 adj_index,
630 u32 cover_address_length, u32 cover_adj_index)
Neale Rannsa3af3372017-03-28 03:49:52 -0700631{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000632 ip4_mtrie_set_unset_leaf_args_t a;
Neale Rannsa3af3372017-03-28 03:49:52 -0700633 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700634
Neale Rannsa3af3372017-03-28 03:49:52 -0700635 /* Honor dst_address_length. Fib masks are in network byte order */
636 a.dst_address.as_u32 = (dst_address->as_u32 &
637 im->fib_masks[dst_address_length]);
638 a.dst_address_length = dst_address_length;
639 a.adj_index = adj_index;
640 a.cover_adj_index = cover_adj_index;
641 a.cover_address_length = cover_address_length;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700642
Neale Rannsa3af3372017-03-28 03:49:52 -0700643 /* the top level ply is never removed */
644 unset_root_leaf (m, &a);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700645}
646
Neale Ranns7244a702021-08-06 13:12:00 +0000647void
648ip4_mtrie_8_route_del (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
649 u32 dst_address_length, u32 adj_index,
650 u32 cover_address_length, u32 cover_adj_index)
651{
652 ip4_main_t *im = &ip4_main;
653
654 /* Honor dst_address_length. Fib masks are in network byte order */
655 ip4_mtrie_set_unset_leaf_args_t a = {
656 .dst_address.as_u32 =
657 (dst_address->as_u32 & im->fib_masks[dst_address_length]),
658 .dst_address_length = dst_address_length,
659 .adj_index = adj_index,
660 .cover_adj_index = cover_adj_index,
661 .cover_address_length = cover_address_length,
662 };
663
664 /* the top level ply is never removed */
665 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
666
667 unset_leaf (&a, root, 0);
668}
669
Ed Warnickecb9cada2015-12-08 15:45:58 -0700670/* Returns number of bytes of memory used by mtrie. */
Dave Barachd7cb1b52016-12-09 09:52:16 -0500671static uword
Neale Ranns7244a702021-08-06 13:12:00 +0000672mtrie_ply_memory_usage (ip4_mtrie_8_ply_t *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700673{
674 uword bytes, i;
675
Ed Warnickecb9cada2015-12-08 15:45:58 -0700676 bytes = sizeof (p[0]);
Dave Barachd7cb1b52016-12-09 09:52:16 -0500677 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700678 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000679 ip4_mtrie_leaf_t l = p->leaves[i];
680 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000681 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Neale Rannsa3af3372017-03-28 03:49:52 -0700682 }
683
684 return bytes;
685}
686
687/* Returns number of bytes of memory used by mtrie. */
Neale Rannsc87aafa2017-11-29 00:59:31 -0800688uword
Neale Ranns6bb2db02021-08-06 12:24:14 +0000689ip4_mtrie_16_memory_usage (ip4_mtrie_16_t *m)
Neale Rannsa3af3372017-03-28 03:49:52 -0700690{
691 uword bytes, i;
692
693 bytes = sizeof (*m);
694 for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
695 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000696 ip4_mtrie_leaf_t l = m->root_ply.leaves[i];
697 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000698 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
699 }
700
701 return bytes;
702}
703uword
704ip4_mtrie_8_memory_usage (ip4_mtrie_8_t *m)
705{
706 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
707 uword bytes, i;
708
709 bytes = sizeof (*m);
710 for (i = 0; i < ARRAY_LEN (root->leaves); i++)
711 {
712 ip4_mtrie_leaf_t l = root->leaves[i];
713 if (ip4_mtrie_leaf_is_next_ply (l))
714 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700715 }
716
717 return bytes;
718}
719
Dave Barachd7cb1b52016-12-09 09:52:16 -0500720static u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000721format_ip4_mtrie_leaf (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700722{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000723 ip4_mtrie_leaf_t l = va_arg (*va, ip4_mtrie_leaf_t);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700724
Neale Ranns6bb2db02021-08-06 12:24:14 +0000725 if (ip4_mtrie_leaf_is_terminal (l))
726 s = format (s, "lb-index %d", ip4_mtrie_leaf_get_adj_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700727 else
Neale Ranns6bb2db02021-08-06 12:24:14 +0000728 s = format (s, "next ply %d", ip4_mtrie_leaf_get_next_ply_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700729 return s;
730}
731
/**
 * Format one leaf of a ply and, when the leaf points at a deeper ply,
 * recurse into it via format_ip4_mtrie_ply.
 *
 * NOTE(review): this is a GCC statement-expression macro; besides its
 * named parameters it expands references to the caller-scope variable
 * 'm' (the mtrie) — callers must have it in scope under that name.
 *
 *  _p            - pointer to the ply holding the leaf
 *  _a            - leaf's address bits at this depth (host order)
 *  _i            - leaf index within the ply
 *  _base_address - host-order address prefix already fixed above this ply
 *  _ply_max_len  - number of prefix bits fixed once this ply is reached
 *  _indent       - indentation level for the formatted output
 */
#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent)       \
  ({                                                                          \
    u32 a, ia_length;                                                         \
    ip4_address_t ia;                                                         \
    ip4_mtrie_leaf_t _l = (_p)->leaves[(_i)];                                 \
                                                                              \
    a = (_base_address) + ((_a) << (32 - (_ply_max_len)));                    \
    ia.as_u32 = clib_host_to_net_u32 (a);                                     \
    ia_length = (_p)->dst_address_bits_of_leaves[(_i)];                       \
    s = format (s, "\n%U%U %U", format_white_space, (_indent) + 4,            \
		format_ip4_address_and_length, &ia, ia_length,                \
		format_ip4_mtrie_leaf, _l);                                   \
                                                                              \
    if (ip4_mtrie_leaf_is_next_ply (_l))                                      \
      s = format (s, "\n%U", format_ip4_mtrie_ply, m, a, (_indent) + 8,       \
		  ip4_mtrie_leaf_get_next_ply_index (_l));                    \
    s;                                                                        \
  })
Neale Rannsa3af3372017-03-28 03:49:52 -0700750
Dave Barachd7cb1b52016-12-09 09:52:16 -0500751static u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000752format_ip4_mtrie_ply (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700753{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000754 ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700755 u32 base_address = va_arg (*va, u32);
mu.duojiao59a82952018-10-11 14:27:30 +0800756 u32 indent = va_arg (*va, u32);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700757 u32 ply_index = va_arg (*va, u32);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000758 ip4_mtrie_8_ply_t *p;
Neale Rannsa3af3372017-03-28 03:49:52 -0700759 int i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700760
Neale Rannsa3af3372017-03-28 03:49:52 -0700761 p = pool_elt_at_index (ip4_ply_pool, ply_index);
mu.duojiao59a82952018-10-11 14:27:30 +0800762 s = format (s, "%Uply index %d, %d non-empty leaves",
763 format_white_space, indent, ply_index, p->n_non_empty_leafs);
Neale Rannsa3af3372017-03-28 03:49:52 -0700764
Ed Warnickecb9cada2015-12-08 15:45:58 -0700765 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
766 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000767 if (ip4_mtrie_leaf_is_non_empty (p, i))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700768 {
mu.duojiao59a82952018-10-11 14:27:30 +0800769 s = FORMAT_PLY (s, p, i, i, base_address,
Neale Ranns756cd942018-04-06 09:18:11 -0700770 p->dst_address_bits_base + 8, indent);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700771 }
772 }
773
774 return s;
775}
776
Dave Barachd7cb1b52016-12-09 09:52:16 -0500777u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000778format_ip4_mtrie_16 (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700779{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000780 ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
Neale Ranns39194252017-11-27 01:03:25 -0800781 int verbose = va_arg (*va, int);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000782 ip4_mtrie_16_ply_t *p;
Neale Rannsa3af3372017-03-28 03:49:52 -0700783 u32 base_address = 0;
784 int i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700785
Neale Ranns7244a702021-08-06 13:12:00 +0000786 s =
787 format (s, "16-8-8: %d plies, memory usage %U\n", pool_elts (ip4_ply_pool),
788 format_memory_size, ip4_mtrie_16_memory_usage (m));
Neale Rannsc87aafa2017-11-29 00:59:31 -0800789 p = &m->root_ply;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700790
Neale Ranns39194252017-11-27 01:03:25 -0800791 if (verbose)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700792 {
Neale Ranns39194252017-11-27 01:03:25 -0800793 s = format (s, "root-ply");
794 p = &m->root_ply;
Neale Rannsa3af3372017-03-28 03:49:52 -0700795
Neale Ranns39194252017-11-27 01:03:25 -0800796 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Neale Rannsa3af3372017-03-28 03:49:52 -0700797 {
Neale Ranns39194252017-11-27 01:03:25 -0800798 u16 slot;
799
800 slot = clib_host_to_net_u16 (i);
801
802 if (p->dst_address_bits_of_leaves[slot] > 0)
803 {
mu.duojiao59a82952018-10-11 14:27:30 +0800804 s = FORMAT_PLY (s, p, i, slot, base_address, 16, 0);
Neale Ranns39194252017-11-27 01:03:25 -0800805 }
Neale Rannsa3af3372017-03-28 03:49:52 -0700806 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700807 }
808
809 return s;
810}
Dave Barachd7cb1b52016-12-09 09:52:16 -0500811
Neale Ranns7244a702021-08-06 13:12:00 +0000812u8 *
813format_ip4_mtrie_8 (u8 *s, va_list *va)
814{
815 ip4_mtrie_8_t *m = va_arg (*va, ip4_mtrie_8_t *);
816 int verbose = va_arg (*va, int);
817 ip4_mtrie_8_ply_t *root;
818 u32 base_address = 0;
819 u16 slot;
820
821 root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
822
823 s = format (s, "8-8-8-8; %d plies, memory usage %U\n",
824 pool_elts (ip4_ply_pool), format_memory_size,
825 ip4_mtrie_8_memory_usage (m));
826
827 if (verbose)
828 {
829 s = format (s, "root-ply");
830
831 for (slot = 0; slot < ARRAY_LEN (root->leaves); slot++)
832 {
833 if (root->dst_address_bits_of_leaves[slot] > 0)
834 {
835 s = FORMAT_PLY (s, root, slot, slot, base_address, 8, 0);
836 }
837 }
838 }
839
840 return s;
841}
842
/** Default heap size for the IPv4 mtries */
#define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
/* Fallback for libc headers that predate MAP_HUGE_SHIFT; 26 matches the
 * Linux <linux/mman.h> definition. */
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
Neale Ranns1ec36522017-11-29 05:20:37 -0800848
static clib_error_t *
ip4_mtrie_module_init (vlib_main_t * vm)
{
  /* 'p' exists only as the destination slot for pool_get below. */
  CLIB_UNUSED (ip4_mtrie_8_ply_t * p);
  clib_error_t *error = NULL;

  /* Burn one ply so index 0 is taken */
  /* NOTE(review): presumably so ply index 0 can serve as an "empty"
   * sentinel in leaf encodings — confirm against the leaf macros. */
  pool_get (ip4_ply_pool, p);

  return (error);
}

VLIB_INIT_FUNCTION (ip4_mtrie_module_init);
862
Dave Barachd7cb1b52016-12-09 09:52:16 -0500863/*
864 * fd.io coding-style-patch-verification: ON
865 *
866 * Local Variables:
867 * eval: (c-set-style "gnu")
868 * End:
869 */