blob: e61b4aa577b5773f72770b66c36034973b390ae8 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * ip/ip4_fib.h: ip4 mtrie fib
17 *
18 * Copyright (c) 2012 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vnet/ip/ip.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070041#include <vnet/ip/ip4_mtrie.h>
42#include <vnet/fib/ip4_fib.h>
43
44
/**
 * Global pool of IPv4 8bit PLYs.
 * Shared by all mtries; leaves reference child plies by pool index.
 */
ip4_mtrie_8_ply_t *ip4_ply_pool;
Ed Warnickecb9cada2015-12-08 15:45:58 -070049
Neale Ranns04a75e32017-03-23 06:46:01 -070050always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000051ip4_mtrie_leaf_is_non_empty (ip4_mtrie_8_ply_t *p, u8 dst_byte)
Ed Warnickecb9cada2015-12-08 15:45:58 -070052{
Neale Ranns04a75e32017-03-23 06:46:01 -070053 /*
54 * It's 'non-empty' if the length of the leaf stored is greater than the
55 * length of a leaf in the covering ply. i.e. the leaf is more specific
56 * than it's would be cover in the covering ply
57 */
58 if (p->dst_address_bits_of_leaves[dst_byte] > p->dst_address_bits_base)
59 return (1);
60 return (0);
61}
62
Neale Ranns6bb2db02021-08-06 12:24:14 +000063always_inline ip4_mtrie_leaf_t
64ip4_mtrie_leaf_set_adj_index (u32 adj_index)
Neale Ranns04a75e32017-03-23 06:46:01 -070065{
Neale Ranns6bb2db02021-08-06 12:24:14 +000066 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070067 l = 1 + 2 * adj_index;
Neale Ranns6bb2db02021-08-06 12:24:14 +000068 ASSERT (ip4_mtrie_leaf_get_adj_index (l) == adj_index);
Neale Ranns04a75e32017-03-23 06:46:01 -070069 return l;
70}
71
72always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000073ip4_mtrie_leaf_is_next_ply (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070074{
75 return (n & 1) == 0;
76}
77
78always_inline u32
Neale Ranns6bb2db02021-08-06 12:24:14 +000079ip4_mtrie_leaf_get_next_ply_index (ip4_mtrie_leaf_t n)
Neale Ranns04a75e32017-03-23 06:46:01 -070080{
Neale Ranns6bb2db02021-08-06 12:24:14 +000081 ASSERT (ip4_mtrie_leaf_is_next_ply (n));
Neale Ranns04a75e32017-03-23 06:46:01 -070082 return n >> 1;
83}
84
Neale Ranns6bb2db02021-08-06 12:24:14 +000085always_inline ip4_mtrie_leaf_t
86ip4_mtrie_leaf_set_next_ply_index (u32 i)
Neale Ranns04a75e32017-03-23 06:46:01 -070087{
Neale Ranns6bb2db02021-08-06 12:24:14 +000088 ip4_mtrie_leaf_t l;
Neale Ranns04a75e32017-03-23 06:46:01 -070089 l = 0 + 2 * i;
Neale Ranns6bb2db02021-08-06 12:24:14 +000090 ASSERT (ip4_mtrie_leaf_get_next_ply_index (l) == i);
Neale Ranns04a75e32017-03-23 06:46:01 -070091 return l;
92}
93
/*
 * Initialise an 8-bit ply: every slot gets leaf 'init' with prefix length
 * 'prefix_len'; 'ply_base_len' is the prefix length covered by the ply's
 * position in the trie.
 */
static void
ply_8_init (ip4_mtrie_8_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len,
	    u32 ply_base_len)
{
  /* If the initial leaf is more specific than the ply's base length then
   * every slot starts out 'non-empty'; otherwise none do. */
  p->n_non_empty_leafs = prefix_len > ply_base_len ? ARRAY_LEN (p->leaves) : 0;
  clib_memset_u8 (p->dst_address_bits_of_leaves, prefix_len,
		  sizeof (p->dst_address_bits_of_leaves));
  p->dst_address_bits_base = ply_base_len;

  clib_memset_u32 (p->leaves, init, ARRAY_LEN (p->leaves));
}
105
/*
 * Initialise a 16-bit (root) ply: every one of its slots gets leaf 'init'
 * with prefix length 'prefix_len'.
 */
static void
ply_16_init (ip4_mtrie_16_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len)
{
  clib_memset_u8 (p->dst_address_bits_of_leaves, prefix_len,
		  sizeof (p->dst_address_bits_of_leaves));
  clib_memset_u32 (p->leaves, init, ARRAY_LEN (p->leaves));
}
113
Neale Ranns6bb2db02021-08-06 12:24:14 +0000114static ip4_mtrie_leaf_t
Neale Ranns7244a702021-08-06 13:12:00 +0000115ply_create (ip4_mtrie_leaf_t init_leaf, u32 leaf_prefix_len, u32 ply_base_len)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700116{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000117 ip4_mtrie_8_ply_t *p;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700118 /* Get cache aligned ply. */
Neale Ranns1ec36522017-11-29 05:20:37 -0800119
Neale Rannsa3af3372017-03-28 03:49:52 -0700120 pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700121
Neale Rannsa3af3372017-03-28 03:49:52 -0700122 ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000123 return ip4_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700124}
125
Neale Ranns6bb2db02021-08-06 12:24:14 +0000126always_inline ip4_mtrie_8_ply_t *
Neale Ranns7244a702021-08-06 13:12:00 +0000127get_next_ply_for_leaf (ip4_mtrie_leaf_t l)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700128{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000129 uword n = ip4_mtrie_leaf_get_next_ply_index (l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700130
Neale Rannsa3af3372017-03-28 03:49:52 -0700131 return pool_elt_at_index (ip4_ply_pool, n);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700132}
133
/*
 * Free a 16-8-8 mtrie. The root ply is embedded so there is nothing to do;
 * the assumption is that the IP4 FIB table has already emptied the trie
 * before deletion. In debug builds, verify no child plies remain.
 */
void
ip4_mtrie_16_free (ip4_mtrie_16_t *m)
{
#if CLIB_DEBUG > 0
  int i;
  for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
    {
      /* Every remaining leaf must be terminal, i.e. no leaked sub-plies. */
      ASSERT (!ip4_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
    }
#endif
}
149
150void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000151ip4_mtrie_16_init (ip4_mtrie_16_t *m)
Neale Rannsa3af3372017-03-28 03:49:52 -0700152{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000153 ply_16_init (&m->root_ply, IP4_MTRIE_LEAF_EMPTY, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700154}
155
/*
 * Free an 8-8-8-8 mtrie. The assumption is that the IP4 FIB table has
 * already emptied the trie before deletion, so only the pool-allocated
 * root ply needs returning. In debug builds, verify no child plies remain.
 */
void
ip4_mtrie_8_free (ip4_mtrie_8_t *m)
{
  ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);

#if CLIB_DEBUG > 0
  int i;
  for (i = 0; i < ARRAY_LEN (root->leaves); i++)
    {
      /* Every remaining leaf must be terminal, i.e. no leaked sub-plies. */
      ASSERT (!ip4_mtrie_leaf_is_next_ply (root->leaves[i]));
    }
#endif

  pool_put (ip4_ply_pool, root);
}
175
176void
177ip4_mtrie_8_init (ip4_mtrie_8_t *m)
178{
179 ip4_mtrie_8_ply_t *root;
180
181 pool_get (ip4_ply_pool, root);
182 m->root_ply = root - ip4_ply_pool;
183
184 ply_8_init (root, IP4_MTRIE_LEAF_EMPTY, 0, 0);
185}
186
/* Arguments threaded through the recursive set/unset leaf walkers. */
typedef struct
{
  /* Destination prefix being added/removed (network byte order). */
  ip4_address_t dst_address;
  /* Mask length of dst_address, 0..32. */
  u32 dst_address_length;
  /* Adjacency index the prefix resolves to. */
  u32 adj_index;
  /* On delete: mask length of the covering (less specific) prefix. */
  u32 cover_address_length;
  /* On delete: adjacency of the covering prefix, used to backfill slots. */
  u32 cover_adj_index;
} ip4_mtrie_set_unset_leaf_args_t;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700195
/*
 * Recursively overwrite, in 'ply' and all of its sub-plies, every terminal
 * leaf that is less specific than (or equally specific to) 'new_leaf'.
 * Used when a new route's leaf must shadow everything a ply covers.
 */
static void
set_ply_with_more_specific_leaf (ip4_mtrie_8_ply_t *ply,
				 ip4_mtrie_leaf_t new_leaf,
				 uword new_leaf_dst_address_bits)
{
  ip4_mtrie_leaf_t old_leaf;
  uword i;

  ASSERT (ip4_mtrie_leaf_is_terminal (new_leaf));

  for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
    {
      old_leaf = ply->leaves[i];

      /* Recurse into sub plies. */
      if (!ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  ip4_mtrie_8_ply_t *sub_ply = get_next_ply_for_leaf (old_leaf);
	  set_ply_with_more_specific_leaf (sub_ply, new_leaf,
					   new_leaf_dst_address_bits);
	}

      /* Replace less specific terminal leaves with new leaf. */
      else if (new_leaf_dst_address_bits >=
	       ply->dst_address_bits_of_leaves[i])
	{
	  /* Release-store so concurrent lookup readers never observe a
	   * torn leaf/length pair. */
	  clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
	  ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
	  ply->n_non_empty_leafs += ip4_mtrie_leaf_is_non_empty (ply, i);
	}
    }
}
228
/*
 * Insert the prefix described by 'a' into the 8-bit ply identified by
 * 'old_ply_index'. 'dst_address_byte_index' is the byte of the destination
 * address this ply spans. Recurses into - and creates - child plies for
 * prefixes longer than this ply covers.
 *
 * NOTE: the ply is passed by pool index rather than pointer because
 * ply_create() may grow (and therefore move) the global ply pool,
 * invalidating any held pointers.
 */
static void
set_leaf (const ip4_mtrie_set_unset_leaf_args_t *a, u32 old_ply_index,
	  u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  i32 n_dst_bits_next_plies;
  u8 dst_byte;
  ip4_mtrie_8_ply_t *old_ply;

  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
      ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply */
      for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;

	  old_leaf = old_ply->leaves[i];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
	    {
	      /* The new leaf is more or equally specific than the one
	       * currently occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one. Keep the non-empty count consistent
		   * across the replacement. */
		  old_ply->n_non_empty_leafs -=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);

		  old_ply->dst_address_bits_of_leaves[i] =
		    a->dst_address_length;
		  /* Release-store: lookups may run concurrently. */
		  clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);

		  old_ply->n_non_empty_leafs +=
		    ip4_mtrie_leaf_is_non_empty (old_ply, i);
		  ASSERT (old_ply->n_non_empty_leafs <=
			  ARRAY_LEN (old_ply->leaves));
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not terminal (i.e. a
	       * ply), recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf currently
	   *  occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 8 * (dst_address_byte_index + 1);

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply
	   * seeded with the old leaf so existing routing is preserved. */
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);

	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  /* Refetch since ply_create may move pool. */
	  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);
	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
    }
}
353
/*
 * Insert the prefix described by 'a' starting at the embedded 16-bit root
 * ply of a 16-8-8 mtrie. Prefixes of length <= 16 are expanded across the
 * root ply's slots; longer prefixes recurse into 8-bit plies via
 * set_leaf().
 */
static void
set_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, new_leaf;
  ip4_mtrie_16_ply_t *old_ply;
  i32 n_dst_bits_next_plies;
  u16 dst_byte;

  old_ply = &m->root_ply;

  ASSERT (a->dst_address_length <= 32);

  /* how many bits of the destination address are in the next PLY */
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  /* Number of bits next plies <= 0 => insert leaves this ply. */
  if (n_dst_bits_next_plies <= 0)
    {
      /* The mask length of the address to insert maps to this ply */
      uword old_leaf_is_terminal;
      u32 i, n_dst_bits_this_ply;

      /* The number of bits, and hence slots/buckets, we will fill */
      n_dst_bits_this_ply = 16 - a->dst_address_length;
      ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
	       pow2_mask (n_dst_bits_this_ply)) == 0);

      /* Starting at the value of the byte at this section of the v4 address
       * fill the buckets/slots of the ply */
      for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
	{
	  ip4_mtrie_8_ply_t *new_ply;
	  u16 slot;

	  /* Slot arithmetic is done in host order; the leaves array is
	   * indexed in network order. */
	  slot = clib_net_to_host_u16 (dst_byte);
	  slot += i;
	  slot = clib_host_to_net_u16 (slot);

	  old_leaf = old_ply->leaves[slot];
	  old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

	  if (a->dst_address_length >=
	      old_ply->dst_address_bits_of_leaves[slot])
	    {
	      /* The new leaf is more or equally specific than the one
	       * currently occupying the slot */
	      new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

	      if (old_leaf_is_terminal)
		{
		  /* The current leaf is terminal, we can replace it with
		   * the new one */
		  old_ply->dst_address_bits_of_leaves[slot] =
		    a->dst_address_length;
		  /* Release-store: lookups may run concurrently. */
		  clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
		}
	      else
		{
		  /* Existing leaf points to another ply. We need to place
		   * new_leaf into all more specific slots. */
		  new_ply = get_next_ply_for_leaf (old_leaf);
		  set_ply_with_more_specific_leaf (new_ply, new_leaf,
						   a->dst_address_length);
		}
	    }
	  else if (!old_leaf_is_terminal)
	    {
	      /* The current leaf is less specific and not terminal (i.e. a
	       * ply), recurse on down the trie */
	      new_ply = get_next_ply_for_leaf (old_leaf);
	      set_leaf (a, new_ply - ip4_ply_pool, 2);
	    }
	  /*
	   * else
	   *  the route we are adding is less specific than the leaf currently
	   *  occupying this slot. leave it there
	   */
	}
    }
  else
    {
      /* The address to insert requires us to move down at a lower level of
       * the trie - recurse on down */
      ip4_mtrie_8_ply_t *new_ply;
      u8 ply_base_len;

      ply_base_len = 16;

      old_leaf = old_ply->leaves[dst_byte];

      if (ip4_mtrie_leaf_is_terminal (old_leaf))
	{
	  /* There is a leaf occupying the slot. Replace it with a new ply
	   * seeded with the old leaf so existing routing is preserved. */
	  new_leaf = ply_create (old_leaf,
				 old_ply->dst_address_bits_of_leaves[dst_byte],
				 ply_base_len);
	  new_ply = get_next_ply_for_leaf (new_leaf);

	  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
	  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
	}
      else
	new_ply = get_next_ply_for_leaf (old_leaf);

      set_leaf (a, new_ply - ip4_ply_pool, 2);
    }
}
463
/*
 * Remove the prefix described by 'a' from 'old_ply', backfilling the freed
 * slots with the covering prefix's adjacency (a->cover_adj_index /
 * a->cover_address_length).
 *
 * Returns 1 if 'old_ply' became completely empty and was returned to the
 * pool (the caller must then drop its reference to it), 0 otherwise.
 */
static uword
unset_leaf (const ip4_mtrie_set_unset_leaf_args_t *a,
	    ip4_mtrie_8_ply_t *old_ply, u32 dst_address_byte_index)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u8 dst_byte;

  ASSERT (a->dst_address_length <= 32);
  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));

  n_dst_bits_next_plies =
    a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);

  dst_byte = a->dst_address.as_u8[dst_address_byte_index];
  if (n_dst_bits_next_plies < 0)
    dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);

  n_dst_bits_this_ply =
    n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
  n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);

  /* The terminal leaf that the prefix being removed would have written. */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
    {
      old_leaf = old_ply->leaves[i];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Either this slot holds the leaf being deleted, or it holds a
       * sub-ply which the recursive call just emptied and freed. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf),
		       dst_address_byte_index + 1)))
	{
	  old_ply->n_non_empty_leafs -=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  /* Backfill with the covering prefix's adjacency. */
	  clib_atomic_store_rel_n (
	    &old_ply->leaves[i],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;

	  old_ply->n_non_empty_leafs +=
	    ip4_mtrie_leaf_is_non_empty (old_ply, i);

	  ASSERT (old_ply->n_non_empty_leafs >= 0);
	  if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
	    {
	      pool_put (ip4_ply_pool, old_ply);
	      /* Old ply was deleted. */
	      return 1;
	    }
#if CLIB_DEBUG > 0
	  else if (dst_address_byte_index)
	    {
	      /* Sanity: the count must agree with a fresh recount. */
	      int ii, count = 0;
	      for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
		{
		  count += ip4_mtrie_leaf_is_non_empty (old_ply, ii);
		}
	      ASSERT (count);
	    }
#endif
	}
    }

  /* Old ply was not deleted. */
  return 0;
}
534
/*
 * Remove the prefix described by 'a' from the embedded 16-bit root ply of
 * a 16-8-8 mtrie, backfilling with the covering prefix. The root ply is
 * embedded in the trie and is never freed.
 */
static void
unset_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
{
  ip4_mtrie_leaf_t old_leaf, del_leaf;
  i32 n_dst_bits_next_plies;
  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
  u16 dst_byte;
  ip4_mtrie_16_ply_t *old_ply;

  ASSERT (a->dst_address_length <= 32);

  old_ply = &m->root_ply;
  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);

  dst_byte = a->dst_address.as_u16[0];

  n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
			 (16 - a->dst_address_length) : 0);

  /* The terminal leaf that the prefix being removed would have written. */
  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);

  /* Starting at the value of the byte at this section of the v4 address
   * fill the buckets/slots of the ply */
  for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
    {
      u16 slot;

      /* Slot arithmetic is done in host order; the leaves array is
       * indexed in network order. */
      slot = clib_net_to_host_u16 (dst_byte);
      slot += i;
      slot = clib_host_to_net_u16 (slot);

      old_leaf = old_ply->leaves[slot];
      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);

      /* Either this slot holds the leaf being deleted, or it holds a
       * sub-ply which the recursive call just emptied and freed. */
      if (old_leaf == del_leaf ||
	  (!old_leaf_is_terminal &&
	   unset_leaf (a, get_next_ply_for_leaf (old_leaf), 2)))
	{
	  clib_atomic_store_rel_n (
	    &old_ply->leaves[slot],
	    ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
	  old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
	}
    }
}
580
581void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000582ip4_mtrie_16_route_add (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
583 u32 dst_address_length, u32 adj_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700584{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000585 ip4_mtrie_set_unset_leaf_args_t a;
Dave Barachd7cb1b52016-12-09 09:52:16 -0500586 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700587
Ed Warnickecb9cada2015-12-08 15:45:58 -0700588 /* Honor dst_address_length. Fib masks are in network byte order */
Neale Rannsa3af3372017-03-28 03:49:52 -0700589 a.dst_address.as_u32 = (dst_address->as_u32 &
590 im->fib_masks[dst_address_length]);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700591 a.dst_address_length = dst_address_length;
592 a.adj_index = adj_index;
593
Neale Rannsa3af3372017-03-28 03:49:52 -0700594 set_root_leaf (m, &a);
595}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700596
Neale Rannsa3af3372017-03-28 03:49:52 -0700597void
Neale Ranns7244a702021-08-06 13:12:00 +0000598ip4_mtrie_8_route_add (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
599 u32 dst_address_length, u32 adj_index)
600{
601 ip4_mtrie_set_unset_leaf_args_t a;
602 ip4_main_t *im = &ip4_main;
603
604 /* Honor dst_address_length. Fib masks are in network byte order */
605 a.dst_address.as_u32 =
606 (dst_address->as_u32 & im->fib_masks[dst_address_length]);
607 a.dst_address_length = dst_address_length;
608 a.adj_index = adj_index;
609
610 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
611
612 set_leaf (&a, root - ip4_ply_pool, 0);
613}
614
615void
Neale Ranns6bb2db02021-08-06 12:24:14 +0000616ip4_mtrie_16_route_del (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
617 u32 dst_address_length, u32 adj_index,
618 u32 cover_address_length, u32 cover_adj_index)
Neale Rannsa3af3372017-03-28 03:49:52 -0700619{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000620 ip4_mtrie_set_unset_leaf_args_t a;
Neale Rannsa3af3372017-03-28 03:49:52 -0700621 ip4_main_t *im = &ip4_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700622
Neale Rannsa3af3372017-03-28 03:49:52 -0700623 /* Honor dst_address_length. Fib masks are in network byte order */
624 a.dst_address.as_u32 = (dst_address->as_u32 &
625 im->fib_masks[dst_address_length]);
626 a.dst_address_length = dst_address_length;
627 a.adj_index = adj_index;
628 a.cover_adj_index = cover_adj_index;
629 a.cover_address_length = cover_address_length;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700630
Neale Rannsa3af3372017-03-28 03:49:52 -0700631 /* the top level ply is never removed */
632 unset_root_leaf (m, &a);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700633}
634
Neale Ranns7244a702021-08-06 13:12:00 +0000635void
636ip4_mtrie_8_route_del (ip4_mtrie_8_t *m, const ip4_address_t *dst_address,
637 u32 dst_address_length, u32 adj_index,
638 u32 cover_address_length, u32 cover_adj_index)
639{
640 ip4_main_t *im = &ip4_main;
641
642 /* Honor dst_address_length. Fib masks are in network byte order */
643 ip4_mtrie_set_unset_leaf_args_t a = {
644 .dst_address.as_u32 =
645 (dst_address->as_u32 & im->fib_masks[dst_address_length]),
646 .dst_address_length = dst_address_length,
647 .adj_index = adj_index,
648 .cover_adj_index = cover_adj_index,
649 .cover_address_length = cover_address_length,
650 };
651
652 /* the top level ply is never removed */
653 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
654
655 unset_leaf (&a, root, 0);
656}
657
Ed Warnickecb9cada2015-12-08 15:45:58 -0700658/* Returns number of bytes of memory used by mtrie. */
Dave Barachd7cb1b52016-12-09 09:52:16 -0500659static uword
Neale Ranns7244a702021-08-06 13:12:00 +0000660mtrie_ply_memory_usage (ip4_mtrie_8_ply_t *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700661{
662 uword bytes, i;
663
Ed Warnickecb9cada2015-12-08 15:45:58 -0700664 bytes = sizeof (p[0]);
Dave Barachd7cb1b52016-12-09 09:52:16 -0500665 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700666 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000667 ip4_mtrie_leaf_t l = p->leaves[i];
668 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000669 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Neale Rannsa3af3372017-03-28 03:49:52 -0700670 }
671
672 return bytes;
673}
674
675/* Returns number of bytes of memory used by mtrie. */
Neale Rannsc87aafa2017-11-29 00:59:31 -0800676uword
Neale Ranns6bb2db02021-08-06 12:24:14 +0000677ip4_mtrie_16_memory_usage (ip4_mtrie_16_t *m)
Neale Rannsa3af3372017-03-28 03:49:52 -0700678{
679 uword bytes, i;
680
681 bytes = sizeof (*m);
682 for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
683 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000684 ip4_mtrie_leaf_t l = m->root_ply.leaves[i];
685 if (ip4_mtrie_leaf_is_next_ply (l))
Neale Ranns7244a702021-08-06 13:12:00 +0000686 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
687 }
688
689 return bytes;
690}
691uword
692ip4_mtrie_8_memory_usage (ip4_mtrie_8_t *m)
693{
694 ip4_mtrie_8_ply_t *root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
695 uword bytes, i;
696
697 bytes = sizeof (*m);
698 for (i = 0; i < ARRAY_LEN (root->leaves); i++)
699 {
700 ip4_mtrie_leaf_t l = root->leaves[i];
701 if (ip4_mtrie_leaf_is_next_ply (l))
702 bytes += mtrie_ply_memory_usage (get_next_ply_for_leaf (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700703 }
704
705 return bytes;
706}
707
Dave Barachd7cb1b52016-12-09 09:52:16 -0500708static u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000709format_ip4_mtrie_leaf (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700710{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000711 ip4_mtrie_leaf_t l = va_arg (*va, ip4_mtrie_leaf_t);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700712
Neale Ranns6bb2db02021-08-06 12:24:14 +0000713 if (ip4_mtrie_leaf_is_terminal (l))
714 s = format (s, "lb-index %d", ip4_mtrie_leaf_get_adj_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700715 else
Neale Ranns6bb2db02021-08-06 12:24:14 +0000716 s = format (s, "next ply %d", ip4_mtrie_leaf_get_next_ply_index (l));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700717 return s;
718}
719
Neale Ranns6bb2db02021-08-06 12:24:14 +0000720#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent) \
721 ({ \
722 u32 a, ia_length; \
723 ip4_address_t ia; \
Neale Ranns7244a702021-08-06 13:12:00 +0000724 ip4_mtrie_leaf_t _l = (_p)->leaves[(_i)]; \
Neale Ranns6bb2db02021-08-06 12:24:14 +0000725 \
726 a = (_base_address) + ((_a) << (32 - (_ply_max_len))); \
727 ia.as_u32 = clib_host_to_net_u32 (a); \
728 ia_length = (_p)->dst_address_bits_of_leaves[(_i)]; \
729 s = format (s, "\n%U%U %U", format_white_space, (_indent) + 4, \
730 format_ip4_address_and_length, &ia, ia_length, \
731 format_ip4_mtrie_leaf, _l); \
732 \
733 if (ip4_mtrie_leaf_is_next_ply (_l)) \
734 s = format (s, "\n%U", format_ip4_mtrie_ply, m, a, (_indent) + 8, \
735 ip4_mtrie_leaf_get_next_ply_index (_l)); \
736 s; \
737 })
Neale Rannsa3af3372017-03-28 03:49:52 -0700738
Dave Barachd7cb1b52016-12-09 09:52:16 -0500739static u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000740format_ip4_mtrie_ply (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700741{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000742 ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700743 u32 base_address = va_arg (*va, u32);
mu.duojiao59a82952018-10-11 14:27:30 +0800744 u32 indent = va_arg (*va, u32);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700745 u32 ply_index = va_arg (*va, u32);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000746 ip4_mtrie_8_ply_t *p;
Neale Rannsa3af3372017-03-28 03:49:52 -0700747 int i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700748
Neale Rannsa3af3372017-03-28 03:49:52 -0700749 p = pool_elt_at_index (ip4_ply_pool, ply_index);
mu.duojiao59a82952018-10-11 14:27:30 +0800750 s = format (s, "%Uply index %d, %d non-empty leaves",
751 format_white_space, indent, ply_index, p->n_non_empty_leafs);
Neale Rannsa3af3372017-03-28 03:49:52 -0700752
Ed Warnickecb9cada2015-12-08 15:45:58 -0700753 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
754 {
Neale Ranns6bb2db02021-08-06 12:24:14 +0000755 if (ip4_mtrie_leaf_is_non_empty (p, i))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700756 {
mu.duojiao59a82952018-10-11 14:27:30 +0800757 s = FORMAT_PLY (s, p, i, i, base_address,
Neale Ranns756cd942018-04-06 09:18:11 -0700758 p->dst_address_bits_base + 8, indent);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700759 }
760 }
761
762 return s;
763}
764
Dave Barachd7cb1b52016-12-09 09:52:16 -0500765u8 *
Neale Ranns6bb2db02021-08-06 12:24:14 +0000766format_ip4_mtrie_16 (u8 *s, va_list *va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700767{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000768 ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
Neale Ranns39194252017-11-27 01:03:25 -0800769 int verbose = va_arg (*va, int);
Neale Ranns6bb2db02021-08-06 12:24:14 +0000770 ip4_mtrie_16_ply_t *p;
Neale Rannsa3af3372017-03-28 03:49:52 -0700771 u32 base_address = 0;
772 int i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700773
Neale Ranns7244a702021-08-06 13:12:00 +0000774 s =
775 format (s, "16-8-8: %d plies, memory usage %U\n", pool_elts (ip4_ply_pool),
776 format_memory_size, ip4_mtrie_16_memory_usage (m));
Neale Rannsc87aafa2017-11-29 00:59:31 -0800777 p = &m->root_ply;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700778
Neale Ranns39194252017-11-27 01:03:25 -0800779 if (verbose)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700780 {
Neale Ranns39194252017-11-27 01:03:25 -0800781 s = format (s, "root-ply");
782 p = &m->root_ply;
Neale Rannsa3af3372017-03-28 03:49:52 -0700783
Neale Ranns39194252017-11-27 01:03:25 -0800784 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
Neale Rannsa3af3372017-03-28 03:49:52 -0700785 {
Neale Ranns39194252017-11-27 01:03:25 -0800786 u16 slot;
787
788 slot = clib_host_to_net_u16 (i);
789
790 if (p->dst_address_bits_of_leaves[slot] > 0)
791 {
mu.duojiao59a82952018-10-11 14:27:30 +0800792 s = FORMAT_PLY (s, p, i, slot, base_address, 16, 0);
Neale Ranns39194252017-11-27 01:03:25 -0800793 }
Neale Rannsa3af3372017-03-28 03:49:52 -0700794 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700795 }
796
797 return s;
798}
Dave Barachd7cb1b52016-12-09 09:52:16 -0500799
Neale Ranns7244a702021-08-06 13:12:00 +0000800u8 *
801format_ip4_mtrie_8 (u8 *s, va_list *va)
802{
803 ip4_mtrie_8_t *m = va_arg (*va, ip4_mtrie_8_t *);
804 int verbose = va_arg (*va, int);
805 ip4_mtrie_8_ply_t *root;
806 u32 base_address = 0;
807 u16 slot;
808
809 root = pool_elt_at_index (ip4_ply_pool, m->root_ply);
810
811 s = format (s, "8-8-8-8; %d plies, memory usage %U\n",
812 pool_elts (ip4_ply_pool), format_memory_size,
813 ip4_mtrie_8_memory_usage (m));
814
815 if (verbose)
816 {
817 s = format (s, "root-ply");
818
819 for (slot = 0; slot < ARRAY_LEN (root->leaves); slot++)
820 {
821 if (root->dst_address_bits_of_leaves[slot] > 0)
822 {
823 s = FORMAT_PLY (s, root, slot, slot, base_address, 8, 0);
824 }
825 }
826 }
827
828 return s;
829}
830
Neale Ranns1ec36522017-11-29 05:20:37 -0800831/** Default heap size for the IPv4 mtries */
832#define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
Dave Barach01a2a102020-06-11 08:57:52 -0400833#ifndef MAP_HUGE_SHIFT
834#define MAP_HUGE_SHIFT 26
835#endif
Neale Ranns1ec36522017-11-29 05:20:37 -0800836
Neale Rannsa3af3372017-03-28 03:49:52 -0700837static clib_error_t *
838ip4_mtrie_module_init (vlib_main_t * vm)
839{
Neale Ranns6bb2db02021-08-06 12:24:14 +0000840 CLIB_UNUSED (ip4_mtrie_8_ply_t * p);
Neale Ranns1ec36522017-11-29 05:20:37 -0800841 clib_error_t *error = NULL;
Neale Ranns1ec36522017-11-29 05:20:37 -0800842
843 /* Burn one ply so index 0 is taken */
Neale Rannsa3af3372017-03-28 03:49:52 -0700844 pool_get (ip4_ply_pool, p);
845
Neale Ranns1ec36522017-11-29 05:20:37 -0800846 return (error);
Neale Rannsa3af3372017-03-28 03:49:52 -0700847}
848
849VLIB_INIT_FUNCTION (ip4_mtrie_module_init);
850
Dave Barachd7cb1b52016-12-09 09:52:16 -0500851/*
852 * fd.io coding-style-patch-verification: ON
853 *
854 * Local Variables:
855 * eval: (c-set-style "gnu")
856 * End:
857 */