/*
 * Copyright (c) 2015-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_forward.h: IP v4 forwarding
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __included_ip4_forward_h__
#define __included_ip4_forward_h__

#include <vppinfra/cache.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance_map.h>

/**
 * @file
 * @brief IPv4 Forwarding.
 *
 * This file contains the source code for IPv4 forwarding.
 */

54always_inline uword
55ip4_lookup_inline (vlib_main_t * vm,
56 vlib_node_runtime_t * node,
57 vlib_frame_t * frame,
58 int lookup_for_responses_to_locally_received_packets)
59{
60 ip4_main_t *im = &ip4_main;
61 vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
Neale Ranns879132d2019-06-07 02:52:36 -040062 u32 n_left, *from;
Damjan Marion067cd622018-07-11 12:47:43 +020063 u32 thread_index = vm->thread_index;
Zhiyong Yang689f5b52019-04-24 01:31:14 -040064 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
65 vlib_buffer_t **b = bufs;
Neale Ranns879132d2019-06-07 02:52:36 -040066 u16 nexts[VLIB_FRAME_SIZE], *next;
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -080067
68 from = vlib_frame_vector_args (frame);
Neale Ranns879132d2019-06-07 02:52:36 -040069 n_left = frame->n_vectors;
70 next = nexts;
71 vlib_get_buffers (vm, from, bufs, n_left);
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -080072
Lijian Zhang2e237212018-09-10 17:13:56 +080073#if (CLIB_N_PREFETCHES >= 8)
Neale Ranns879132d2019-06-07 02:52:36 -040074 while (n_left >= 4)
75 {
76 ip4_header_t *ip0, *ip1, *ip2, *ip3;
77 const load_balance_t *lb0, *lb1, *lb2, *lb3;
78 ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
79 ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
80 ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
81 u32 lb_index0, lb_index1, lb_index2, lb_index3;
82 flow_hash_config_t flow_hash_config0, flow_hash_config1;
83 flow_hash_config_t flow_hash_config2, flow_hash_config3;
84 u32 hash_c0, hash_c1, hash_c2, hash_c3;
85 const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
86
87 /* Prefetch next iteration. */
88 if (n_left >= 8)
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -080089 {
Neale Ranns879132d2019-06-07 02:52:36 -040090 vlib_prefetch_buffer_header (b[4], LOAD);
91 vlib_prefetch_buffer_header (b[5], LOAD);
92 vlib_prefetch_buffer_header (b[6], LOAD);
93 vlib_prefetch_buffer_header (b[7], LOAD);
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -080094
Neale Ranns879132d2019-06-07 02:52:36 -040095 CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
96 CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
97 CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
98 CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -080099 }
100
Neale Ranns879132d2019-06-07 02:52:36 -0400101 ip0 = vlib_buffer_get_current (b[0]);
102 ip1 = vlib_buffer_get_current (b[1]);
103 ip2 = vlib_buffer_get_current (b[2]);
104 ip3 = vlib_buffer_get_current (b[3]);
105
106 dst_addr0 = &ip0->dst_address;
107 dst_addr1 = &ip1->dst_address;
108 dst_addr2 = &ip2->dst_address;
109 dst_addr3 = &ip3->dst_address;
110
111 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
112 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
113 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
114 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);
115
116 if (!lookup_for_responses_to_locally_received_packets)
117 {
118 mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
119 mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
120 mtrie2 = &ip4_fib_get (vnet_buffer (b[2])->ip.fib_index)->mtrie;
121 mtrie3 = &ip4_fib_get (vnet_buffer (b[3])->ip.fib_index)->mtrie;
122
123 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
124 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
125 leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2);
126 leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
127 }
128
129 if (!lookup_for_responses_to_locally_received_packets)
130 {
131 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
132 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
133 leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
134 leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);
135 }
136
137 if (!lookup_for_responses_to_locally_received_packets)
138 {
139 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
140 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
141 leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
142 leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);
143 }
144
145 if (lookup_for_responses_to_locally_received_packets)
146 {
147 lb_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_RX];
148 lb_index1 = vnet_buffer (b[1])->ip.adj_index[VLIB_RX];
149 lb_index2 = vnet_buffer (b[2])->ip.adj_index[VLIB_RX];
150 lb_index3 = vnet_buffer (b[3])->ip.adj_index[VLIB_RX];
151 }
152 else
153 {
154 lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
155 lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
156 lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
157 lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
158 }
159
160 ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
161 lb0 = load_balance_get (lb_index0);
162 lb1 = load_balance_get (lb_index1);
163 lb2 = load_balance_get (lb_index2);
164 lb3 = load_balance_get (lb_index3);
165
166 ASSERT (lb0->lb_n_buckets > 0);
167 ASSERT (is_pow2 (lb0->lb_n_buckets));
168 ASSERT (lb1->lb_n_buckets > 0);
169 ASSERT (is_pow2 (lb1->lb_n_buckets));
170 ASSERT (lb2->lb_n_buckets > 0);
171 ASSERT (is_pow2 (lb2->lb_n_buckets));
172 ASSERT (lb3->lb_n_buckets > 0);
173 ASSERT (is_pow2 (lb3->lb_n_buckets));
174
175 /* Use flow hash to compute multipath adjacency. */
176 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
177 hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
178 hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
179 hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
180 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
181 {
182 flow_hash_config0 = lb0->lb_hash_config;
183 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
184 ip4_compute_flow_hash (ip0, flow_hash_config0);
185 dpo0 =
186 load_balance_get_fwd_bucket (lb0,
187 (hash_c0 &
188 (lb0->lb_n_buckets_minus_1)));
189 }
190 else
191 {
192 dpo0 = load_balance_get_bucket_i (lb0, 0);
193 }
194 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
195 {
196 flow_hash_config1 = lb1->lb_hash_config;
197 hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
198 ip4_compute_flow_hash (ip1, flow_hash_config1);
199 dpo1 =
200 load_balance_get_fwd_bucket (lb1,
201 (hash_c1 &
202 (lb1->lb_n_buckets_minus_1)));
203 }
204 else
205 {
206 dpo1 = load_balance_get_bucket_i (lb1, 0);
207 }
208 if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
209 {
210 flow_hash_config2 = lb2->lb_hash_config;
211 hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
212 ip4_compute_flow_hash (ip2, flow_hash_config2);
213 dpo2 =
214 load_balance_get_fwd_bucket (lb2,
215 (hash_c2 &
216 (lb2->lb_n_buckets_minus_1)));
217 }
218 else
219 {
220 dpo2 = load_balance_get_bucket_i (lb2, 0);
221 }
222 if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
223 {
224 flow_hash_config3 = lb3->lb_hash_config;
225 hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
226 ip4_compute_flow_hash (ip3, flow_hash_config3);
227 dpo3 =
228 load_balance_get_fwd_bucket (lb3,
229 (hash_c3 &
230 (lb3->lb_n_buckets_minus_1)));
231 }
232 else
233 {
234 dpo3 = load_balance_get_bucket_i (lb3, 0);
235 }
236
237 next[0] = dpo0->dpoi_next_node;
238 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
239 next[1] = dpo1->dpoi_next_node;
240 vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
241 next[2] = dpo2->dpoi_next_node;
242 vnet_buffer (b[2])->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
243 next[3] = dpo3->dpoi_next_node;
244 vnet_buffer (b[3])->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
245
246 vlib_increment_combined_counter
247 (cm, thread_index, lb_index0, 1,
248 vlib_buffer_length_in_chain (vm, b[0]));
249 vlib_increment_combined_counter
250 (cm, thread_index, lb_index1, 1,
251 vlib_buffer_length_in_chain (vm, b[1]));
252 vlib_increment_combined_counter
253 (cm, thread_index, lb_index2, 1,
254 vlib_buffer_length_in_chain (vm, b[2]));
255 vlib_increment_combined_counter
256 (cm, thread_index, lb_index3, 1,
257 vlib_buffer_length_in_chain (vm, b[3]));
258
259 b += 4;
260 next += 4;
261 n_left -= 4;
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -0800262 }
Neale Ranns879132d2019-06-07 02:52:36 -0400263#elif (CLIB_N_PREFETCHES >= 4)
264 while (n_left >= 4)
265 {
266 ip4_header_t *ip0, *ip1;
267 const load_balance_t *lb0, *lb1;
268 ip4_fib_mtrie_t *mtrie0, *mtrie1;
269 ip4_fib_mtrie_leaf_t leaf0, leaf1;
270 ip4_address_t *dst_addr0, *dst_addr1;
271 u32 lb_index0, lb_index1;
272 flow_hash_config_t flow_hash_config0, flow_hash_config1;
273 u32 hash_c0, hash_c1;
274 const dpo_id_t *dpo0, *dpo1;
275
276 /* Prefetch next iteration. */
277 {
278 vlib_prefetch_buffer_header (b[2], LOAD);
279 vlib_prefetch_buffer_header (b[3], LOAD);
280
281 CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
282 CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
283 }
284
285 ip0 = vlib_buffer_get_current (b[0]);
286 ip1 = vlib_buffer_get_current (b[1]);
287
288 dst_addr0 = &ip0->dst_address;
289 dst_addr1 = &ip1->dst_address;
290
291 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
292 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
293
294 if (!lookup_for_responses_to_locally_received_packets)
295 {
296 mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
297 mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
298
299 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
300 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
301 }
302
303 if (!lookup_for_responses_to_locally_received_packets)
304 {
305 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
306 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
307 }
308
309 if (!lookup_for_responses_to_locally_received_packets)
310 {
311 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
312 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
313 }
314
315 if (lookup_for_responses_to_locally_received_packets)
316 {
317 lb_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_RX];
318 lb_index1 = vnet_buffer (b[1])->ip.adj_index[VLIB_RX];
319 }
320 else
321 {
322 lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
323 lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
324 }
325
326 ASSERT (lb_index0 && lb_index1);
327 lb0 = load_balance_get (lb_index0);
328 lb1 = load_balance_get (lb_index1);
329
330 ASSERT (lb0->lb_n_buckets > 0);
331 ASSERT (is_pow2 (lb0->lb_n_buckets));
332 ASSERT (lb1->lb_n_buckets > 0);
333 ASSERT (is_pow2 (lb1->lb_n_buckets));
334
335 /* Use flow hash to compute multipath adjacency. */
336 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
337 hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
338 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
339 {
340 flow_hash_config0 = lb0->lb_hash_config;
341 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
342 ip4_compute_flow_hash (ip0, flow_hash_config0);
343 dpo0 =
344 load_balance_get_fwd_bucket (lb0,
345 (hash_c0 &
346 (lb0->lb_n_buckets_minus_1)));
347 }
348 else
349 {
350 dpo0 = load_balance_get_bucket_i (lb0, 0);
351 }
352 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
353 {
354 flow_hash_config1 = lb1->lb_hash_config;
355 hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
356 ip4_compute_flow_hash (ip1, flow_hash_config1);
357 dpo1 =
358 load_balance_get_fwd_bucket (lb1,
359 (hash_c1 &
360 (lb1->lb_n_buckets_minus_1)));
361 }
362 else
363 {
364 dpo1 = load_balance_get_bucket_i (lb1, 0);
365 }
366
367 next[0] = dpo0->dpoi_next_node;
368 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
369 next[1] = dpo1->dpoi_next_node;
370 vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
371
372 vlib_increment_combined_counter
373 (cm, thread_index, lb_index0, 1,
374 vlib_buffer_length_in_chain (vm, b[0]));
375 vlib_increment_combined_counter
376 (cm, thread_index, lb_index1, 1,
377 vlib_buffer_length_in_chain (vm, b[1]));
378
379 b += 2;
380 next += 2;
381 n_left -= 2;
382 }
383#endif
384 while (n_left > 0)
385 {
386 ip4_header_t *ip0;
387 const load_balance_t *lb0;
388 ip4_fib_mtrie_t *mtrie0;
389 ip4_fib_mtrie_leaf_t leaf0;
390 ip4_address_t *dst_addr0;
391 u32 lbi0;
392 flow_hash_config_t flow_hash_config0;
393 const dpo_id_t *dpo0;
394 u32 hash_c0;
395
396 ip0 = vlib_buffer_get_current (b[0]);
397 dst_addr0 = &ip0->dst_address;
398 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
399
400 if (!lookup_for_responses_to_locally_received_packets)
401 {
402 mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
403 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
404 }
405
406 if (!lookup_for_responses_to_locally_received_packets)
407 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
408
409 if (!lookup_for_responses_to_locally_received_packets)
410 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
411
412 if (lookup_for_responses_to_locally_received_packets)
413 lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_RX];
414 else
415 {
416 /* Handle default route. */
417 lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
418 }
419
420 ASSERT (lbi0);
421 lb0 = load_balance_get (lbi0);
422
423 ASSERT (lb0->lb_n_buckets > 0);
424 ASSERT (is_pow2 (lb0->lb_n_buckets));
425
426 /* Use flow hash to compute multipath adjacency. */
427 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
428 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
429 {
430 flow_hash_config0 = lb0->lb_hash_config;
431
432 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
433 ip4_compute_flow_hash (ip0, flow_hash_config0);
434 dpo0 =
435 load_balance_get_fwd_bucket (lb0,
436 (hash_c0 &
437 (lb0->lb_n_buckets_minus_1)));
438 }
439 else
440 {
441 dpo0 = load_balance_get_bucket_i (lb0, 0);
442 }
443
444 next[0] = dpo0->dpoi_next_node;
445 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
446
447 vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
448 vlib_buffer_length_in_chain (vm,
449 b[0]));
450
451 b += 1;
452 next += 1;
453 n_left -= 1;
454 }
455
456 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
Vijayabhaskar Katamreddyacbde662018-01-23 13:39:40 -0800457
458 if (node->flags & VLIB_NODE_FLAG_TRACE)
459 ip4_forward_next_trace (vm, node, frame, VLIB_TX);
460
461 return frame->n_vectors;
462}

#endif /* __included_ip4_forward_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */