/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __IP_PUNT_DROP_H__
#define __IP_PUNT_DROP_H__

#include <vnet/ip/ip.h>
#include <vnet/policer/policer.h>
#include <vnet/policer/police_inlines.h>

/**
 * IP punt policer configuration.
 * The punt rate is policed to prevent the host from being overloaded.
 */
typedef struct ip_punt_policer_t_
{
  u32 policer_index;
  u32 fq_index;
} ip_punt_policer_t;

typedef enum ip_punt_policer_next_t_
{
  IP_PUNT_POLICER_NEXT_DROP,
  IP_PUNT_POLICER_NEXT_HANDOFF,
  IP_PUNT_POLICER_N_NEXT,
} ip_punt_policer_next_t;

typedef struct ip_punt_policer_trace_t_
{
  u32 policer_index;
  u32 next;
} ip_punt_policer_trace_t;

#define foreach_ip_punt_policer_error           \
_(DROP, "ip punt policer drop")

typedef enum
{
#define _(sym,str) IP_PUNT_POLICER_ERROR_##sym,
  foreach_ip_punt_policer_error
#undef _
  IP4_PUNT_POLICER_N_ERROR,
} ip_punt_policer_error_t;

extern u8 *format_ip_punt_policer_trace (u8 * s, va_list * args);
extern vlib_node_registration_t ip4_punt_policer_node;
extern ip_punt_policer_t ip4_punt_policer_cfg;
extern vlib_node_registration_t ip6_punt_policer_node;
extern ip_punt_policer_t ip6_punt_policer_cfg;
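
/*
 * Usage sketch (illustrative, not an API defined by this header): the per-AF
 * punt-policer nodes are expected to read the policer to apply from these
 * globals, so a control-plane handler that has already created a policer
 * would simply record its index here. `policer_index` is assumed to be a
 * valid handle obtained from the policer module:
 *
 *   ip4_punt_policer_cfg.policer_index = policer_index;
 *   ip6_punt_policer_cfg.policer_index = policer_index;
 */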

/**
 * IP punt policing node function
 */
always_inline uword
ip_punt_policer (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index, u32 policer_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  u64 time_in_policer_periods;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];

  time_in_policer_periods =
    clib_cpu_time_now () >> POLICER_TICKS_PER_PERIOD_SHIFT;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u8 act0, act1;
          u32 bi0, bi1;

          next0 = next1 = 0;
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          act0 = vnet_policer_police (vm, b0, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);
          act1 = vnet_policer_police (vm, b1, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);

          if (PREDICT_FALSE (act0 == QOS_ACTION_HANDOFF))
            {
              next0 = next1 = IP_PUNT_POLICER_NEXT_HANDOFF;
            }
          else
            {
              vnet_get_config_data (&cm->config_main,
                                    &b0->current_config_index, &next0, 0);
              vnet_get_config_data (&cm->config_main,
                                    &b1->current_config_index, &next1, 0);

              if (PREDICT_FALSE (act0 == QOS_ACTION_DROP))
                {
                  next0 = IP_PUNT_POLICER_NEXT_DROP;
                  b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }
              if (PREDICT_FALSE (act1 == QOS_ACTION_DROP))
                {
                  next1 = IP_PUNT_POLICER_NEXT_DROP;
                  b1->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->next = next0;
                  t->policer_index = policer_index;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->next = next1;
                  t->policer_index = policer_index;
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;
          u8 act0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          act0 = vnet_policer_police (vm, b0, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);
          if (PREDICT_FALSE (act0 == QOS_ACTION_HANDOFF))
            {
              next0 = IP_PUNT_POLICER_NEXT_HANDOFF;
            }
          else
            {
              vnet_get_config_data (&cm->config_main,
                                    &b0->current_config_index, &next0, 0);

              if (PREDICT_FALSE (act0 == QOS_ACTION_DROP))
                {
                  next0 = IP_PUNT_POLICER_NEXT_DROP;
                  b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->next = next0;
                  t->policer_index = policer_index;
                }
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
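
/*
 * A minimal sketch of how a per-address-family graph node might wrap this
 * inline. The node name and the arc index variable are illustrative
 * assumptions, not definitions from this header:
 *
 *   VLIB_NODE_FN (my_ip4_punt_policer_node) (vlib_main_t * vm,
 *                                            vlib_node_runtime_t * node,
 *                                            vlib_frame_t * frame)
 *   {
 *     // assumed: my_ip4_punt_arc_index holds the "ip4-punt" feature arc,
 *     // resolved elsewhere at init time
 *     return ip_punt_policer (vm, node, frame, my_ip4_punt_arc_index,
 *                             ip4_punt_policer_cfg.policer_index);
 *   }
 */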

/**
 * IP punt redirect per-RX-interface configuration:
 * redirects punted traffic to another location.
 */
typedef struct ip_punt_redirect_rx_t_
{
  /**
   * Node linkage into the FIB graph
   */
  fib_node_t node;

  fib_protocol_t fproto;
  fib_forward_chain_type_t payload_type;
  fib_node_index_t pl;
  u32 sibling;

  /**
   * redirect forwarding
   */
  dpo_id_t dpo;
} ip_punt_redirect_rx_t;

/**
 * IP punt redirect configuration
 */
typedef struct ip_punt_redirect_t_
{
  ip_punt_redirect_rx_t *pool;

  /**
   * Per-RX-interface configuration.
   * sw_if_index = 0 (an interface from which packets are never received)
   * is used to indicate 'redirect from any interface'.
   */
  index_t *redirect_by_rx_sw_if_index[FIB_PROTOCOL_IP_MAX];
} ip_punt_redirect_cfg_t;

extern ip_punt_redirect_cfg_t ip_punt_redirect_cfg;
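
/*
 * Lookup sketch, mirroring the fallback rule above (and what
 * ip_punt_redirect () below does): a per-interface entry is preferred, and
 * the 'from-any' entry at index 0 is the fallback. The local variables
 * `redirects`, `rrxi` and `rx_sw_if_index` are assumptions for the example:
 *
 *   index_t *redirects =
 *     ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[FIB_PROTOCOL_IP4];
 *   index_t rrxi = INDEX_INVALID;
 *
 *   if (vec_len (redirects) > rx_sw_if_index)
 *     rrxi = redirects[rx_sw_if_index];
 *   if (INDEX_INVALID == rrxi && vec_len (redirects) >= 1)
 *     rrxi = redirects[0];
 */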

/**
 * IP punt redirect next nodes
 */
typedef enum ip_punt_redirect_next_t_
{
  IP_PUNT_REDIRECT_NEXT_DROP,
  IP_PUNT_REDIRECT_NEXT_TX,
  IP_PUNT_REDIRECT_NEXT_ARP,
  IP_PUNT_REDIRECT_N_NEXT,
} ip_punt_redirect_next_t;

/**
 * IP punt redirect trace
 */
typedef struct ip4_punt_redirect_trace_t_
{
  index_t rrxi;
  u32 next;
} ip_punt_redirect_trace_t;

/**
 * Add a punt redirect entry
 */
extern void ip_punt_redirect_add (fib_protocol_t fproto, u32 rx_sw_if_index,
                                  fib_forward_chain_type_t ct,
                                  const fib_route_path_t *rpaths);
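
/*
 * Call sketch: redirect IP4 punts received on `rx_sw_if_index` out of
 * `tx_sw_if_index` via next-hop `nh_addr`. All three variables are
 * assumptions supplied by the caller; the fib_route_path_t fields used are
 * the ones commonly populated elsewhere in VPP:
 *
 *   fib_route_path_t rpath = {
 *     .frp_proto = DPO_PROTO_IP4,
 *     .frp_addr = nh_addr,
 *     .frp_sw_if_index = tx_sw_if_index,
 *     .frp_weight = 1,
 *   };
 *   fib_route_path_t *rpaths = NULL;
 *
 *   vec_add1 (rpaths, rpath);
 *   ip_punt_redirect_add (FIB_PROTOCOL_IP4, rx_sw_if_index,
 *                         FIB_FORW_CHAIN_TYPE_UNICAST_IP4, rpaths);
 *   vec_free (rpaths);
 */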

extern void ip_punt_redirect_del (fib_protocol_t fproto, u32 rx_sw_if_index);
extern index_t ip_punt_redirect_find (fib_protocol_t fproto,
                                      u32 rx_sw_if_index);
extern u8 *format_ip_punt_redirect (u8 * s, va_list * args);

extern u8 *format_ip_punt_redirect_trace (u8 * s, va_list * args);

typedef walk_rc_t (*ip_punt_redirect_walk_cb_t) (u32 rx_sw_if_index,
                                                 const ip_punt_redirect_rx_t *
                                                 redirect, void *arg);
extern void ip_punt_redirect_walk (fib_protocol_t fproto,
                                   ip_punt_redirect_walk_cb_t cb, void *ctx);
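
/*
 * Walk sketch (the callback name and its use of the `arg` cookie are
 * illustrative assumptions): visit every IP4 redirect entry, e.g. to dump
 * them to a CLI or API consumer.
 *
 *   static walk_rc_t
 *   my_redirect_walk_cb (u32 rx_sw_if_index,
 *                        const ip_punt_redirect_rx_t * redirect, void *arg)
 *   {
 *     // inspect redirect->dpo / redirect->payload_type here
 *     return WALK_CONTINUE;
 *   }
 *
 *   ip_punt_redirect_walk (FIB_PROTOCOL_IP4, my_redirect_walk_cb, NULL);
 */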

static_always_inline ip_punt_redirect_rx_t *
ip_punt_redirect_get (index_t rrxi)
{
  return (pool_elt_at_index (ip_punt_redirect_cfg.pool, rrxi));
}

always_inline uword
ip_punt_redirect (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame, u8 arc_index, fib_protocol_t fproto)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];
  index_t *redirects;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  redirects = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 rx_sw_if_index0, rrxi0;
          ip_punt_redirect_rx_t *rrx0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          rrxi0 = INDEX_INVALID;
          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_get_config_data (&cm->config_main,
                                &b0->current_config_index, &next0, 0);

          rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /*
           * If config exists for this particular RX interface use it,
           * else use the default (at RX = 0)
           */
          if (vec_len (redirects) > rx_sw_if_index0)
            {
              rrxi0 = redirects[rx_sw_if_index0];
              if (INDEX_INVALID == rrxi0)
                rrxi0 = redirects[0];
            }
          else if (vec_len (redirects) >= 1)
            rrxi0 = redirects[0];

          if (PREDICT_TRUE (INDEX_INVALID != rrxi0))
            {
              /* prevent ttl decrement on forward */
              b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
              rrx0 = ip_punt_redirect_get (rrxi0);
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = rrx0->dpo.dpoi_index;
              next0 = rrx0->dpo.dpoi_next_node;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_redirect_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next = next0;
              t->rrxi = rrxi0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

always_inline uword
ip_drop_or_punt (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          vlib_buffer_t *b0, *b1, *b2, *b3;
          u32 next0, next1, next2, next3;
          u32 bi0, bi1, bi2, bi3;

          next0 = next1 = next2 = next3 = 0;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            vlib_prefetch_buffer_header (p6, LOAD);
            vlib_prefetch_buffer_header (p7, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          /* punt and drop features are not associated with a given interface
           * so the special index 0 is used */
          vnet_feature_arc_start (arc_index, 0, &next0, b0);
          vnet_feature_arc_start (arc_index, 0, &next1, b1);
          vnet_feature_arc_start (arc_index, 0, &next2, b2);
          vnet_feature_arc_start (arc_index, 0, &next3, b3);

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_feature_arc_start (arc_index, 0, &next0, b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
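
/*
 * A minimal sketch of how a drop or punt node might wrap this inline; each
 * node supplies the feature arc it starts. `my_ip4_drop_node` and
 * `my_ip4_drop_arc_index` are illustrative assumptions:
 *
 *   VLIB_NODE_FN (my_ip4_drop_node) (vlib_main_t * vm,
 *                                    vlib_node_runtime_t * node,
 *                                    vlib_frame_t * frame)
 *   {
 *     return ip_drop_or_punt (vm, node, frame, my_ip4_drop_arc_index);
 *   }
 */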

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */