/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __IP_PUNT_DROP_H__
#define __IP_PUNT_DROP_H__

#include <vnet/ip/ip.h>
#include <vnet/policer/policer.h>
#include <vnet/policer/police_inlines.h>

/**
 * IP punt policer configuration.
 * The punt rate is policed to prevent overloading the host.
 */
typedef struct ip_punt_policer_t_
{
  u32 policer_index;
  u32 fq_index;
} ip_punt_policer_t;
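
/*
 * Note: 'policer_index' identifies the vnet policer applied to punted
 * packets; 'fq_index' is (by assumption, from its use alongside the
 * handoff next node) the frame-queue used to hand packets off to the
 * thread that owns the policer.
 */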

typedef enum ip_punt_policer_next_t_
{
  IP_PUNT_POLICER_NEXT_DROP,
  IP_PUNT_POLICER_NEXT_HANDOFF,
  IP_PUNT_POLICER_N_NEXT,
} ip_punt_policer_next_t;

typedef struct ip_punt_policer_trace_t_
{
  u32 policer_index;
  u32 next;
} ip_punt_policer_trace_t;

#define foreach_ip_punt_policer_error           \
_(DROP, "ip punt policer drop")

typedef enum
{
#define _(sym,str) IP_PUNT_POLICER_ERROR_##sym,
  foreach_ip_punt_policer_error
#undef _
  IP4_PUNT_POLICER_N_ERROR,
} ip_punt_policer_error_t;

extern u8 *format_ip_punt_policer_trace (u8 * s, va_list * args);
extern vlib_node_registration_t ip4_punt_policer_node;
extern ip_punt_policer_t ip4_punt_policer_cfg;
extern vlib_node_registration_t ip6_punt_policer_node;
extern ip_punt_policer_t ip6_punt_policer_cfg;
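
/*
 * The ip4 and ip6 punt-policer nodes share the ip_punt_policer() worker
 * below; they differ only in the feature arc and the policer they pass in.
 */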

/**
 * IP punt policing node function
 */
always_inline uword
ip_punt_policer (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index, u32 policer_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  u64 time_in_policer_periods;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];

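  /*
   * Convert the CPU time-stamp counter into policer periods, the time
   * unit in which the policer accounts.
   */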
  time_in_policer_periods =
    clib_cpu_time_now () >> POLICER_TICKS_PER_PERIOD_SHIFT;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u8 act0, act1;
          u32 bi0, bi1;

          next0 = next1 = 0;
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          act0 = vnet_policer_police (vm, b0, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);
          act1 = vnet_policer_police (vm, b1, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);

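          /*
           * Act on the policer verdicts: a handoff verdict sends the pair
           * to the thread that owns the policer, a drop verdict sends the
           * packet to the drop next and notes the error, otherwise the
           * packet continues along the punt feature arc.
           */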
          if (PREDICT_FALSE (act0 == QOS_ACTION_HANDOFF))
            {
              next0 = next1 = IP_PUNT_POLICER_NEXT_HANDOFF;
            }
          else
            {
              vnet_get_config_data (&cm->config_main,
                                    &b0->current_config_index, &next0, 0);
              vnet_get_config_data (&cm->config_main,
                                    &b1->current_config_index, &next1, 0);

              if (PREDICT_FALSE (act0 == QOS_ACTION_DROP))
                {
                  next0 = IP_PUNT_POLICER_NEXT_DROP;
                  b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }
              if (PREDICT_FALSE (act1 == QOS_ACTION_DROP))
                {
                  next1 = IP_PUNT_POLICER_NEXT_DROP;
                  b1->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->next = next0;
                  t->policer_index = policer_index;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->next = next1;
                  t->policer_index = policer_index;
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;
          u8 act0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          act0 = vnet_policer_police (vm, b0, policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM, true);
          if (PREDICT_FALSE (act0 == QOS_ACTION_HANDOFF))
            {
              next0 = IP_PUNT_POLICER_NEXT_HANDOFF;
            }
          else
            {
              vnet_get_config_data (&cm->config_main,
                                    &b0->current_config_index, &next0, 0);

              if (PREDICT_FALSE (act0 == QOS_ACTION_DROP))
                {
                  next0 = IP_PUNT_POLICER_NEXT_DROP;
                  b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
                }

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip_punt_policer_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->next = next0;
                  t->policer_index = policer_index;
                }
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
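
/*
 * A minimal sketch (not part of this header) of how a graph node can use
 * the inline above; the feature-arc index name is illustrative only and
 * depends on how the punt arc was registered:
 *
 *    VLIB_NODE_FN (ip4_punt_policer_node) (vlib_main_t * vm,
 *                                          vlib_node_runtime_t * node,
 *                                          vlib_frame_t * frame)
 *    {
 *      return (ip_punt_policer (vm, node, frame,
 *                               ip4_punt_policer_arc_index,
 *                               ip4_punt_policer_cfg.policer_index));
 *    }
 */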

/**
 * IP punt redirect per-RX-interface configuration:
 * redirect punted traffic to another location.
 */
typedef struct ip_punt_redirect_rx_t_
{
  /**
   * Node linkage into the FIB graph
   */
  fib_node_t node;

  fib_protocol_t fproto;
  fib_forward_chain_type_t payload_type;
  fib_node_index_t pl;
  u32 sibling;

  /**
   * redirect forwarding
   */
  dpo_id_t dpo;
} ip_punt_redirect_rx_t;
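
/*
 * Each redirect is linked into the FIB graph as a child of the path-list
 * 'pl' ('sibling' being the child index), so the cached 'dpo' can be
 * refreshed when the forwarding contributed by the paths changes.
 */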

/**
 * IP punt redirect configuration
 */
typedef struct ip_punt_redirect_t_
{
  ip_punt_redirect_rx_t *pool;

  /**
   * per-RX interface configuration.
   * sw_if_index = 0 (from which packets are never received) is used to
   * indicate 'from-any'
   */
  index_t *redirect_by_rx_sw_if_index[FIB_PROTOCOL_IP_MAX];
} ip_punt_redirect_cfg_t;

extern ip_punt_redirect_cfg_t ip_punt_redirect_cfg;

/**
 * IP punt redirect next nodes
 */
typedef enum ip_punt_redirect_next_t_
{
  IP_PUNT_REDIRECT_NEXT_DROP,
  IP_PUNT_REDIRECT_NEXT_TX,
  IP_PUNT_REDIRECT_NEXT_ARP,
  IP_PUNT_REDIRECT_N_NEXT,
} ip_punt_redirect_next_t;

/**
 * IP Punt redirect trace
 */
typedef struct ip4_punt_redirect_trace_t_
{
  index_t rrxi;
  u32 next;
} ip_punt_redirect_trace_t;

/**
 * Add a punt redirect entry
 */
extern void ip_punt_redirect_add (fib_protocol_t fproto,
                                  u32 rx_sw_if_index,
                                  fib_forward_chain_type_t ct,
                                  fib_route_path_t * rpaths);

extern void ip_punt_redirect_del (fib_protocol_t fproto, u32 rx_sw_if_index);
extern index_t ip_punt_redirect_find (fib_protocol_t fproto,
                                      u32 rx_sw_if_index);
extern u8 *format_ip_punt_redirect (u8 * s, va_list * args);

extern u8 *format_ip_punt_redirect_trace (u8 * s, va_list * args);

typedef walk_rc_t (*ip_punt_redirect_walk_cb_t) (u32 rx_sw_if_index,
                                                 const ip_punt_redirect_rx_t *
                                                 redirect, void *arg);
extern void ip_punt_redirect_walk (fib_protocol_t fproto,
                                   ip_punt_redirect_walk_cb_t cb, void *ctx);
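
/*
 * A hedged usage sketch (illustrative only): redirect IPv4 packets punted
 * on 'rx_sw_if_index' towards a next-hop 'nh_addr' out of 'tx_sw_if_index'.
 * The fib_route_path_t initialisation shows fields a caller would typically
 * fill in; it is an assumption, not a prescription:
 *
 *    fib_route_path_t *rpaths = NULL, path = {
 *      .frp_proto = DPO_PROTO_IP4,
 *      .frp_addr = nh_addr,
 *      .frp_sw_if_index = tx_sw_if_index,
 *      .frp_weight = 1,
 *    };
 *    vec_add1 (rpaths, path);
 *    ip_punt_redirect_add (FIB_PROTOCOL_IP4, rx_sw_if_index,
 *                          FIB_FORW_CHAIN_TYPE_UNICAST_IP4, rpaths);
 *    vec_free (rpaths);
 */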

/**
 * Get a punt redirect entry from its index
 */
static_always_inline ip_punt_redirect_rx_t *
ip_punt_redirect_get (index_t rrxi)
{
  return (pool_elt_at_index (ip_punt_redirect_cfg.pool, rrxi));
}

/**
 * IP punt redirect node function
 */
always_inline uword
ip_punt_redirect (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame, u8 arc_index, fib_protocol_t fproto)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];
  index_t *redirects;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  redirects = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 rx_sw_if_index0, rrxi0;
          ip_punt_redirect_rx_t *rrx0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          rrxi0 = INDEX_INVALID;
          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_get_config_data (&cm->config_main,
                                &b0->current_config_index, &next0, 0);

          rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /*
           * If config exists for this particular RX interface use it,
           * else use the default (at RX = 0)
           */
          if (vec_len (redirects) > rx_sw_if_index0)
            {
              rrxi0 = redirects[rx_sw_if_index0];
              if (INDEX_INVALID == rrxi0)
                rrxi0 = redirects[0];
            }
          else if (vec_len (redirects) >= 1)
            rrxi0 = redirects[0];

          if (PREDICT_TRUE (INDEX_INVALID != rrxi0))
            {
              rrx0 = ip_punt_redirect_get (rrxi0);
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = rrx0->dpo.dpoi_index;
              next0 = rrx0->dpo.dpoi_next_node;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_redirect_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next = next0;
              t->rrxi = rrxi0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
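
/*
 * A sketch (assumed node and arc names) of a redirect node built on the
 * inline above; the arc index would come from the punt arc's registration:
 *
 *    VLIB_NODE_FN (ip4_punt_redirect_node) (vlib_main_t * vm,
 *                                           vlib_node_runtime_t * node,
 *                                           vlib_frame_t * frame)
 *    {
 *      return (ip_punt_redirect (vm, node, frame,
 *                                ip4_punt_redirect_arc_index,
 *                                FIB_PROTOCOL_IP4));
 *    }
 */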

/**
 * IP drop / punt node function: start the configured feature arc for
 * each packet; the drop and punt features hang off that arc.
 */
always_inline uword
ip_drop_or_punt (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          vlib_buffer_t *b0, *b1, *b2, *b3;
          u32 next0, next1, next2, next3;
          u32 bi0, bi1, bi2, bi3;

          next0 = next1 = next2 = next3 = 0;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            vlib_prefetch_buffer_header (p6, LOAD);
            vlib_prefetch_buffer_header (p7, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          /* punt and drop features are not associated with a given interface
           * so the special index 0 is used */
          vnet_feature_arc_start (arc_index, 0, &next0, b0);
          vnet_feature_arc_start (arc_index, 0, &next1, b1);
          vnet_feature_arc_start (arc_index, 0, &next2, b2);
          vnet_feature_arc_start (arc_index, 0, &next3, b3);

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_feature_arc_start (arc_index, 0, &next0, b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
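
/*
 * Sketch (assumed node and arc names) of a drop node built on this inline:
 *
 *    VLIB_NODE_FN (ip4_drop_node) (vlib_main_t * vm,
 *                                  vlib_node_runtime_t * node,
 *                                  vlib_frame_t * frame)
 *    {
 *      return (ip_drop_or_punt (vm, node, frame,
 *                               vnet_feat_arc_ip4_drop.feature_arc_index));
 *    }
 */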

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */