/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __IP_PUNT_DROP_H__
#define __IP_PUNT_DROP_H__

#include <vnet/ip/ip.h>
#include <vnet/policer/policer.h>
#include <vnet/policer/police_inlines.h>

/**
 * IP4 punt policer configuration
 * The punt rate is policed to prevent overloading the host.
 */
typedef struct ip_punt_policer_t_
{
  /**
   * The index of the policer applied to punted packets
   */
  u32 policer_index;
} ip_punt_policer_t;
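
/**
 * Illustrative sketch (not part of this header): the policer itself is
 * created by the policer subsystem elsewhere; this structure only records
 * its index. The names below (ip4_punt_policer_cfg, ip4_punt_policer_set)
 * are hypothetical and show only how a configured policer's index might
 * be stored for use by the punt-policer node.
 *
 *   static ip_punt_policer_t ip4_punt_policer_cfg = {
 *     .policer_index = ~0,
 *   };
 *
 *   static void
 *   ip4_punt_policer_set (u32 policer_index)
 *   {
 *     ip4_punt_policer_cfg.policer_index = policer_index;
 *   }
 */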

typedef enum ip_punt_policer_next_t_
{
  IP_PUNT_POLICER_NEXT_DROP,
  IP_PUNT_POLICER_N_NEXT,
} ip_punt_policer_next_t;

typedef struct ip_punt_policer_trace_t_
{
  u32 policer_index;
  u32 next;
} ip_punt_policer_trace_t;

#define foreach_ip_punt_policer_error           \
_(DROP, "ip punt policer drop")

typedef enum
{
#define _(sym,str) IP_PUNT_POLICER_ERROR_##sym,
  foreach_ip_punt_policer_error
#undef _
  IP4_PUNT_POLICER_N_ERROR,
} ip_punt_policer_error_t;

extern u8 *format_ip_punt_policer_trace (u8 * s, va_list * args);

/**
 * IP punt policing node function
 * Police punted packets against the given policer and drop those that
 * exceed its configured rate.
 */
always_inline uword
ip_punt_policer (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index, u32 policer_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  u64 time_in_policer_periods;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];

  time_in_policer_periods =
    clib_cpu_time_now () >> POLICER_TICKS_PER_PERIOD_SHIFT;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* dual-loop: police two packets per iteration */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u8 act0, act1;
          u32 bi0, bi1;

          next0 = next1 = 0;
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          vnet_get_config_data (&cm->config_main,
                                &b0->current_config_index, &next0, 0);
          vnet_get_config_data (&cm->config_main,
                                &b1->current_config_index, &next1, 0);

          act0 = vnet_policer_police (vm, b0,
                                      policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM);
          act1 = vnet_policer_police (vm, b1,
                                      policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM);

          if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP))
            {
              next0 = IP_PUNT_POLICER_NEXT_DROP;
              b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
            }
          if (PREDICT_FALSE (act1 == SSE2_QOS_ACTION_DROP))
            {
              next1 = IP_PUNT_POLICER_NEXT_DROP;
              b1->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_policer_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next = next0;
              t->policer_index = policer_index;
            }
          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_policer_trace_t *t =
                vlib_add_trace (vm, node, b1, sizeof (*t));
              t->next = next1;
              t->policer_index = policer_index;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      /* single-loop for any remaining packets */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;
          u8 act0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_get_config_data (&cm->config_main,
                                &b0->current_config_index, &next0, 0);

          act0 = vnet_policer_police (vm, b0,
                                      policer_index,
                                      time_in_policer_periods,
                                      POLICE_CONFORM);
          if (PREDICT_FALSE (act0 == SSE2_QOS_ACTION_DROP))
            {
              next0 = IP_PUNT_POLICER_NEXT_DROP;
              b0->error = node->errors[IP_PUNT_POLICER_ERROR_DROP];
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_policer_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next = next0;
              t->policer_index = policer_index;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
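
/**
 * Illustrative sketch (an assumption, not part of this header): a graph
 * node dispatch function would typically wrap ip_punt_policer() with the
 * feature arc it runs on and the index of the configured policer. The
 * ip4_punt_arc_index and ip4_punt_policer_cfg names are hypothetical.
 *
 *   static uword
 *   ip4_punt_policer_node_fn (vlib_main_t * vm,
 *                             vlib_node_runtime_t * node,
 *                             vlib_frame_t * frame)
 *   {
 *     return (ip_punt_policer (vm, node, frame,
 *                              ip4_punt_arc_index,
 *                              ip4_punt_policer_cfg.policer_index));
 *   }
 */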

/**
 * IP4 punt redirect per-RX interface configuration:
 * redirect punted traffic to another location.
 */
typedef struct ip_punt_redirect_rx_t_
{
  /**
   * The next-hop to send redirected packets to
   */
  ip46_address_t nh;

  /**
   * The TX interface on which to send redirected packets
   */
  u32 tx_sw_if_index;

  /**
   * The redirect forwarding adjacency
   */
  adj_index_t adj_index;
} ip_punt_redirect_rx_t;

/**
 * IP punt redirect configuration
 */
typedef struct ip_punt_redirect_t_
{
  /**
   * The redirect applied to any RX interface, used when no
   * per-interface entry matches
   */
  ip_punt_redirect_rx_t any_rx_sw_if_index;

  /**
   * Per-RX interface configuration, indexed by sw_if_index
   */
  ip_punt_redirect_rx_t *redirect_by_rx_sw_if_index;
} ip_punt_redirect_t;

/**
 * IP punt redirect next nodes
 */
typedef enum ip_punt_redirect_next_t_
{
  IP_PUNT_REDIRECT_NEXT_DROP,
  IP_PUNT_REDIRECT_NEXT_TX,
  IP_PUNT_REDIRECT_NEXT_ARP,
  IP_PUNT_REDIRECT_N_NEXT,
} ip_punt_redirect_next_t;

/**
 * IP punt redirect trace
 */
typedef struct ip4_punt_redirect_trace_t_
{
  ip_punt_redirect_rx_t redirect;
  u32 next;
} ip_punt_redirect_trace_t;

typedef struct ip_punt_redirect_detail_t_
{
  /**
   * The RX interface
   */
  u32 rx_sw_if_index;
  /**
   * IP punt redirect configuration
   */
  ip_punt_redirect_rx_t punt_redirect;
} ip_punt_redirect_detail_t;

/**
 * Add a punt redirect entry
 */
extern void ip_punt_redirect_add (ip_punt_redirect_t * cfg,
                                  u32 rx_sw_if_index,
                                  ip_punt_redirect_rx_t * redirect,
                                  fib_protocol_t fproto, vnet_link_t linkt);

/**
 * Remove a punt redirect entry
 */
extern void ip_punt_redirect_del (ip_punt_redirect_t * cfg,
                                  u32 rx_sw_if_index);

extern u8 *format_ip_punt_redirect (u8 * s, va_list * args);

extern u8 *format_ip_punt_redirect_trace (u8 * s, va_list * args);

extern ip_punt_redirect_detail_t *ip4_punt_redirect_entries (u32 sw_if_index);
extern ip_punt_redirect_detail_t *ip6_punt_redirect_entries (u32 sw_if_index);
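
/**
 * Illustrative sketch (an assumption, not part of this header): adding an
 * IP4 punt redirect that sends packets punted on rx_sw_if_index out of
 * tx_sw_if_index towards a given next-hop. The ip4_punt_redirect_cfg name
 * is hypothetical here; the configuration object is owned by the calling
 * IP4/IP6 punt code.
 *
 *   static ip_punt_redirect_t ip4_punt_redirect_cfg;
 *
 *   static void
 *   ip4_punt_redirect_set (u32 rx_sw_if_index, u32 tx_sw_if_index,
 *                          const ip46_address_t * nh)
 *   {
 *     ip_punt_redirect_rx_t rx = {
 *       .nh = *nh,
 *       .tx_sw_if_index = tx_sw_if_index,
 *     };
 *
 *     ip_punt_redirect_add (&ip4_punt_redirect_cfg, rx_sw_if_index, &rx,
 *                           FIB_PROTOCOL_IP4, VNET_LINK_IP4);
 *   }
 */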

/**
 * Select the next node for a redirected packet based on the state of
 * its forwarding adjacency.
 */
always_inline u32
ip_punt_redirect_tx_via_adj (vlib_buffer_t * b0, adj_index_t ai)
{
  ip_adjacency_t *adj = adj_get (ai);
  u32 next0;

  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;

  switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_ARP:
      /* incomplete adjacency - send via the ARP/ND path */
      next0 = IP_PUNT_REDIRECT_NEXT_ARP;
      break;
    case IP_LOOKUP_NEXT_REWRITE:
      /* complete adjacency - rewrite and transmit */
      next0 = IP_PUNT_REDIRECT_NEXT_TX;
      break;
    default:
      next0 = IP_PUNT_REDIRECT_NEXT_DROP;
      break;
    }

  return (next0);
}

/**
 * IP punt redirect node function
 * Redirect punted packets using the per-RX-interface configuration, or
 * the any-interface configuration if no per-interface entry is set.
 */
always_inline uword
ip_punt_redirect (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame,
                  u8 arc_index, ip_punt_redirect_t * redirect)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 rx_sw_if_index0;
          ip_punt_redirect_rx_t *rrx0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          rrx0 = NULL;
          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_get_config_data (&cm->config_main,
                                &b0->current_config_index, &next0, 0);

          rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /* prefer the per-RX interface redirect; fall back to the
           * any-interface redirect if none is configured */
          if (vec_len (redirect->redirect_by_rx_sw_if_index) >
              rx_sw_if_index0)
            {
              rrx0 = &redirect->redirect_by_rx_sw_if_index[rx_sw_if_index0];
              if (~0 != rrx0->tx_sw_if_index)
                {
                  next0 = ip_punt_redirect_tx_via_adj (b0, rrx0->adj_index);
                }
              else if (~0 != redirect->any_rx_sw_if_index.tx_sw_if_index)
                {
                  rrx0 = &redirect->any_rx_sw_if_index;
                  next0 = ip_punt_redirect_tx_via_adj (b0, rrx0->adj_index);
                }
            }
          else if (~0 != redirect->any_rx_sw_if_index.tx_sw_if_index)
            {
              rrx0 = &redirect->any_rx_sw_if_index;
              next0 = ip_punt_redirect_tx_via_adj (b0, rrx0->adj_index);
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_punt_redirect_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next = next0;
              if (rrx0)
                t->redirect = *rrx0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

/**
 * IP drop/punt node function
 * Start the drop or punt feature arc for each packet; the features on
 * that arc determine the packet's fate.
 */
always_inline uword
ip_drop_or_punt (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame, u8 arc_index)
{
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          vlib_buffer_t *b0, *b1, *b2, *b3;
          u32 next0, next1, next2, next3;
          u32 bi0, bi1, bi2, bi3;

          next0 = next1 = next2 = next3 = 0;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            vlib_prefetch_buffer_header (p6, LOAD);
            vlib_prefetch_buffer_header (p7, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          /* The punt and drop features are not associated with a given
           * interface, so the special index 0 is used. */
          vnet_feature_arc_start (arc_index, 0, &next0, b0);
          vnet_feature_arc_start (arc_index, 0, &next1, b1);
          vnet_feature_arc_start (arc_index, 0, &next2, b2);
          vnet_feature_arc_start (arc_index, 0, &next3, b3);

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 next0;
          u32 bi0;

          next0 = 0;
          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          vnet_feature_arc_start (arc_index, 0, &next0, b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
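
/**
 * Illustrative sketch (an assumption, not part of this header): an
 * ip4-drop style node function calls ip_drop_or_punt() with the index of
 * the feature arc it starts; that index comes from the corresponding
 * feature arc registration (VNET_FEATURE_ARC_INIT). The
 * my_ip4_drop_arc_index name is hypothetical.
 *
 *   static uword
 *   my_ip4_drop_node_fn (vlib_main_t * vm,
 *                        vlib_node_runtime_t * node,
 *                        vlib_frame_t * frame)
 *   {
 *     return (ip_drop_or_punt (vm, node, frame, my_ip4_drop_arc_index));
 *   }
 */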

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */