/*
 *------------------------------------------------------------------
 * ip_path_mtu.c
 *
 * Copyright (c) 2020 Graphiant.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vnet/ip/ip_path_mtu.h>
#include <vnet/ip/ip_frag.h>

typedef enum
{
  IP_PMTU_DROP,
  IP_PMTU_N_NEXT,
} ip_pmtu_next_t;

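/*
 * Per-packet trace data: the path MTU in force and the packet size
 * recorded before fragmentation.
 */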
typedef struct ip_pmtu_trace_t_
{
  u16 pmtu;
  u16 packet_size;
} ip_pmtu_trace_t;

static u8 *
format_ip_pmtu_trace (u8 *s, va_list *args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_pmtu_trace_t *t = va_arg (*args, ip_pmtu_trace_t *);

  s = format (s, "path mtu:%d packet size:%d", t->pmtu, t->packet_size);

  return s;
}

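/*
 * Path-MTU DPO walk: fragment the packet against the path MTU recorded in
 * the DPO, then re-stack the buffer on the parent DPO so the fragments
 * continue along the original forwarding path.
 */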
static inline uword
ip_pmtu_dpo_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, ip_address_family_t af)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

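  /* vector of buffer indices filled in by the fragmentation routines */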
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const ip_pmtu_dpo_t *ipm0;
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          u16 next0;

          /*
           * Note: The packet is not enqueued now. It is instead put
           * in a vector where other fragments will be put as well.
           */
          pi0 = from[0];
          from += 1;
          n_left_from -= 1;

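          /*
           * Look up this packet's path-MTU DPO and swap in the stacked
           * (parent) DPO so the fragments resume the original forwarding
           * path once they leave this node.
           */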
          p0 = vlib_get_buffer (vm, pi0);
          ipm0 = ip_pmtu_dpo_get (vnet_buffer (p0)->ip.adj_index[VLIB_TX]);
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = ipm0->ipm_dpo.dpoi_index;
          next0 = ipm0->ipm_dpo.dpoi_next_node;

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_pmtu_trace_t *t;
              t = vlib_add_trace (vm, node, p0, sizeof (*t));
              t->pmtu = ipm0->ipm_pmtu;
              t->packet_size = vlib_buffer_length_in_chain (vm, p0);
            }

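          /* Fragment the packet to fit the recorded path MTU */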
          if (AF_IP6 == af)
            error0 =
              ip6_frag_do_fragment (vm, pi0, ipm0->ipm_pmtu, 0, &buffer);
          else
            error0 =
              ip4_frag_do_fragment (vm, pi0, ipm0->ipm_pmtu, 0, &buffer);

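          /*
           * An IPv4 packet with DF set cannot be fragmented: send an ICMP
           * "fragmentation needed" error quoting the path MTU instead.
           */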
          if (AF_IP4 == af && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
            {
              icmp4_error_set_vnet_buffer (
                p0, ICMP4_destination_unreachable,
                ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                ipm0->ipm_pmtu);
              next0 = IP_FRAG_NEXT_ICMP_ERROR;
            }
          else
            {
              next0 =
                (error0 == IP_FRAG_ERROR_NONE ? next0 : IP_FRAG_NEXT_DROP);
            }

          if (error0 == IP_FRAG_ERROR_NONE)
            {
              /* Free the original packet; only its fragments are sent on */
              vlib_buffer_free_one (vm, pi0);
            }
          else
            {
              vlib_error_count (vm, node->node_index, error0, 1);
              vec_add1 (buffer, pi0); /* Get rid of the original buffer */
            }

          /* Send fragments that were added in the frame */
          frag_from = buffer;
          frag_left = vec_len (buffer);

          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error = node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (
                    vm, node, next_index, to_next, n_left_to_next, i, next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
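          /* The fragment vector is reused for the next packet in the frame */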
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  return frame->n_vectors;
}

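/*
 * Address-family specific graph nodes; both dispatch into the shared
 * inline above.
 */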
// clang-format off

VLIB_NODE_FN (ip4_ip_pmtu_dpo_node) (vlib_main_t *vm,
                                     vlib_node_runtime_t *node,
                                     vlib_frame_t *from_frame)
{
  return (ip_pmtu_dpo_inline (vm, node, from_frame, AF_IP4));
}

VLIB_NODE_FN (ip6_ip_pmtu_dpo_node) (vlib_main_t *vm,
                                     vlib_node_runtime_t *node,
                                     vlib_frame_t *from_frame)
{
  return (ip_pmtu_dpo_inline (vm, node, from_frame, AF_IP6));
}

VLIB_REGISTER_NODE (ip4_ip_pmtu_dpo_node) = {
  .name = "ip4-pmtu-dpo",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_pmtu_trace,
  .n_errors = IP_FRAG_N_ERROR,
  .error_counters = ip_frag_error_counters,
  .n_next_nodes = IP_PMTU_N_NEXT,
  .next_nodes =
  {
    [IP_PMTU_DROP] = "ip4-drop",
  }
};

VLIB_REGISTER_NODE (ip6_ip_pmtu_dpo_node) = {
  .name = "ip6-pmtu-dpo",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_pmtu_trace,
  .n_errors = IP_FRAG_N_ERROR,
  .error_counters = ip_frag_error_counters,
  .n_next_nodes = IP_PMTU_N_NEXT,
  .next_nodes =
  {
    [IP_PMTU_DROP] = "ip6-drop",
  }
};

// clang-format on

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */