/*
 * replication.c : packet replication
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/replication.h>

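/*
 * How the pieces below fit together (the caller-side sequence is a sketch
 * inferred from this file, not lifted from a specific replication node):
 *
 *  - replication_prep() allocates a per-thread replication_context_t,
 *    saves the buffer state that downstream rewrites will clobber, and
 *    parks the buffer on the "replication-recycle" free list with
 *    VLIB_BUFFER_RECYCLE set, so "freeing" the buffer returns it here
 *    instead of to the buffer allocator.
 *
 *  - Each time the buffer comes back, the owning node is expected to call
 *    replication_recycle() to restore the saved state and send the next
 *    copy, passing is_last = 1 on the final copy so the original free
 *    list is restored and the context is released.
 *
 *  - replication_recycle_callback() fires when recycled buffers are added
 *    to the free list and hands them back, a frame at a time, to the
 *    recycle node recorded in the context.
 *
 * Hypothetical caller sketch (node and variable names are illustrative):
 *
 *    ctx = replication_prep (vm, b0, my_node_index, 1 );
 *    ... transmit the copy; the buffer returns via the recycle free list ...
 *    ctx = replication_recycle (vm, b0, is_last_copy);
 */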

replication_main_t replication_main;

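/*
 * Prepare a buffer for replication: allocate a per-thread context, save
 * the buffer state that downstream rewrites will clobber (free list,
 * current_data, VLAN flags, vnet opaque data, L2/L3 header words), and
 * divert the buffer to the recycle free list so it comes back after each
 * copy is transmitted.
 */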
replication_context_t *
replication_prep (vlib_main_t * vm,
                  vlib_buffer_t * b0, u32 recycle_node_index, u32 l2_packet)
{
  replication_main_t *rm = &replication_main;
  replication_context_t *ctx;
  uword thread_index = vm->thread_index;
  ip4_header_t *ip;
  u32 ctx_id;

  /* Allocate a context, reserve context 0 */
  if (PREDICT_FALSE (rm->contexts[thread_index] == 0))
    pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES);

  pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES);
  ctx_id = ctx - rm->contexts[thread_index];

  /* Save state from vlib buffer */
  ctx->saved_free_list_index = vlib_buffer_get_free_list_index (b0);
  ctx->current_data = b0->current_data;
  ctx->flags = b0->flags & VNET_BUFFER_FLAGS_VLAN_BITS;

  /* Set up vlib buffer hooks */
  b0->recycle_count = ctx_id;
  vlib_buffer_set_free_list_index (b0, rm->recycle_list_index);
  b0->flags |= VLIB_BUFFER_RECYCLE;

  /* Save feature state */
  ctx->recycle_node_index = recycle_node_index;

  /* Save vnet state */
  clib_memcpy (ctx->vnet_buffer, vnet_buffer (b0),
               sizeof (vnet_buffer_opaque_t));

  /* Save packet contents */
  ctx->l2_packet = l2_packet;
  ip = (ip4_header_t *) vlib_buffer_get_current (b0);
  if (l2_packet)
    {
      /* Save ethernet header */
      ctx->l2_header[0] = ((u64 *) ip)[0];
      ctx->l2_header[1] = ((u64 *) ip)[1];
      ctx->l2_header[2] = ((u64 *) ip)[2];
      /* set ip to the true ip header */
      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
    }

  /*
   * Copy L3 fields.
   * We need to save TOS for ip4 and ip6 packets.
   * Fortunately the TOS field is
   * in the first two bytes of both the ip4 and ip6 headers.
   */
  ctx->ip_tos = *((u16 *) (ip));

  /*
   * Save the ip4 checksum as well. We just blindly save the corresponding two
   * bytes even for ip6 packets.
   */
  ctx->ip4_checksum = ip->checksum;

  return ctx;
}

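/*
 * Restore the state saved by replication_prep() so the same buffer can be
 * sent again. When is_last is set, put the buffer back on its original
 * free list, clear VLIB_BUFFER_RECYCLE, and return the context to its
 * pool.
 */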
replication_context_t *
replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
{
  replication_main_t *rm = &replication_main;
  replication_context_t *ctx;
  uword thread_index = vm->thread_index;
  ip4_header_t *ip;

  /* Get access to the replication context */
  ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);

  /* Restore vnet buffer state */
  clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
               sizeof (vnet_buffer_opaque_t));

  /* Restore the vlan flags */
  b0->flags &= ~VNET_BUFFER_FLAGS_VLAN_BITS;
  b0->flags |= ctx->flags;

  /* Restore the packet start (current_data) and length */
  vlib_buffer_advance (b0, ctx->current_data - b0->current_data);

  /* Restore packet contents */
  ip = (ip4_header_t *) vlib_buffer_get_current (b0);
  if (ctx->l2_packet)
    {
      /* Restore ethernet header */
      ((u64 *) ip)[0] = ctx->l2_header[0];
      ((u64 *) ip)[1] = ctx->l2_header[1];
      ((u64 *) ip)[2] = ctx->l2_header[2];
      /* set ip to the true ip header */
      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
    }

  /* Restore L3 fields */
  *((u16 *) (ip)) = ctx->ip_tos;
  ip->checksum = ctx->ip4_checksum;

  if (is_last)
    {
      /*
       * This is the last replication in the list.
       * Restore original buffer free functionality.
       */
      vlib_buffer_set_free_list_index (b0, ctx->saved_free_list_index);
      b0->flags &= ~VLIB_BUFFER_RECYCLE;

      /* Free context back to its pool */
      pool_put (rm->contexts[thread_index], ctx);
    }

  return ctx;
}


/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void
replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
{
  vlib_frame_t *f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 *from;
  u32 *to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  int i;
  replication_main_t *rm = &replication_main;
  replication_context_t *ctx;
  u32 feature_node_index = 0;
  uword thread_index = vm->thread_index;

  /*
   * All buffers in the list are destined to the same recycle node.
   * Pull the recycle node index from the first buffer.
   * Note: this could be sped up if the node index were stuffed into
   * the freelist itself.
   */
  if (vec_len (fl->buffers) > 0)
    {
      bi0 = fl->buffers[0];
      b0 = vlib_get_buffer (vm, bi0);
      ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);
      feature_node_index = ctx->recycle_node_index;
    }

  /* buffers */
  for (i = 0; i < 2; i++)
    {
      if (i == 0)
        {
          from = fl->buffers;
          n_left_from = vec_len (from);
        }

      while (n_left_from > 0)
        {
          if (PREDICT_FALSE (n_left_to_next == 0))
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, feature_node_index, f);
                }

              f = vlib_get_frame_to_node (vm, feature_node_index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }

          bi0 = from[0];
          if (PREDICT_TRUE (n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index (vm, pi0, LOAD);
            }

          b0 = vlib_get_buffer (vm, bi0);

          /* Mark that this buffer was just recycled */
          b0->flags |= VLIB_BUFFER_IS_RECYCLED;

#if (CLIB_DEBUG > 0)
          if (buffer_main.callbacks_registered == 0)
            vlib_buffer_set_known_state (bi0, VLIB_BUFFER_KNOWN_ALLOCATED);
#endif

          /* If buffer is traced, mark frame as traced */
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            f->flags |= VLIB_FRAME_TRACE;

          to_next[0] = bi0;

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }

  vec_reset_length (fl->buffers);

  if (f)
    {
      ASSERT (n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}

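/*
 * Create the "replication-recycle" free list and hook its
 * buffers_added_to_freelist_function so that replicated buffers which are
 * "freed" downstream are handed back to their recycle node rather than
 * returned to the buffer allocator.
 */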
clib_error_t *
replication_init (vlib_main_t * vm)
{
  replication_main_t *rm = &replication_main;
  vlib_buffer_free_list_t *fl;
  __attribute__ ((unused)) replication_context_t *ctx;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->vlib_main = vm;
  rm->vnet_main = vnet_get_main ();
  rm->recycle_list_index =
    vlib_buffer_create_free_list (vm, 1024 /* fictional */ ,
                                  "replication-recycle");

  fl = pool_elt_at_index (vm->buffer_free_list_pool, rm->recycle_list_index);

  fl->buffers_added_to_freelist_function = replication_recycle_callback;

  /* Verify the replication context is the expected size */
  ASSERT (sizeof (replication_context_t) == 128);	/* 2 cache lines */

  vec_validate (rm->contexts, tm->n_vlib_mains - 1);
  return 0;
}

VLIB_INIT_FUNCTION (replication_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */