/*
 * replication.h : packet replication
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_replication_h
#define included_replication_h


#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/replication.h>


typedef struct
{
  /* The entire vnet buffer header restored for each replica */
  u8 vnet_buffer[40];           /* 16B aligned to allow vector unit copy */
  u8 reserved[24];              /* space for future expansion of vnet buffer header */

  /* feature state used during this replication */
  u64 feature_replicas;         /* feature's id for its set of replicas */
  u32 feature_counter;          /* feature's current index into set of replicas */
  u32 recycle_node_index;       /* feature's recycle node index */

  /*
   * data saved from the start of replication and restored
   * at the end of replication
   */
  vlib_buffer_free_list_index_t saved_free_list_index; /* from vlib buffer */

  /* data saved from the original packet and restored for each replica */
  u64 l2_header[3];             /* 24B (must be at least 22B for l2 packets) */
  u32 flags;                    /* vnet buffer flags */
  u16 ip_tos;                   /* v4 and v6 */
  u16 ip4_checksum;             /* needed for v4 only */

  /* data saved from the vlib buffer header and restored for each replica */
  i16 current_data;             /* offset of first byte of packet in packet data */
  u8 pad[2];                    /* to 64B */
  u8 l2_packet;                 /* flag for l2 vs l3 packet data */

} replication_context_t;        /* 128B */


typedef struct
{

  u32 recycle_list_index;

  /* per-thread pools of replication contexts */
  replication_context_t **contexts;

  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

} replication_main_t;


extern replication_main_t replication_main;


/* Return nonzero if this buffer just came from the replication recycle
   handler. */
always_inline u32
replication_is_recycled (vlib_buffer_t * b0)
{
  return b0->flags & VLIB_BUFFER_IS_RECYCLED;
}

/*
 * Clear the recycle flag.  If the buffer came from the replication recycle
 * handler, this flag must be cleared before the packet is transmitted again.
 */
always_inline void
replication_clear_recycled (vlib_buffer_t * b0)
{
  b0->flags &= ~VLIB_BUFFER_IS_RECYCLED;
}

/*
 * Return the active replication context if this buffer has
 * been recycled, otherwise return 0.  (Note that this essentially
 * restricts access to the replication context to the replication
 * feature's prep and recycle nodes.)
 */
always_inline replication_context_t *
replication_get_ctx (vlib_buffer_t * b0)
{
  replication_main_t *rm = &replication_main;

  return replication_is_recycled (b0) ?
    pool_elt_at_index (rm->contexts[vlib_get_thread_index ()],
                       b0->recycle_count) : 0;
}
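
/*
 * Usage sketch (illustrative only, not part of this API): how a feature's
 * recycle node might read back the state it stashed at prep time.  The
 * variables below, apart from b0, are hypothetical.
 */
#if 0
  replication_context_t *ctx = replication_get_ctx (b0);
  if (ctx)
    {
      /* Recycled buffer: pick the next replica from the feature's set. */
      u64 replica_set_id = ctx->feature_replicas;
      u32 replica_index = ctx->feature_counter;
      /* ... build the next copy from replica_set_id / replica_index ... */
    }
  else
    {
      /* Fresh buffer on its first pass: no context exists yet. */
    }
#endif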

/* Prefetch the replication context for this buffer, if it exists */
always_inline void
replication_prefetch_ctx (vlib_buffer_t * b0)
{
  replication_context_t *ctx = replication_get_ctx (b0);

  if (ctx)
    {
      CLIB_PREFETCH (ctx, (2 * CLIB_CACHE_LINE_BYTES), STORE);
    }
}
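
/*
 * Usage sketch (illustrative only): inside a recycle node's packet loop,
 * prefetching the context of the *next* buffer while working on the
 * current one hides the pool-element lookup latency.  "from" and
 * "n_left_from" are assumed loop variables, not part of this header.
 */
#if 0
  while (n_left_from >= 2)
    {
      vlib_buffer_t *b_next = vlib_get_buffer (vm, from[1]);
      replication_prefetch_ctx (b_next);        /* warm the next context */

      vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
      /* ... process b0, typically via replication_get_ctx (b0) ... */

      from += 1;
      n_left_from -= 1;
    }
#endif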

replication_context_t *replication_prep (vlib_main_t * vm,
                                         vlib_buffer_t * b0,
                                         u32 recycle_node_index,
                                         u32 l2_packet);

replication_context_t *replication_recycle (vlib_main_t * vm,
                                            vlib_buffer_t * b0, u32 is_last);

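/*
 * Call-pattern sketch (illustrative only, not a definitive recipe): a
 * feature that wants several copies of a packet calls replication_prep()
 * once before sending the first copy, then replication_recycle() in its
 * recycle node for each copy that comes back, passing is_last = 1 on the
 * final one.  "my_recycle_node", "replica_set_id" and the is_last logic
 * below are assumptions, not part of this header.
 */
#if 0
  /* First pass, in the feature node: save buffer state and tag the buffer. */
  replication_context_t *ctx =
    replication_prep (vm, b0, my_recycle_node.index, 1 /* l2_packet */);
  ctx->feature_replicas = replica_set_id;       /* feature-private set id */
  ctx->feature_counter = 0;                     /* start of the replica set */

  /* Later passes, in the recycle node: restore saved state for each copy. */
  ctx = replication_recycle (vm, b0, is_last);
  if (!is_last)
    ctx->feature_counter++;                     /* advance through the set */
#endif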

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */