/*
 * replication.h : packet replication
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_replication_h
#define included_replication_h


#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/replication.h>


typedef struct
{
  /* The entire vnet buffer header restored for each replica */
  u8 vnet_buffer[40];		/* 16B aligned to allow vector unit copy */
  u8 reserved[24];		/* space for future expansion of vnet buffer header */

  /* feature state used during this replication */
  u64 feature_replicas;		/* feature's id for its set of replicas */
  u32 feature_counter;		/* feature's current index into set of replicas */
  u32 recycle_node_index;	/* feature's recycle node index */

  /*
   * data saved from the start of replication and restored
   * at the end of replication
   */
  u32 saved_free_list_index;	/* from vlib buffer */

  /* data saved from the original packet and restored for each replica */
  u64 l2_header[3];		/* 24B (must be at least 22B for l2 packets) */
  u32 flags;			/* vnet buffer flags */
  u16 ip_tos;			/* v4 and v6 */
  u16 ip4_checksum;		/* needed for v4 only */

  /* data saved from the vlib buffer header and restored for each replica */
  i16 current_data;		/* offset of first byte of packet in packet data */
  u8 pad[2];			/* to 64B */
  u8 l2_packet;			/* flag for l2 vs l3 packet data */

} replication_context_t;	/* 128B */
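
/*
 * Size sketch (assuming natural 64-bit alignment, illustrative only):
 * 40B + 24B of saved vnet buffer header, 16B of feature state, 4B saved
 * free list index + 4B pad before the u64 l2_header, 24B l2 header,
 * 4B flags, 2B + 2B tos/checksum, 2B + 2B + 1B current_data/pad/l2_packet
 * and 3B tail pad = 128B, i.e. two 64B cache lines. If desired, this could
 * be enforced at compile time, e.g. with vppinfra's STATIC_ASSERT_SIZEOF
 * macro (assuming it is available in this tree) or C11 _Static_assert:
 *
 *   STATIC_ASSERT_SIZEOF (replication_context_t, 128);
 */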


typedef struct
{

  u32 recycle_list_index;

  /* per-thread pools of replication contexts */
  replication_context_t **contexts;

  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

} replication_main_t;


extern replication_main_t replication_main;


/* Return nonzero if this buffer just came from the replication recycle handler. */
always_inline u32
replication_is_recycled (vlib_buffer_t * b0)
{
  return b0->flags & VLIB_BUFFER_IS_RECYCLED;
}

/*
 * Clear the recycle flag. If the buffer came from the replication recycle
 * handler, this flag must be cleared before the packet is transmitted again.
 */
always_inline void
replication_clear_recycled (vlib_buffer_t * b0)
{
  b0->flags &= ~VLIB_BUFFER_IS_RECYCLED;
}

/*
 * Return the active replication context if this buffer has
 * been recycled, otherwise return 0. (Note that this essentially
 * restricts access to the replication context to the replication
 * feature's prep and recycle nodes.)
 */
always_inline replication_context_t *
replication_get_ctx (vlib_buffer_t * b0)
{
  replication_main_t *rm = &replication_main;

  return replication_is_recycled (b0) ?
    pool_elt_at_index (rm->contexts[vlib_get_thread_index ()],
		       b0->recycle_count) : 0;
}

/* Prefetch the replication context for this buffer, if it exists */
always_inline void
replication_prefetch_ctx (vlib_buffer_t * b0)
{
  replication_context_t *ctx = replication_get_ctx (b0);

  if (ctx)
    {
      CLIB_PREFETCH (ctx, (2 * CLIB_CACHE_LINE_BYTES), STORE);
    }
}
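
/*
 * A minimal sketch (illustrative only, not a required pattern) of how a
 * recycle-path node loop might use the accessors above; from / n_left / b0
 * follow the usual vlib node conventions:
 *
 *   while (n_left > 0)
 *     {
 *       vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
 *       replication_context_t *ctx0;
 *
 *       if (n_left > 1)                               // prefetch one ahead
 *         replication_prefetch_ctx (vlib_get_buffer (vm, from[1]));
 *
 *       ctx0 = replication_get_ctx (b0);              // 0 unless recycled
 *       if (ctx0)
 *         {
 *           // ctx0->feature_counter indexes the feature's replica set
 *         }
 *
 *       from += 1;
 *       n_left -= 1;
 *     }
 */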

replication_context_t *replication_prep (vlib_main_t * vm,
					 vlib_buffer_t * b0,
					 u32 recycle_node_index,
					 u32 l2_packet);

replication_context_t *replication_recycle (vlib_main_t * vm,
					    vlib_buffer_t * b0, u32 is_last);
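
/*
 * Typical calling pattern (an illustrative sketch only; the recycle node
 * name and the replica bookkeeping below are hypothetical):
 *
 *   On the first pass through the feature node, before sending the first
 *   replica, bind the buffer to the feature's recycle node and describe
 *   the replica set:
 *
 *     ctx = replication_prep (vm, b0, my_recycle_node.index, 1);  // l2
 *     ctx->feature_replicas = my_replica_set_id;
 *     ctx->feature_counter = 0;
 *
 *   Each time the buffer re-appears in the recycle node, restore it and
 *   send the next replica; pass is_last = 1 for the final replica so the
 *   saved buffer state (including the free list index) is restored and
 *   the context is released:
 *
 *     ctx = replication_recycle (vm, b0, is_last);
 *     if (!is_last)
 *       ctx->feature_counter++;
 */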


#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */