;;; tunnel-encap-skel.el - tunnel interface output skeleton

(require 'skeleton)
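
;; Usage (sketch): load this file, then run `M-x tunnel-encap-skel' in the
;; buffer that should receive the generated C code.  The skeleton prompts
;; for the encap stack name (e.g. ip4_udp_lisp) and expands the template
;; below at point.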

(define-skeleton tunnel-encap-skel
"Insert a tunnel encap implementation"
nil
'(setq encap_stack (skeleton-read "encap_stack (e.g. ip4_udp_lisp): "))
'(setq ENCAP_STACK (upcase encap_stack))
'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
'(setq ENCAP-STACK (upcase encap-stack))
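;; Four spellings of the same name are substituted into the template:
;; encap_stack for C identifiers, ENCAP_STACK for enum/error constants,
;; encap-stack for include paths and graph-node names, and ENCAP-STACK
;; for trace output.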
12"
13#include <clib/error.h>
14#include <clib/hash.h>
15#include <vnet/vnet.h>
16#include <vnet/ip/ip.h>
17#include <vnet/ethernet/ethernet.h>
18#include <vnet/" encap-stack "/" encap_stack ".h>
19
20/* Statistics (not really errors) */
21#define foreach_" encap_stack "_encap_error \\
22_(ENCAPSULATED, \"good packets encapsulated\")
23
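/* The error list above is expanded twice, X-macro style: once into the
   string table and once into the error enum below. */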
static char * " encap_stack "_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_" encap_stack "_encap_error
#undef _
};

typedef enum {
#define _(sym,str) " ENCAP_STACK "_ENCAP_ERROR_##sym,
  foreach_" encap_stack "_encap_error
#undef _
  " ENCAP_STACK "_ENCAP_N_ERROR,
} " encap_stack "_encap_error_t;

typedef enum {
  " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP,
  " ENCAP_STACK "_ENCAP_NEXT_DROP,
  " ENCAP_STACK "_ENCAP_N_NEXT,
} " encap_stack "_encap_next_t;

typedef struct {
  u32 tunnel_index;
} " encap_stack "_encap_trace_t;

u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  " encap_stack "_encap_trace_t * t
      = va_arg (*args, " encap_stack "_encap_trace_t *);

  s = format (s, \"" ENCAP-STACK ": tunnel %d\", t->tunnel_index);
  return s;
}

/* $$$$ FIXME adjust to match the rewrite string */
#define foreach_fixed_header_offset \\
_(0) _(1) _(2) _(3) _(FIXME)
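/* Each _(n) entry above names one u64 (8-byte) word of the precomputed
   rewrite copied in the fast path; extend the list to cover the whole
   fixed header. */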

static uword
" encap_stack "_encap (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  " encap_stack "_main_t * ngm = &" encap_stack "_main;
  vnet_main_t * vnm = ngm->vnet_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

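  /* Standard vlib dispatch loop: pull buffer indices from the input
     frame and enqueue each rewritten packet on its next node. */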
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

#if 0 /* $$$ dual loop when the single loop works */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          nsh_unicast_header_t * h0, * h1;
          u32 label0, label1;
          u32 next0, next1;
          uword * p0, * p1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);

          next0 = next1 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;

          vlib_buffer_advance (b0, sizeof (*h0));
          vlib_buffer_advance (b1, sizeof (*h1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;
          vnet_hw_interface_t * hi0;
          ip4_header_t * ip0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          " encap_stack "_tunnel_t * t0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* 1-wide cache? */
          hi0 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          ASSERT(vec_len(t0->rewrite) >= 24);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
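          /* current_data now points at the start of the (about to be
             written) encap header */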

          ip0 = vlib_buffer_get_current(b0);
          /* Copy the fixed header */
          copy_dst0 = (u64 *) ip0;
          copy_src0 = (u64 *) t0->rewrite;

          ASSERT (sizeof (ip4_udp_" encap_stack "_header_t) == FIXME);

          /* Copy first N octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header_offset;
#undef _
#if 0 /* needed if encap not a multiple of 8 bytes */
          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[FIXME]);
          copy_src_last0 = (u32 *)(&copy_src0[FIXME]);
          copy_dst_last0[0] = copy_src_last0[0];

#endif
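          /* Only ip->length differs from the precomputed rewrite, so an
             incremental (RFC 1624 style) checksum update is sufficient. */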
          /* fix the <bleep>ing outer-IP checksum */
          sum0 = ip0->checksum;
          /* old_l0 always 0, see the rewrite setup */
          new_l0 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));

          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                 length /* changed member */);
          ip0->checksum = ip_csum_fold (sum0);
          ip0->length = new_l0;

          /* Fix UDP length */
          udp0 = (udp_header_t *)(ip0+1);
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip0));

          udp0->length = new_l0;
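          /* UDP checksum is not recomputed here; this assumes the rewrite
             leaves it zero, which is legal for UDP over IPv4 */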

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          pkts_encapsulated++;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              " encap_stack "_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               " ENCAP_STACK "_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (" encap_stack "_encap_node) = {
  .function = " encap_stack "_encap,
  .name = \"" encap-stack "-encap\",
  .vector_size = sizeof (u32),
  .format_trace = format_" encap_stack "_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(" encap_stack "_encap_error_strings),
  .error_strings = " encap_stack "_encap_error_strings,

  .n_next_nodes = " ENCAP_STACK "_ENCAP_N_NEXT,

  .next_nodes = {
    [" ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP] = \"ip4-lookup\",
    [" ENCAP_STACK "_ENCAP_NEXT_DROP] = \"error-drop\",
  },
};
")