/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>
#include <assert.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include "dpdk_priv.h"
#include <vppinfra/error.h>

#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
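/*
 * For reference, a sketch of what the X-macro list above expands to:
 * each _(f,s) entry contributes one enum member and one matching string,
 * roughly
 *
 *   typedef enum
 *   {
 *     DPDK_TX_FUNC_ERROR_BAD_RETVAL,
 *     DPDK_TX_FUNC_ERROR_RING_FULL,
 *     DPDK_TX_FUNC_ERROR_PKT_DROP,
 *     DPDK_TX_FUNC_ERROR_REPL_FAIL,
 *     DPDK_TX_FUNC_N_ERROR,
 *   } dpdk_tx_func_error_t;
 *
 * with dpdk_tx_func_error_strings[] holding the descriptions in the same
 * order. Generating both from one list keeps error ids and strings in sync.
 */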

clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
                                            (struct ether_addr *) address);

  if (error)
    {
      return clib_error_return (0, "mac address set failed: %d", error);
    }
  else
    {
      return NULL;
    }
}

clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
                    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);

  if (error)
    {
      return clib_error_return (0, "mc addr list failed: %d", error);
    }
  else
    {
      return NULL;
    }
}

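/*
 * Deep-copy the rte_mbuf chain behind a vlib buffer: allocate fresh mbufs
 * from this thread's socket-local pool and copy headroom plus payload for
 * every segment. Returns the head of the new chain, or 0 if the bulk
 * allocation fails. Used by the recycle path below, where the original
 * buffer must remain owned by VPP.
 */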
struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_main_t *bm = vm->buffer_main;
  struct rte_mbuf **mbufs = 0, *s, *d;
  u8 nb_segs;
  unsigned socket_id = rte_socket_id ();
  int i;

  ASSERT (bm->pktmbuf_pools[socket_id]);
  s = rte_mbuf_from_vlib_buffer (b);
  nb_segs = s->nb_segs;
  vec_validate (mbufs, nb_segs - 1);

  if (rte_pktmbuf_alloc_bulk (bm->pktmbuf_pools[socket_id], mbufs, nb_segs))
    {
      vec_free (mbufs);
      return 0;
    }

  d = mbufs[0];
  d->nb_segs = s->nb_segs;
  d->data_len = s->data_len;
  d->pkt_len = s->pkt_len;
  d->data_off = s->data_off;
  clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len);

  for (i = 1; i < nb_segs; i++)
    {
      d->next = mbufs[i];
      d = mbufs[i];
      s = s->next;
      d->data_len = s->data_len;
      clib_memcpy (d->buf_addr, s->buf_addr,
                   RTE_PKTMBUF_HEADROOM + s->data_len);
    }

  d = mbufs[0];
  vec_free (mbufs);
  return d;
}

static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
                      vlib_node_runtime_t * node,
                      dpdk_device_t * xd,
                      u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_dma_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer,
               sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
               sizeof (t0->buffer.pre_data));
}

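/*
 * Prepare rte_mbuf metadata for a (possibly chained) vlib buffer before
 * handing it to the PMD: reset any mbufs that did not originate from DPDK,
 * then set data_len/pkt_len/data_off on every segment and count nb_segs on
 * the first. The maybe_multiseg flag lets the single-segment fast path
 * compile away the chain walk.
 */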
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
                        int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;

  /* buffer is coming from a non-DPDK source, so we need to init
     the rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0))
    {
      vlib_buffer_t *b2 = b;
      last_mb = mb = rte_mbuf_from_vlib_buffer (b2);
      rte_pktmbuf_reset (mb);
      while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          b2 = vlib_get_buffer (vm, b2->next_buffer);
          mb = rte_mbuf_from_vlib_buffer (b2);
          last_mb->next = mb;
          last_mb = mb;
          rte_pktmbuf_reset (mb);
        }
    }

  first_mb = mb = rte_mbuf_from_vlib_buffer (b);
  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
    }
}

/*
 * This function calls DPDK's tx_burst function to transmit the packets
 * on the tx_vector. It manages a lock per-device if the device does not
 * support multiple queues. It returns the number of packets left
 * untransmitted on the tx_vector. If all packets are transmitted (the
 * normal case), the function returns 0.
 *
 * The function assumes there is at least one packet on the tx_vector.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
                                dpdk_device_t * xd,
                                struct rte_mbuf **tx_vector)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header (tx_vector, sizeof (*ring));

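  /*
   * Ring bookkeeping sketch (hypothetical numbers): tx_head and tx_tail
   * are free-running counters kept in the vector header; head - tail is
   * the number of packets waiting, and each counter is taken modulo
   * nb_tx_desc to index the ring. E.g. with nb_tx_desc = 1024,
   * tx_head = 6 and tx_tail = 0, six packets occupy slots 0..5.
   */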
  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % xd->nb_tx_desc;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT (n_packets > 0);

  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest
   * un-flowed-off burst from a traffic manager. A larger size also helps
   * performance a bit because it decreases the probability of having to
   * issue two tx_burst calls due to a ring wrap.
   */
  ASSERT (n_packets < xd->nb_tx_desc);
  ASSERT (ring->tx_tail == 0);

  n_retry = 16;
  queue_id = vm->cpu_index;

  do
    {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % xd->nb_tx_desc;

      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
          /* spin, rotating to the next queue, until a lock is acquired */
          while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }

      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS))   /* HQoS ON */
        {
          /* no wrap, transmit in one burst */
          dpdk_device_hqos_per_worker_thread_t *hqos =
            &xd->hqos_wt[vm->cpu_index];

          ASSERT (hqos->swq != NULL);

          dpdk_hqos_metadata_set (hqos,
                                  &tx_vector[tx_tail], tx_head - tx_tail);
          rv = rte_ring_sp_enqueue_burst (hqos->swq,
                                          (void **) &tx_vector[tx_tail],
                                          (uint16_t) (tx_head - tx_tail));
        }
      else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
        {
          /* no wrap, transmit in one burst */
          rv = rte_eth_tx_burst (xd->device_index,
                                 (uint16_t) queue_id,
                                 &tx_vector[tx_tail],
                                 (uint16_t) (tx_head - tx_tail));
        }
      else
        {
          ASSERT (0);
          rv = 0;
        }

      if (PREDICT_FALSE (xd->lockp != 0))
        *xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE (rv < 0))
        {
          /* emit non-fatal message, bump counter */
          vnet_main_t *vnm = dm->vnet_main;
          vnet_interface_main_t *im = &vnm->interface_main;
          u32 node_index;

          node_index = vec_elt_at_index (im->hw_interfaces,
                                         xd->vlib_hw_if_index)->tx_node_index;

          vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
          clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index,
                        rv);
          return n_packets;     /* untransmitted packets */
        }
      ring->tx_tail += (u16) rv;
      n_packets -= (uint16_t) rv;
      n_retry--;
    }
  while (rv && n_packets && (n_retry > 0));

  return n_packets;
}

static_always_inline void
dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b;
  struct rte_mbuf *mb;
  b = vlib_get_buffer (vm, bi);
  mb = rte_mbuf_from_vlib_buffer (b);
  CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}

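/*
 * Handle a buffer flagged VLIB_BUFFER_RECYCLE: clone its mbuf chain so the
 * PMD can consume the copy while the original buffer index is queued on
 * dm->recycle[] for reuse. On clone failure, mark the buffer
 * VLIB_BUFFER_REPL_FAIL so the caller drops it instead of enqueueing it.
 */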
static_always_inline void
dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 my_cpu = vm->cpu_index;
  struct rte_mbuf *mb_new;

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0)
    return;

  mb_new = dpdk_replicate_packet_mb (b);
  if (PREDICT_FALSE (mb_new == 0))
    {
      vlib_error_count (vm, node->node_index,
                        DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
      b->flags |= VLIB_BUFFER_REPL_FAIL;
    }
  else
    *mbp = mb_new;

  vec_add1 (dm->recycle[my_cpu], bi);
}

/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal
 * which calls the DPDK tx_burst function.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  struct rte_mbuf **tx_vector;
  u16 i;
  u16 nb_tx_desc = xd->nb_tx_desc;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->cpu_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header (tx_vector, sizeof (*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  if (PREDICT_FALSE (n_on_ring + n_packets > nb_tx_desc))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
                        n_packets);

      while (n_packets--)
        {
          u32 bi0 = from[n_packets];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
          rte_pktmbuf_free (mb0);
        }
      return n_on_ring;
    }

  if (PREDICT_FALSE (dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
        {
          u32 bi0 = from[0];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          if (dm->pcap_sw_if_index == 0 ||
              dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
            pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
          from++;
          n_left--;
        }
    }

  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % nb_tx_desc;

  while (n_left >= 8)
    {
      u32 bi0, bi1, bi2, bi3;
      struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 or_flags;

      dpdk_prefetch_buffer_by_index (vm, from[4]);
      dpdk_prefetch_buffer_by_index (vm, from[5]);
      dpdk_prefetch_buffer_by_index (vm, from[6]);
      dpdk_prefetch_buffer_by_index (vm, from[7]);

      bi0 = from[0];
      bi1 = from[1];
      bi2 = from[2];
      bi3 = from[3];
      from += 4;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b2 = vlib_get_buffer (vm, bi2);
      b3 = vlib_get_buffer (vm, bi3);

      or_flags = b0->flags | b1->flags | b2->flags | b3->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          dpdk_validate_rte_mbuf (vm, b0, 1);
          dpdk_validate_rte_mbuf (vm, b1, 1);
          dpdk_validate_rte_mbuf (vm, b2, 1);
          dpdk_validate_rte_mbuf (vm, b3, 1);
        }
      else
        {
          dpdk_validate_rte_mbuf (vm, b0, 0);
          dpdk_validate_rte_mbuf (vm, b1, 0);
          dpdk_validate_rte_mbuf (vm, b2, 0);
          dpdk_validate_rte_mbuf (vm, b3, 0);
        }

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);
      mb2 = rte_mbuf_from_vlib_buffer (b2);
      mb3 = rte_mbuf_from_vlib_buffer (b3);

      if (PREDICT_FALSE (or_flags & VLIB_BUFFER_RECYCLE))
        {
          dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);
          dpdk_buffer_recycle (vm, node, b1, bi1, &mb1);
          dpdk_buffer_recycle (vm, node, b2, bi2, &mb2);
          dpdk_buffer_recycle (vm, node, b3, bi3, &mb3);

          /* don't enqueue packets if replication failed, as they must
             be sent back to recycle */
          if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb0;
          if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb1;
          if (PREDICT_TRUE ((b2->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb2;
          if (PREDICT_TRUE ((b3->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb3;
        }
      else
        {
          if (PREDICT_FALSE (i + 3 >= nb_tx_desc))
            {
              /* the ring may wrap within these four slots, so take the
                 modulo on every store */
              tx_vector[i++ % nb_tx_desc] = mb0;
              tx_vector[i++ % nb_tx_desc] = mb1;
              tx_vector[i++ % nb_tx_desc] = mb2;
              tx_vector[i++ % nb_tx_desc] = mb3;
              i %= nb_tx_desc;
            }
          else
            {
              tx_vector[i++] = mb0;
              tx_vector[i++] = mb1;
              tx_vector[i++] = mb2;
              tx_vector[i++] = mb3;
            }
        }

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b0->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
          if (b1->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
          if (b2->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi2, b2);
          if (b3->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi3, b3);
        }

      n_left -= 4;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf *mb0;
      vlib_buffer_t *b0;

      bi0 = from[0];
      from++;

      b0 = vlib_get_buffer (vm, bi0);

      dpdk_validate_rte_mbuf (vm, b0, 1);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        if (b0->flags & VLIB_BUFFER_IS_TRACED)
          dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
        {
          tx_vector[i % nb_tx_desc] = mb0;
          i++;
        }
      n_left--;
    }

  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted:
   * the number originally on the ring minus the number remaining on it.
   */
  tx_pkts = n_on_ring - n_packets;

  /* There is no flow-control callback, so drop any non-transmitted packets */
  if (PREDICT_FALSE (n_packets))
    {
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                             VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
                                     n_packets);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
                        n_packets);

      while (n_packets--)
        rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
    }

  /* Reset head/tail to avoid an unnecessary wrap on the next frame */
  ring->tx_head = 0;
  ring->tx_tail = 0;

  /* Recycle replicated buffers */
  if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu],
                        vec_len (dm->recycle[my_cpu]));
      _vec_len (dm->recycle[my_cpu]) = 0;
    }

  ASSERT (ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}

static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * Set the "last_cleared_stats" to the current stats, so that
   * things appear to clear from a display perspective.
   */
  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
  clib_memcpy (xd->last_cleared_xstats, xd->xstats,
               vec_len (xd->last_cleared_xstats) *
               sizeof (xd->last_cleared_xstats[0]));
}

static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
        rv = rte_eth_dev_start (xd->device_index);

      if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
        rte_eth_promiscuous_enable (xd->device_index);
      else
        rte_eth_promiscuous_disable (xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
      rte_eth_dev_stop (xd->device_index);

      /* For a bonded interface, also stop the slave links */
      if (xd->pmd == VNET_DPDK_PMD_BOND)
        {
          u8 slink[16];
          int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
          while (nlink >= 1)
            {
              u8 dpdk_port = slink[--nlink];
              rte_eth_dev_stop (dpdk_port);
            }
        }
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);

  return /* no error */ 0;
}

/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                              u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}

static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
                             u32 hw_if_index,
                             struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANs only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
                               xd->device_index, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
                                is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
                               xd->device_index, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx)
/* *INDENT-ON* */

#define UP_DOWN_FLAG_EVENT 1

uword
admin_up_down_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
        {
        case UP_DOWN_FLAG_EVENT:
          {
            if (vec_len (event_data) == 2)
              {
                sw_if_index = event_data[0];
                flags = event_data[1];
                error =
                  vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
                                               flags);
                clib_error_report (error);
              }
          }
          break;
        }

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,	// 256KB
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */