/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>
#include <assert.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include "dpdk_priv.h"
#include <vppinfra/error.h>

#define foreach_dpdk_tx_func_error			\
  _(BAD_RETVAL, "DPDK tx function returned an error")	\
  _(RING_FULL, "Tx packet drops (ring full)")		\
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")	\
  _(REPL_FAIL, "Tx packet drops (replication failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
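
/*
 * The X-macro above expands twice: once into the error enum and once into
 * the matching string table, keeping the two in sync by construction. For
 * example, the BAD_RETVAL entry expands to:
 *
 *   DPDK_TX_FUNC_ERROR_BAD_RETVAL,          (in dpdk_tx_func_error_t)
 *   "DPDK tx function returned an error",   (in dpdk_tx_func_error_strings)
 */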

clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
					    (struct ether_addr *) address);

  if (error)
    {
      return clib_error_return (0, "mac address set failed: %d", error);
    }
  else
    {
      vec_reset_length (xd->default_mac_address);
      /* sizeof (address) is the size of a pointer, not of the 6-byte
         Ethernet address it points to, so copy the MAC length instead */
      vec_add (xd->default_mac_address, address, 6);
      return NULL;
    }
}

clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
		    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);

  if (error)
    {
      return clib_error_return (0, "mc addr list failed: %d", error);
    }
  else
    {
      return NULL;
    }
}
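
/*
 * Usage sketch (hypothetical caller): rte_eth_dev_set_mc_addr_list ()
 * replaces the device's whole multicast filter list, so the caller passes
 * the complete list on every change rather than a delta:
 *
 *   struct ether_addr addrs[1] = {{ .addr_bytes =
 *                                   { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }}};
 *   clib_error_t *err = dpdk_set_mc_filter (hi, addrs, 1);
 *   if (err)
 *     clib_error_report (err);
 */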

struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  dpdk_main_t *dm = &dpdk_main;
  struct rte_mbuf **mbufs = 0, *s, *d;
  u8 nb_segs;
  unsigned socket_id = rte_socket_id ();
  int i;

  ASSERT (dm->pktmbuf_pools[socket_id]);
  s = rte_mbuf_from_vlib_buffer (b);
  nb_segs = s->nb_segs;
  vec_validate (mbufs, nb_segs - 1);

  if (rte_pktmbuf_alloc_bulk (dm->pktmbuf_pools[socket_id], mbufs, nb_segs))
    {
      vec_free (mbufs);
      return 0;
    }

  d = mbufs[0];
  d->nb_segs = s->nb_segs;
  d->data_len = s->data_len;
  d->pkt_len = s->pkt_len;
  d->data_off = s->data_off;
  clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len);

  for (i = 1; i < nb_segs; i++)
    {
      d->next = mbufs[i];
      d = mbufs[i];
      s = s->next;
      d->data_len = s->data_len;
      clib_memcpy (d->buf_addr, s->buf_addr,
		   RTE_PKTMBUF_HEADROOM + s->data_len);
    }

  d = mbufs[0];
  vec_free (mbufs);
  return d;
}
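
/*
 * Shape of the copy this produces, for a two-segment source chain:
 *
 *   s: [seg0 | data_len 1500] -> [seg1 | data_len 200]
 *   d: [seg0'| data_len 1500] -> [seg1'| data_len 200]
 *
 * Every destination segment is freshly allocated from the local socket's
 * mbuf pool and carries its own copy of the headroom plus payload, so the
 * clone can be transmitted (and freed by the PMD) while the original
 * buffer is recycled independently.
 */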

static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
		      vlib_node_runtime_t * node,
		      dpdk_device_t * xd,
		      u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_dma_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer,
	       sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
	       sizeof (t0->buffer.pre_data));
}

static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
			int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0))
    {
      vlib_buffer_t *b2 = b;
      last_mb = mb = rte_mbuf_from_vlib_buffer (b2);
      rte_pktmbuf_reset (mb);
      while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  b2 = vlib_get_buffer (vm, b2->next_buffer);
	  mb = rte_mbuf_from_vlib_buffer (b2);
	  last_mb->next = mb;
	  last_mb = mb;
	  rte_pktmbuf_reset (mb);
	}
    }

  first_mb = mb = rte_mbuf_from_vlib_buffer (b);
  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
    }
}
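
/*
 * Per-segment field mapping established above (vlib_buffer_t -> rte_mbuf),
 * relying on the rte_mbuf header sitting immediately in front of each vlib
 * buffer:
 *
 *   b->current_length -> mb->data_len
 *   b->current_data   -> mb->data_off (offset by VLIB_BUFFER_PRE_DATA_SIZE)
 *   chain byte count  -> mb->pkt_len on the first segment; later segments
 *                        carry only their own length
 *   segment count     -> first_mb->nb_segs
 */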

/*
 * This function calls DPDK's tx_burst function to transmit the packets in
 * the tx_vector. It manages a per-device lock if the device does not
 * support multiple queues. It returns the number of packets left
 * untransmitted in the tx_vector. If all packets are transmitted (the
 * normal case), the function returns 0.
 *
 * The function assumes there is at least one packet in the tx_vector.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
				dpdk_device_t * xd,
				struct rte_mbuf **tx_vector)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header (tx_vector, sizeof (*ring));

  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % xd->nb_tx_desc;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT (n_packets > 0);

  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest
   * un-flowed-off burst from a traffic manager. A larger size also helps
   * performance a bit because it decreases the probability of having to
   * issue two tx_burst calls due to a ring wrap.
   */
  ASSERT (n_packets < xd->nb_tx_desc);
  ASSERT (ring->tx_tail == 0);

  n_retry = 16;
  queue_id = vm->cpu_index;

  do
    {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % xd->nb_tx_desc;

      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->lockp != 0))
	{
	  queue_id = queue_id % xd->tx_q_used;
	  while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
	    /* zzzz */
	    queue_id = (queue_id + 1) % xd->tx_q_used;
	}

      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS))	/* HQoS ON */
	{
	  /* no wrap, transmit in one burst */
	  dpdk_device_hqos_per_worker_thread_t *hqos =
	    &xd->hqos_wt[vm->cpu_index];

	  ASSERT (hqos->swq != NULL);

	  dpdk_hqos_metadata_set (hqos,
				  &tx_vector[tx_tail], tx_head - tx_tail);
	  rv = rte_ring_sp_enqueue_burst (hqos->swq,
					  (void **) &tx_vector[tx_tail],
					  (uint16_t) (tx_head - tx_tail));
	}
      else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
	{
	  /* no wrap, transmit in one burst */
	  rv = rte_eth_tx_burst (xd->device_index,
				 (uint16_t) queue_id,
				 &tx_vector[tx_tail],
				 (uint16_t) (tx_head - tx_tail));
	}
      else
	{
	  ASSERT (0);
	  rv = 0;
	}

      if (PREDICT_FALSE (xd->lockp != 0))
	*xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE (rv < 0))
	{
	  /* emit non-fatal message, bump counter */
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->vlib_hw_if_index)->tx_node_index;

	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index,
			rv);
	  return n_packets;	/* untransmitted packets */
	}
      ring->tx_tail += (u16) rv;
      n_packets -= (uint16_t) rv;
      /* bound the retries if the PMD repeatedly accepts only part of
         the burst; the loop condition below tests this counter */
      n_retry--;
    }
  while (rv && n_packets && (n_retry > 0));

  return n_packets;
}
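
/*
 * Worked example of the ring arithmetic above: with nb_tx_desc = 1024,
 * tx_tail pinned to 0 and tx_head = 300, the first burst offers 300
 * packets. If the PMD accepts only 256, the tail advances to 256 and
 * n_packets drops to 44; the loop then retries from the new tail until
 * the ring drains, the PMD returns 0, or the retry budget is exhausted.
 */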
313
Damjan Marion22766b82016-11-15 12:50:28 +0100314static_always_inline void
315dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
316{
317 vlib_buffer_t *b;
318 struct rte_mbuf *mb;
319 b = vlib_get_buffer (vm, bi);
320 mb = rte_mbuf_from_vlib_buffer (b);
321 CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
322 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
323}
324
325static_always_inline void
326dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node,
327 vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp)
328{
329 dpdk_main_t *dm = &dpdk_main;
Dave Barach971158e2016-11-23 08:57:37 -0500330 u32 my_cpu = vm->cpu_index;
Damjan Marion22766b82016-11-15 12:50:28 +0100331 struct rte_mbuf *mb_new;
332
333 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0)
334 return;
335
336 mb_new = dpdk_replicate_packet_mb (b);
337 if (PREDICT_FALSE (mb_new == 0))
338 {
339 vlib_error_count (vm, node->node_index,
340 DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
341 b->flags |= VLIB_BUFFER_REPL_FAIL;
342 }
343 else
344 *mbp = mb_new;
345
346 vec_add1 (dm->recycle[my_cpu], bi);
347}
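
/*
 * Recycle flow used by the tx path below: a buffer marked
 * VLIB_BUFFER_RECYCLE must not be handed to the PMD directly (the PMD
 * would free it on completion), so its mbuf chain is deep-copied and the
 * copy is transmitted instead, while the original buffer index is queued
 * on dm->recycle[] for return to its owner. If the copy fails, the buffer
 * is flagged VLIB_BUFFER_REPL_FAIL and the enqueue logic skips it.
 */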

/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal,
 * which calls DPDK's tx_burst function.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  struct rte_mbuf **tx_vector;
  u16 i;
  u16 nb_tx_desc = xd->nb_tx_desc;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->cpu_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header (tx_vector, sizeof (*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  if (PREDICT_FALSE (n_on_ring + n_packets > nb_tx_desc))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
			n_packets);

      while (n_packets--)
	{
	  u32 bi0 = from[n_packets];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
	  rte_pktmbuf_free (mb0);
	}
      return n_on_ring;
    }

  if (PREDICT_FALSE (dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
	{
	  u32 bi0 = from[0];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  if (dm->pcap_sw_if_index == 0 ||
	      dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
	  from++;
	  n_left--;
	}
    }

  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % nb_tx_desc;

  while (n_left >= 8)
    {
      u32 bi0, bi1, bi2, bi3;
      struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 or_flags;

      dpdk_prefetch_buffer_by_index (vm, from[4]);
      dpdk_prefetch_buffer_by_index (vm, from[5]);
      dpdk_prefetch_buffer_by_index (vm, from[6]);
      dpdk_prefetch_buffer_by_index (vm, from[7]);

      bi0 = from[0];
      bi1 = from[1];
      bi2 = from[2];
      bi3 = from[3];
      from += 4;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b2 = vlib_get_buffer (vm, bi2);
      b3 = vlib_get_buffer (vm, bi3);

      or_flags = b0->flags | b1->flags | b2->flags | b3->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b0, 1);
	  dpdk_validate_rte_mbuf (vm, b1, 1);
	  dpdk_validate_rte_mbuf (vm, b2, 1);
	  dpdk_validate_rte_mbuf (vm, b3, 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b0, 0);
	  dpdk_validate_rte_mbuf (vm, b1, 0);
	  dpdk_validate_rte_mbuf (vm, b2, 0);
	  dpdk_validate_rte_mbuf (vm, b3, 0);
	}

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);
      mb2 = rte_mbuf_from_vlib_buffer (b2);
      mb3 = rte_mbuf_from_vlib_buffer (b3);

      if (PREDICT_FALSE (or_flags & VLIB_BUFFER_RECYCLE))
	{
	  dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);
	  dpdk_buffer_recycle (vm, node, b1, bi1, &mb1);
	  dpdk_buffer_recycle (vm, node, b2, bi2, &mb2);
	  dpdk_buffer_recycle (vm, node, b3, bi3, &mb3);

	  /* don't enqueue packets if replication failed as they must
	     be sent back to recycle */
	  if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    tx_vector[i++ % nb_tx_desc] = mb0;
	  if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    tx_vector[i++ % nb_tx_desc] = mb1;
	  if (PREDICT_TRUE ((b2->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    tx_vector[i++ % nb_tx_desc] = mb2;
	  if (PREDICT_TRUE ((b3->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    tx_vector[i++ % nb_tx_desc] = mb3;
	}
      else
	{
	  if (PREDICT_FALSE (i + 3 >= nb_tx_desc))
	    {
	      tx_vector[i++ % nb_tx_desc] = mb0;
	      tx_vector[i++ % nb_tx_desc] = mb1;
	      tx_vector[i++ % nb_tx_desc] = mb2;
	      tx_vector[i++ % nb_tx_desc] = mb3;
	      i %= nb_tx_desc;
	    }
	  else
	    {
	      tx_vector[i++] = mb0;
	      tx_vector[i++] = mb1;
	      tx_vector[i++] = mb2;
	      tx_vector[i++] = mb3;
	    }
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
	  if (b1->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
	  if (b2->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi2, b2);
	  if (b3->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi3, b3);
	}

      n_left -= 4;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf *mb0;
      vlib_buffer_t *b0;

      bi0 = from[0];
      from++;

      b0 = vlib_get_buffer (vm, bi0);

      dpdk_validate_rte_mbuf (vm, b0, 1);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b0->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	{
	  tx_vector[i % nb_tx_desc] = mb0;
	  i++;
	}
      n_left--;
    }

  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted.
   * This is the number originally on the ring minus the number remaining
   * on the ring.
   */
  tx_pkts = n_on_ring - n_packets;

  {
    /* If there is no callback then drop any non-transmitted packets */
    if (PREDICT_FALSE (n_packets))
      {
	vlib_simple_counter_main_t *cm;
	vnet_main_t *vnm = vnet_get_main ();

	cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			       VNET_INTERFACE_COUNTER_TX_ERROR);

	vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
				       n_packets);

	vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			  n_packets);

	while (n_packets--)
	  rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
      }

    /* Reset head/tail to avoid unnecessary wrap */
    ring->tx_head = 0;
    ring->tx_tail = 0;
  }

  /* Recycle replicated buffers */
  if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu],
			vec_len (dm->recycle[my_cpu]));
      _vec_len (dm->recycle[my_cpu]) = 0;
    }

  ASSERT (ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}

static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * Set the "last_cleared_stats" to the current stats, so that
   * things appear to clear from a display perspective.
   */
  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
  clib_memcpy (xd->last_cleared_xstats, xd->xstats,
	       vec_len (xd->last_cleared_xstats) *
	       sizeof (xd->last_cleared_xstats[0]));
}

static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	{
	  rv = rte_eth_dev_start (xd->device_index);
	  if (!rv && xd->default_mac_address)
	    rv = rte_eth_dev_default_mac_addr_set (xd->device_index,
						   (struct ether_addr *)
						   xd->default_mac_address);
	}

      if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
	rte_eth_promiscuous_enable (xd->device_index);
      else
	rte_eth_promiscuous_disable (xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
      rte_eth_dev_stop (xd->device_index);

      /* For bonded interface, stop slave links */
      if (xd->pmd == VNET_DPDK_PMD_BOND)
	{
	  u8 slink[16];
	  int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
	  while (nlink >= 1)
	    {
	      u8 dpdk_port = slink[--nlink];
	      rte_eth_dev_stop (dpdk_port);
	    }
	}
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);

  return /* no error */ 0;
}

/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
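
/*
 * Usage sketch (hypothetical caller): this callback is normally reached
 * through vnet_hw_interface_rx_redirect_to_node (), e.g. to divert all rx
 * packets of an interface into "error-drop" and later restore normal
 * processing:
 *
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index,
 *     vlib_get_node_by_name (vm, (u8 *) "error-drop")->index);
 *   ...
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index, ~0);
 */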

static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			       xd->device_index, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
				is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			       xd->device_index, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx)
/* *INDENT-ON* */

#define UP_DOWN_FLAG_EVENT 1

uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  {
	    if (vec_len (event_data) == 2)
	      {
		sw_if_index = event_data[0];
		flags = event_data[1];
		error =
		  vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
					       flags);
		clib_error_report (error);
	      }
	  }
	  break;
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;

    }
  return 0;			/* or not */
}
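
/*
 * Signalling sketch (hypothetical caller, assuming the process node
 * registered below): the handler above expects two queued events per
 * request, sw_if_index first and flags second, so that the flag change
 * runs from process context:
 *
 *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
 *                              UP_DOWN_FLAG_EVENT, sw_if_index);
 *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
 *                              UP_DOWN_FLAG_EVENT, flags);
 */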

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
    .function = admin_up_down_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "admin-up-down-process",
    .process_log2_n_stack_bytes = 17,	// 256KB
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */