/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vlib/unix/physmem.h>
#include <vlib/pci/pci.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <string.h>
#include <fcntl.h>

#include "dpdk_priv.h"

dpdk_main_t dpdk_main;

/* force linker to link functions used by vlib and declared weak */
void *vlib_weakly_linked_functions[] = {
  &rte_pktmbuf_init,
  &rte_pktmbuf_pool_init,
};

#define LINK_STATE_ELOGS  0

#define DEFAULT_HUGE_DIR "/run/vpp/hugepages"
#define VPP_RUN_DIR "/run/vpp"

/* Port configuration, mildly modified Intel app values */

static struct rte_eth_conf port_conf_template = {
  .rxmode = {
    .split_hdr_size = 0,
    .header_split   = 0, /**< Header Split disabled */
    .hw_ip_checksum = 0, /**< IP checksum offload disabled */
    .hw_vlan_filter = 0, /**< VLAN filtering disabled */
    .hw_strip_crc   = 1, /**< CRC stripped by hardware */
  },
  .txmode = {
    .mq_mode = ETH_MQ_TX_NONE,
  },
};

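/*
 * dpdk_port_setup: apply the stored port configuration to a DPDK ethernet
 * device. The device is stopped first if it is admin-up, then
 * rte_eth_dev_configure() is called and one tx/rx queue per configured
 * queue is set up, preferring the device's own CPU socket and falling
 * back to SOCKET_ID_ANY. The device is restarted afterwards if needed.
 */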
clib_error_t *
dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  int rv;
  int j;

  ASSERT(os_get_cpu_number() == 0);

  if (xd->admin_up) {
    vnet_hw_interface_set_flags (dm->vnet_main, xd->vlib_hw_if_index, 0);
    rte_eth_dev_stop (xd->device_index);
  }

  rv = rte_eth_dev_configure (xd->device_index, xd->rx_q_used,
                              xd->tx_q_used, &xd->port_conf);

  if (rv < 0)
    return clib_error_return (0, "rte_eth_dev_configure[%d]: err %d",
                              xd->device_index, rv);

  /* Set up one TX-queue per worker thread */
  for (j = 0; j < xd->tx_q_used; j++)
    {
      rv = rte_eth_tx_queue_setup(xd->device_index, j, xd->nb_tx_desc,
                                  xd->cpu_socket, &xd->tx_conf);

      /* retry with any other CPU socket */
      if (rv < 0)
        rv = rte_eth_tx_queue_setup(xd->device_index, j, xd->nb_tx_desc,
                                    SOCKET_ID_ANY, &xd->tx_conf);
      if (rv < 0)
        break;
    }

  if (rv < 0)
    return clib_error_return (0, "rte_eth_tx_queue_setup[%d]: err %d",
                              xd->device_index, rv);

  for (j = 0; j < xd->rx_q_used; j++)
    {
      rv = rte_eth_rx_queue_setup(xd->device_index, j, xd->nb_rx_desc,
                                  xd->cpu_socket, 0,
                                  bm->pktmbuf_pools[xd->cpu_socket_id_by_queue[j]]);

      /* retry with any other CPU socket */
      if (rv < 0)
        rv = rte_eth_rx_queue_setup(xd->device_index, j, xd->nb_rx_desc,
                                    SOCKET_ID_ANY, 0,
                                    bm->pktmbuf_pools[xd->cpu_socket_id_by_queue[j]]);
      if (rv < 0)
        return clib_error_return (0, "rte_eth_rx_queue_setup[%d]: err %d",
                                  xd->device_index, rv);
    }

  if (xd->admin_up) {
    rte_eth_dev_start (xd->device_index);
  }
  return 0;
}

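/*
 * dpdk_flag_change: callback registered with ethernet_register_interface().
 * Handles promiscuous-mode enable/disable and MTU changes (the latter by
 * reconfiguring the device); returns the previous flag state.
 */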
static u32 dpdk_flag_change (vnet_main_t * vnm,
                             vnet_hw_interface_t * hi,
                             u32 flags)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
  u32 old = 0;

  if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC(flags))
    {
      old = xd->promisc;
      xd->promisc = flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL;

      if (xd->admin_up)
        {
          if (xd->promisc)
            rte_eth_promiscuous_enable(xd->device_index);
          else
            rte_eth_promiscuous_disable(xd->device_index);
        }
    }
  else if (ETHERNET_INTERFACE_FLAG_CONFIG_MTU(flags))
    {
      /*
       * DAW-FIXME: The Cisco VIC firmware does not provide an api for a
       *            driver to dynamically change the mtu.  If/when the
       *            VIC firmware gets fixed, then this should be removed.
       */
      if (xd->pmd == VNET_DPDK_PMD_VICE ||
          xd->pmd == VNET_DPDK_PMD_ENIC)
        {
          struct rte_eth_dev_info dev_info;

          /*
           * Restore mtu to what has been set by CIMC in the firmware cfg.
           */
          rte_eth_dev_info_get(xd->device_index, &dev_info);
          hi->max_packet_bytes = dev_info.max_rx_pktlen;

          vlib_cli_output (vlib_get_main(),
                           "Cisco VIC mtu can only be changed "
                           "using CIMC then rebooting the server!");
        }
      else
        {
          int rv;

          xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes;

          if (xd->admin_up)
            rte_eth_dev_stop (xd->device_index);

          rv = rte_eth_dev_configure
            (xd->device_index,
             xd->rx_q_used,
             xd->tx_q_used,
             &xd->port_conf);

          if (rv < 0)
            vlib_cli_output (vlib_get_main(),
                             "rte_eth_dev_configure[%d]: err %d",
                             xd->device_index, rv);

          rte_eth_dev_set_mtu(xd->device_index, hi->max_packet_bytes);

          if (xd->admin_up)
            rte_eth_dev_start (xd->device_index);
        }
    }
  return old;
}

#ifdef NETMAP
extern int rte_netmap_probe(void);
#endif

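/*
 * Per-device TX locking: when a device exposes fewer TX queues than there
 * are vlib threads, the queues are shared, so each queue gets a cache-line
 * sized spinlock. dpdk_device_lock_free() releases them again.
 */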
void
dpdk_device_lock_init(dpdk_device_t * xd)
{
  int q;
  vec_validate(xd->lockp, xd->tx_q_used - 1);
  for (q = 0; q < xd->tx_q_used; q++)
    {
      xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                             CLIB_CACHE_LINE_BYTES);
      memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES);
    }
  xd->need_txlock = 1;
}

void
dpdk_device_lock_free(dpdk_device_t * xd)
{
  int q;

  for (q = 0; q < vec_len(xd->lockp); q++)
    clib_mem_free((void *) xd->lockp[q]);
  vec_free(xd->lockp);
  xd->lockp = 0;
  xd->need_txlock = 0;
}

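/*
 * dpdk_lib_init: walk all ports discovered by the DPDK EAL, create a vnet
 * ethernet interface for each, size descriptor rings by PMD type, assign
 * rx queues to input threads, and register the interfaces with the
 * ethernet layer. Also creates KNI interfaces when "kni" is configured.
 */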
static clib_error_t *
dpdk_lib_init (dpdk_main_t * dm)
{
  u32 nports;
  u32 nb_desc = 0;
  int i;
  clib_error_t * error;
  vlib_main_t * vm = vlib_get_main();
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vnet_sw_interface_t * sw;
  vnet_hw_interface_t * hi;
  dpdk_device_t * xd;
  vlib_thread_registration_t * tr;
  uword * p;

  u32 next_cpu = 0;
  u8 af_packet_port_id = 0;

  dm->input_cpu_first_index = 0;
  dm->input_cpu_count = 1;

  /* find out which cpus will be used for input */
  p = hash_get_mem (tm->thread_registrations_by_name, "io");
  tr = p ? (vlib_thread_registration_t *) p[0] : 0;

  if (!tr || tr->count == 0)
    {
      /* no io threads, workers doing input */
      p = hash_get_mem (tm->thread_registrations_by_name, "workers");
      tr = p ? (vlib_thread_registration_t *) p[0] : 0;
    }
  else
    {
      dm->have_io_threads = 1;
    }

  if (tr && tr->count > 0)
    {
      dm->input_cpu_first_index = tr->first_index;
      dm->input_cpu_count = tr->count;
    }

  vec_validate_aligned (dm->devices_by_cpu, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_validate_aligned (dm->workers, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

#ifdef NETMAP
  if(rte_netmap_probe() < 0)
    return clib_error_return (0, "rte netmap probe failed");
#endif

  nports = rte_eth_dev_count();
  if (nports < 1)
    {
      clib_warning ("DPDK drivers found no ports...");
    }

  if (CLIB_DEBUG > 0)
    clib_warning ("DPDK drivers found %d ports...", nports);

  /*
   * All buffers are allocated from the same rte_mempool,
   * so they all have the same number of data bytes.
   */
  dm->vlib_buffer_free_list_index =
      vlib_buffer_get_or_create_free_list (
          vm, VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES, "dpdk rx");

  if (dm->conf->enable_tcp_udp_checksum)
    dm->buffer_flags_template &= ~(IP_BUFFER_L4_CHECKSUM_CORRECT
                                   | IP_BUFFER_L4_CHECKSUM_COMPUTED);

  for (i = 0; i < nports; i++)
    {
      u8 addr[6];
      int j;
      struct rte_eth_dev_info dev_info;
      clib_error_t * rv;
      struct rte_eth_link l;

      /* Create vnet interface */
      vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
      xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
      xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
      xd->cpu_socket = (i8) rte_eth_dev_socket_id(i);
      rte_eth_dev_info_get(i, &dev_info);

      clib_memcpy(&xd->tx_conf, &dev_info.default_txconf,
                  sizeof(struct rte_eth_txconf));
      if (dm->conf->no_multi_seg)
        {
          xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
          port_conf_template.rxmode.jumbo_frame = 0;
        }
      else
        {
          xd->tx_conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
          port_conf_template.rxmode.jumbo_frame = 1;
        }

      clib_memcpy(&xd->port_conf, &port_conf_template, sizeof(struct rte_eth_conf));

      xd->tx_q_used = clib_min(dev_info.max_tx_queues, tm->n_vlib_mains);

      if (dm->conf->max_tx_queues)
        xd->tx_q_used = clib_min(xd->tx_q_used, dm->conf->max_tx_queues);

      if (dm->conf->use_rss > 1 && dev_info.max_rx_queues >= dm->conf->use_rss)
        {
          xd->rx_q_used = dm->conf->use_rss;
          xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
          xd->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
        }
      else
        xd->rx_q_used = 1;

      xd->dev_type = VNET_DPDK_DEV_ETH;

      /* workaround for drivers not setting driver_name */
      if (!dev_info.driver_name)
        dev_info.driver_name = dev_info.pci_dev->driver->name;
      ASSERT(dev_info.driver_name);

      if (!xd->pmd) {

#define _(s,f) else if (!strcmp(dev_info.driver_name, s)) \
                 xd->pmd = VNET_DPDK_PMD_##f;
        if (0)
          ;
        foreach_dpdk_pmd
#undef _
        else
          xd->pmd = VNET_DPDK_PMD_UNKNOWN;

        switch (xd->pmd) {
          /* 1G adapters */
          case VNET_DPDK_PMD_E1000EM:
          case VNET_DPDK_PMD_IGB:
          case VNET_DPDK_PMD_IGBVF:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
            break;

          /* 10G adapters */
          case VNET_DPDK_PMD_IXGBE:
          case VNET_DPDK_PMD_IXGBEVF:
          case VNET_DPDK_PMD_THUNDERX:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
            xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE;
            xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE;
            break;

          /* Cisco VIC */
          case VNET_DPDK_PMD_VICE:
          case VNET_DPDK_PMD_ENIC:
            rte_eth_link_get_nowait(i, &l);
            if (l.link_speed == 40000)
              {
                xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
                xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE;
                xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE;
              }
            else
              {
                xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
                xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE;
                xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE;
              }
            break;

          /* Intel Fortville */
          case VNET_DPDK_PMD_I40E:
          case VNET_DPDK_PMD_I40EVF:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
            xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE;
            xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE;

            switch (dev_info.pci_dev->id.device_id) {
              case I40E_DEV_ID_10G_BASE_T:
              case I40E_DEV_ID_SFP_XL710:
                xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
                break;
              case I40E_DEV_ID_QSFP_A:
              case I40E_DEV_ID_QSFP_B:
              case I40E_DEV_ID_QSFP_C:
                xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
                break;
              case I40E_DEV_ID_VF:
                rte_eth_link_get_nowait(i, &l);
                xd->port_type = l.link_speed == 10000 ?
                  VNET_DPDK_PORT_TYPE_ETH_10G : VNET_DPDK_PORT_TYPE_ETH_40G;
                break;
              default:
                xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
            }
            break;

          case VNET_DPDK_PMD_CXGBE:
            switch (dev_info.pci_dev->id.device_id) {
              case 0x5410: /* T580-LP-cr */
                xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE;
                xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE;
                xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G;
                break;
              default:
                xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE;
                xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE;
                xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
            }
            break;

          /* Intel Red Rock Canyon */
          case VNET_DPDK_PMD_FM10K:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH;
            xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE;
            xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE;
            break;

          /* virtio */
          case VNET_DPDK_PMD_VIRTIO:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
            xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
            xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
            break;

          /* vmxnet3 */
          case VNET_DPDK_PMD_VMXNET3:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
            xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
            break;

          case VNET_DPDK_PMD_AF_PACKET:
            xd->port_type = VNET_DPDK_PORT_TYPE_AF_PACKET;
            xd->af_packet_port_id = af_packet_port_id++;
            break;

          case VNET_DPDK_PMD_BOND:
            xd->port_type = VNET_DPDK_PORT_TYPE_ETH_BOND;
            break;

          default:
            xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
        }

  #ifdef NETMAP
        if(strncmp(dev_info.driver_name, "vale", 4) == 0
             || strncmp(dev_info.driver_name, "netmap", 6) == 0)
          {
            xd->pmd = VNET_DPDK_PMD_NETMAP;
            xd->port_type = VNET_DPDK_PORT_TYPE_NETMAP;
          }
  #endif

      }

      /*
       * Ensure default mtu is not > the mtu read from the hardware.
       * Otherwise rte_eth_dev_configure() will fail and the port will
       * not be available.
       */
      if (ETHERNET_MAX_PACKET_BYTES > dev_info.max_rx_pktlen)
        {
          /*
           * This device does not support the platform's max frame
           * size. Use its advertised mru instead.
           */
          xd->port_conf.rxmode.max_rx_pkt_len = dev_info.max_rx_pktlen;
        }
      else
        {
          xd->port_conf.rxmode.max_rx_pkt_len = ETHERNET_MAX_PACKET_BYTES;

          /*
           * Some platforms do not account for Ethernet FCS (4 bytes) in
           * MTU calculations. To interop with them increase mru but only
           * if the device's settings can support it.
           */
          if ((dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES + 4)) &&
              xd->port_conf.rxmode.hw_strip_crc)
            {
              /*
               * Allow additional 4 bytes (for Ethernet FCS). These bytes are
               * stripped by h/w and so will not consume any buffer memory.
               */
              xd->port_conf.rxmode.max_rx_pkt_len += 4;
            }
        }

#if RTE_VERSION < RTE_VERSION_NUM(16, 4, 0, 0)
      /*
       * Older VMXNET3 driver doesn't support jumbo / multi-buffer pkts
       */
      if (xd->pmd == VNET_DPDK_PMD_VMXNET3)
        {
          xd->port_conf.rxmode.max_rx_pkt_len = 1518;
          xd->port_conf.rxmode.jumbo_frame = 0;
        }
#endif

      if (xd->pmd == VNET_DPDK_PMD_AF_PACKET)
        {
          f64 now = vlib_time_now(vm);
          u32 rnd;
          rnd = (u32) (now * 1e6);
          rnd = random_u32 (&rnd);
          clib_memcpy (addr+2, &rnd, sizeof(rnd));
          addr[0] = 2;
          addr[1] = 0xfe;
        }
      else
        rte_eth_macaddr_get(i,(struct ether_addr *)addr);

      if (xd->tx_q_used < tm->n_vlib_mains)
        dpdk_device_lock_init(xd);

      xd->device_index = xd - dm->devices;
      ASSERT(i == xd->device_index);
      xd->per_interface_next_index = ~0;

      /* assign interface to input thread */
      dpdk_device_and_queue_t * dq;
      int q;

      for (q = 0; q < xd->rx_q_used; q++)
        {
          int cpu = dm->input_cpu_first_index + next_cpu;
          unsigned lcore = vlib_worker_threads[cpu].dpdk_lcore_id;

          /*
           * numa node for worker thread handling this queue
           * needed for taking buffers from the right mempool
           */
          vec_validate(xd->cpu_socket_id_by_queue, q);
          xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id(lcore);

          /*
           * construct vector of (device,queue) pairs for each worker thread
           */
          vec_add2(dm->devices_by_cpu[cpu], dq, 1);
          dq->device = xd->device_index;
          dq->queue_id = q;

          next_cpu++;
          if (next_cpu == dm->input_cpu_count)
            next_cpu = 0;
        }

      vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
                            CLIB_CACHE_LINE_BYTES);
      for (j = 0; j < tm->n_vlib_mains; j++)
        {
          vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
                           sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
          vec_reset_length (xd->tx_vectors[j]);
        }

      vec_validate_aligned (xd->rx_vectors, xd->rx_q_used,
                            CLIB_CACHE_LINE_BYTES);
      for (j = 0; j< xd->rx_q_used; j++)
        {
          vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
                                CLIB_CACHE_LINE_BYTES);
          vec_reset_length (xd->rx_vectors[j]);
        }

      vec_validate_aligned (xd->frames, tm->n_vlib_mains,
                            CLIB_CACHE_LINE_BYTES);

      rv = dpdk_port_setup(dm, xd);

      if (rv < 0)
        return rv;

      /* count the number of descriptors used for this device */
      nb_desc += xd->nb_rx_desc + xd->nb_tx_desc * xd->tx_q_used;

      error = ethernet_register_interface
        (dm->vnet_main,
         dpdk_device_class.index,
         xd->device_index,
         /* ethernet address */ addr,
         &xd->vlib_hw_if_index,
         dpdk_flag_change);
      if (error)
        return error;

      sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
      xd->vlib_sw_if_index = sw->sw_if_index;
      hi = vnet_get_hw_interface (dm->vnet_main, xd->vlib_hw_if_index);

      /*
       * DAW-FIXME: The Cisco VIC firmware does not provide an api for a
       *            driver to dynamically change the mtu.  If/when the
       *            VIC firmware gets fixed, then this should be removed.
       */
      if (xd->pmd == VNET_DPDK_PMD_VICE ||
          xd->pmd == VNET_DPDK_PMD_ENIC)
        {
          /*
           * Initialize mtu to what has been set by CIMC in the firmware cfg.
           */
          hi->max_packet_bytes = dev_info.max_rx_pktlen;
          /*
           * remove vlan tag from VIC port to fix VLAN0 issue.
           * TODO Handle VLAN tagged traffic
           */
          int vlan_off;
          vlan_off = rte_eth_dev_get_vlan_offload(xd->device_index);
          vlan_off |= ETH_VLAN_STRIP_OFFLOAD;
          rte_eth_dev_set_vlan_offload(xd->device_index, vlan_off);
        }

#if RTE_VERSION < RTE_VERSION_NUM(16, 4, 0, 0)
      /*
       * Older VMXNET3 driver doesn't support jumbo / multi-buffer pkts
       */
      else if (xd->pmd == VNET_DPDK_PMD_VMXNET3)
        hi->max_packet_bytes = 1518;
#endif

      hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] =
        xd->port_conf.rxmode.max_rx_pkt_len - sizeof(ethernet_header_t);

      rte_eth_dev_set_mtu(xd->device_index, hi->max_packet_bytes);
    }

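  /*
   * Optional KNI interfaces: each kernel NIC interface gets a synthetic
   * locally-administered MAC address and a single rx queue serviced by the
   * first input thread.
   */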
#ifdef RTE_LIBRTE_KNI
  if (dm->conf->num_kni) {
    clib_warning("Initializing KNI interfaces...");
    rte_kni_init(dm->conf->num_kni);
    for (i = 0; i < dm->conf->num_kni; i++)
      {
        u8 addr[6];
        int j;

        /* Create vnet interface */
        vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
        xd->dev_type = VNET_DPDK_DEV_KNI;

        xd->device_index = xd - dm->devices;
        ASSERT(nports + i == xd->device_index);
        xd->per_interface_next_index = ~0;
        xd->kni_port_id = i;
        xd->cpu_socket = -1;
        hash_set (dm->dpdk_device_by_kni_port_id, i, xd - dm->devices);
        xd->rx_q_used = 1;

        /* assign interface to input thread */
        dpdk_device_and_queue_t * dq;
        vec_add2(dm->devices_by_cpu[dm->input_cpu_first_index], dq, 1);
        dq->device = xd->device_index;
        dq->queue_id = 0;

        vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
                              CLIB_CACHE_LINE_BYTES);
        for (j = 0; j < tm->n_vlib_mains; j++)
          {
            vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
                             sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
            vec_reset_length (xd->tx_vectors[j]);
          }

        vec_validate_aligned (xd->rx_vectors, xd->rx_q_used,
                              CLIB_CACHE_LINE_BYTES);
        for (j = 0; j< xd->rx_q_used; j++)
          {
            vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
                                  CLIB_CACHE_LINE_BYTES);
            vec_reset_length (xd->rx_vectors[j]);
          }

        vec_validate_aligned (xd->frames, tm->n_vlib_mains,
                              CLIB_CACHE_LINE_BYTES);

        /* FIXME Set up one TX-queue per worker thread */

        {
          f64 now = vlib_time_now(vm);
          u32 rnd;
          rnd = (u32) (now * 1e6);
          rnd = random_u32 (&rnd);

          clib_memcpy (addr+2, &rnd, sizeof(rnd));
          addr[0] = 2;
          addr[1] = 0xfe;
        }

        error = ethernet_register_interface
          (dm->vnet_main,
           dpdk_device_class.index,
           xd->device_index,
           /* ethernet address */ addr,
           &xd->vlib_hw_if_index,
           dpdk_flag_change);

        if (error)
          return error;

        sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
        xd->vlib_sw_if_index = sw->sw_if_index;
        hi = vnet_get_hw_interface (dm->vnet_main, xd->vlib_hw_if_index);
      }
  }
#endif

  if (nb_desc > dm->conf->num_mbufs)
    clib_warning ("%d mbufs allocated but total rx/tx ring size is %d\n",
                  dm->conf->num_mbufs, nb_desc);

  /* init next vhost-user if index */
  dm->next_vu_if_id = 0;

  return 0;
}

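/*
 * dpdk_bind_devices_to_uio: scan the PCI bus for supported ethernet
 * devices (virtio, vmxnet3, Intel, Cisco VIC, Chelsio T4/T5) and bind them
 * to the configured uio driver. Devices that fail to bind are added to the
 * blacklist passed to the EAL, unless an explicit whitelist is in use.
 */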
static void
dpdk_bind_devices_to_uio (dpdk_config_main_t * conf)
{
  vlib_pci_main_t * pm = &pci_main;
  clib_error_t * error;
  vlib_pci_device_t * d;
  pci_config_header_t * c;
  u8 * pci_addr = 0;

  pool_foreach (d, pm->pci_devs, ({
    c = &d->config0.header;
    vec_reset_length (pci_addr);
    pci_addr = format (pci_addr, "%U%c", format_vlib_pci_addr, &d->bus_address, 0);

    if (c->device_class != PCI_CLASS_NETWORK_ETHERNET)
      continue;

    /* if whitelist exists process only whitelisted devices */
    if (conf->eth_if_whitelist &&
        !strstr ((char *) conf->eth_if_whitelist, (char *) pci_addr))
      continue;

    /* virtio */
    if (c->vendor_id == 0x1af4 && c->device_id == 0x1000)
      ;
    /* vmxnet3 */
    else if (c->vendor_id == 0x15ad && c->device_id == 0x07b0)
      ;
    /* all Intel devices */
    else if (c->vendor_id == 0x8086)
      ;
    /* Cisco VIC */
    else if (c->vendor_id == 0x1137 && c->device_id == 0x0043)
      ;
    /* Chelsio T4/T5 */
    else if (c->vendor_id == 0x1425 && (c->device_id & 0xe000) == 0x4000)
      ;
    else
      {
        clib_warning ("Unsupported Ethernet PCI device 0x%04x:0x%04x found "
                      "at PCI address %s\n", (u16) c->vendor_id, (u16) c->device_id,
                      pci_addr);
        continue;
      }

    error = vlib_pci_bind_to_uio (d, (char *) conf->uio_driver_name);

    if (error)
      {
        if (!conf->eth_if_whitelist)
          conf->eth_if_blacklist = format (conf->eth_if_blacklist, "%U ",
                                           format_vlib_pci_addr, &d->bus_address);
        clib_error_report (error);
      }
  }));
  vec_free (pci_addr);
}

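/*
 * dpdk_config: handler for the "dpdk" startup configuration stanza,
 * registered below via VLIB_CONFIG_FUNCTION. It translates vpp options
 * into EAL arguments, sets up hugepages, binds PCI devices and finally
 * calls rte_eal_init().
 *
 * Illustrative stanza only (the device addresses and sizes are made-up
 * examples); the option names correspond to the unformat clauses below:
 *
 *   dpdk {
 *     dev 0000:02:00.0
 *     dev 0000:02:00.1
 *     socket-mem 1024,1024
 *     num-mbufs 32768
 *     no-multi-seg
 *     enable-tcp-udp-checksum
 *     uio-driver igb_uio
 *   }
 */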
static clib_error_t *
dpdk_config (vlib_main_t * vm, unformat_input_t * input)
{
  clib_error_t * error = 0;
  dpdk_config_main_t * conf = &dpdk_config_main;
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, dpdk_input_node.index);
  u8 * s, * tmp = 0;
  u8 * pci_dev_id = 0;
  u8 * rte_cmd = 0, * ethname = 0;
  u32 log_level;
  int ret, i;
  char * fmt;
#ifdef NETMAP
  int rxrings, txrings, rxslots, txslots, txburst;
  char * nmname;
#endif
  unformat_input_t _in;
  unformat_input_t * in = &_in;
  u8 no_pci = 0;
  u8 no_huge = 0;
  u8 huge_dir = 0;
  u8 file_prefix = 0;
  u8 * socket_mem = 0;

  // MATT-FIXME: inverted virtio-vhost logic to use virtio by default
  conf->use_virtio_vhost = 1;

  while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
    {
      /* Prime the pump */
      if (unformat (input, "no-hugetlb"))
        {
          vec_add1 (conf->eal_init_args, (u8 *) "no-huge");
          no_huge = 1;
        }

      else if (unformat (input, "enable-tcp-udp-checksum"))
        conf->enable_tcp_udp_checksum = 1;

      else if (unformat (input, "decimal-interface-names"))
        conf->interface_name_format_decimal = 1;

      else if (unformat (input, "no-multi-seg"))
        conf->no_multi_seg = 1;

      else if (unformat (input, "dev %s", &pci_dev_id))
        {
          if (conf->eth_if_whitelist)
            {
              /*
               * Don't add duplicate device id's.
               */
              if (strstr ((char *)conf->eth_if_whitelist, (char *)pci_dev_id))
                continue;

              _vec_len (conf->eth_if_whitelist) -= 1; // chomp trailing NULL.
              conf->eth_if_whitelist = format (conf->eth_if_whitelist, " %s%c",
                                               pci_dev_id, 0);
            }
          else
            conf->eth_if_whitelist = format (0, "%s%c", pci_dev_id, 0);
        }

#ifdef NETMAP
      else if (unformat(input, "netmap %s/%d:%d/%d:%d/%d",
                        &nmname, &rxrings, &rxslots, &txrings, &txslots, &txburst)) {
        char * rv;
        rv = (char *)
          eth_nm_args(nmname, rxrings, rxslots, txrings, txslots, txburst);
        if (rv) {
          error = clib_error_return (0, "%s", rv);
          goto done;
        }
      } else if (unformat(input, "netmap %s", &nmname)) {
        char * rv;
        rv = (char *)
          eth_nm_args(nmname, 0, 0, 0, 0, 0);
        if (rv) {
          error = clib_error_return (0, "%s", rv);
          goto done;
        }
      }
#endif

      else if (unformat (input, "num-mbufs %d", &conf->num_mbufs))
        ;
      else if (unformat (input, "max-tx-queues %d", &conf->max_tx_queues))
        ;
      else if (unformat (input, "kni %d", &conf->num_kni))
        ;
      else if (unformat (input, "uio-driver %s", &conf->uio_driver_name))
        ;
      else if (unformat (input, "socket-mem %s", &socket_mem))
        ;
      else if (unformat (input, "vhost-user-coalesce-frames %d", &conf->vhost_coalesce_frames))
        ;
      else if (unformat (input, "vhost-user-coalesce-time %f", &conf->vhost_coalesce_time))
        ;
      else if (unformat (input, "enable-vhost-user"))
        conf->use_virtio_vhost = 0;
      else if (unformat (input, "rss %d", &conf->use_rss))
        ;

#define _(a)                                    \
      else if (unformat(input, #a))             \
        {                                       \
          if (!strncmp(#a, "no-pci", 6))        \
            no_pci = 1;                         \
          tmp = format (0, "--%s%c", #a, 0);    \
          vec_add1 (conf->eal_init_args, tmp);  \
        }
      foreach_eal_double_hyphen_predicate_arg
#undef _

#define _(a)                                        \
      else if (unformat(input, #a " %s", &s))       \
        {                                           \
          if (!strncmp(#a, "huge-dir", 8))          \
            huge_dir = 1;                           \
          else if (!strncmp(#a, "file-prefix", 11)) \
            file_prefix = 1;                        \
          tmp = format (0, "--%s%c", #a, 0);        \
          vec_add1 (conf->eal_init_args, tmp);      \
          vec_add1 (s, 0);                          \
          vec_add1 (conf->eal_init_args, s);        \
        }
      foreach_eal_double_hyphen_arg
#undef _

#define _(a,b)                                  \
      else if (unformat(input, #a " %s", &s))   \
        {                                       \
          tmp = format (0, "-%s%c", #b, 0);     \
          vec_add1 (conf->eal_init_args, tmp);  \
          vec_add1 (s, 0);                      \
          vec_add1 (conf->eal_init_args, s);    \
        }
      foreach_eal_single_hyphen_arg
#undef _

#define _(a,b)                                  \
      else if (unformat(input, #a " %s", &s))   \
        {                                       \
          tmp = format (0, "-%s%c", #b, 0);     \
          vec_add1 (conf->eal_init_args, tmp);  \
          vec_add1 (s, 0);                      \
          vec_add1 (conf->eal_init_args, s);    \
          conf->a##_set_manually = 1;           \
        }
      foreach_eal_single_hyphen_mandatory_arg
#undef _

      else if (unformat(input, "default"))
        ;

      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, input);
          goto done;
        }
    }

  if (!conf->uio_driver_name)
    conf->uio_driver_name = format (0, "igb_uio%c", 0);

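  /*
   * Hugepage auto-setup: unless "no-hugetlb" or an explicit "huge-dir" was
   * given, the block below reads free_hugepages counts from sysfs for each
   * CPU socket, prefers 1G pages when enough are free (2M otherwise), and
   * mounts a hugetlbfs instance on DEFAULT_HUGE_DIR for the EAL.
   */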
  /*
   * Use 1G huge pages if available.
   */
  if (!no_huge && !huge_dir)
    {
      u32 x, * mem_by_socket = 0;
      uword c = 0;
      u8 use_1g = 1;
      u8 use_2m = 1;
      u8 less_than_1g = 1;
      int rv;

      umount(DEFAULT_HUGE_DIR);

      /* Process "socket-mem" parameter value */
      if (vec_len (socket_mem))
        {
          unformat_input_t in;
          unformat_init_vector(&in, socket_mem);
          while (unformat_check_input (&in) != UNFORMAT_END_OF_INPUT)
            {
              if (unformat (&in, "%u,", &x))
                ;
              else if (unformat (&in, "%u", &x))
                ;
              else if (unformat (&in, ","))
                x = 0;
              else
                break;

              vec_add1(mem_by_socket, x);

              if (x > 1023)
                less_than_1g = 0;
            }
          /* Note: unformat_free vec_frees(in.buffer), aka socket_mem... */
          unformat_free(&in);
          socket_mem = 0;
        }
      else
        {
          clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
            {
              vec_validate(mem_by_socket, c);
              mem_by_socket[c] = 512; /* default per-socket mem */
            }
          ));
        }

      /* check if available enough 1GB pages for each socket */
      clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
        {
          u32 pages_avail, page_size, mem;
          u8 *s = 0;
          u8 *p = 0;
          char * numa_path = "/sys/devices/system/node/node%u/";
          char * nonnuma_path = "/sys/kernel/mm/";
          char * suffix = "hugepages/hugepages-%ukB/free_hugepages%c";
          char * path = NULL;
          struct stat sb_numa, sb_nonnuma;

          p = format(p, numa_path, c);
          stat(numa_path, &sb_numa);
          stat(nonnuma_path, &sb_nonnuma);

          if (S_ISDIR(sb_numa.st_mode)) {
            path = (char*)format((u8*)path, "%s%s", p, suffix);
          } else if (S_ISDIR(sb_nonnuma.st_mode)) {
            path = (char*)format((u8*)path, "%s%s", nonnuma_path, suffix);
          } else {
            use_1g = 0;
            use_2m = 0;
            vec_free(p);
            break;
          }

          vec_validate(mem_by_socket, c);
          mem = mem_by_socket[c];

          page_size = 1024;
          pages_avail = 0;
          s = format (s, path, page_size * 1024, 0);
          vlib_sysfs_read ((char *) s, "%u", &pages_avail);
          vec_reset_length (s);

          if (page_size * pages_avail < mem)
            use_1g = 0;

          page_size = 2;
          pages_avail = 0;
          s = format (s, path, page_size * 1024, 0);
          vlib_sysfs_read ((char *) s, "%u", &pages_avail);
          vec_reset_length (s);

          if (page_size * pages_avail < mem)
            use_2m = 0;

          vec_free(s);
          vec_free(p);
          vec_free(path);
      }));
      _vec_len (mem_by_socket) = c + 1;

      /* regenerate socket_mem string */
      vec_foreach_index (x, mem_by_socket)
        socket_mem = format (socket_mem, "%s%u",
                             socket_mem ? "," : "",
                             mem_by_socket[x]);
      socket_mem = format (socket_mem, "%c", 0);

      vec_free (mem_by_socket);

      rv = mkdir(VPP_RUN_DIR, 0755);
      if (rv && errno != EEXIST)
        {
          error = clib_error_return (0, "mkdir '%s' failed errno %d",
                                     VPP_RUN_DIR, errno);
          goto done;
        }

      rv = mkdir(DEFAULT_HUGE_DIR, 0755);
      if (rv && errno != EEXIST)
        {
          error = clib_error_return (0, "mkdir '%s' failed errno %d",
                                     DEFAULT_HUGE_DIR, errno);
          goto done;
        }

      if (use_1g && !(less_than_1g && use_2m))
        {
          rv = mount("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, "pagesize=1G");
        }
      else if (use_2m)
        {
          rv = mount("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, NULL);
        }
      else
        {
          return clib_error_return (0, "not enough free huge pages");
        }

      if (rv)
        {
          error = clib_error_return (0, "mount failed %d", errno);
          goto done;
        }

      tmp = format (0, "--huge-dir%c", 0);
      vec_add1 (conf->eal_init_args, tmp);
      tmp = format (0, "%s%c", DEFAULT_HUGE_DIR, 0);
      vec_add1 (conf->eal_init_args, tmp);
      if (!file_prefix)
        {
          tmp = format (0, "--file-prefix%c", 0);
          vec_add1 (conf->eal_init_args, tmp);
          tmp = format (0, "vpp%c", 0);
          vec_add1 (conf->eal_init_args, tmp);
        }
    }

  vec_free (rte_cmd);
  vec_free (ethname);

  if (error)
    return error;

  /* I'll bet that -c and -n must be the first and second args... */
  if (!conf->coremask_set_manually)
    {
      vlib_thread_registration_t * tr;
      uword * coremask = 0;
      int i;

      /* main thread core */
      coremask = clib_bitmap_set(coremask, tm->main_lcore, 1);

      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          tr = tm->registrations[i];
          coremask = clib_bitmap_or(coremask, tr->coremask);
        }

      vec_insert (conf->eal_init_args, 2, 1);
      conf->eal_init_args[1] = (u8 *) "-c";
      tmp = format (0, "%U%c", format_bitmap_hex, coremask, 0);
      conf->eal_init_args[2] = tmp;
      clib_bitmap_free(coremask);
    }

  if (!conf->nchannels_set_manually)
    {
      vec_insert (conf->eal_init_args, 2, 3);
      conf->eal_init_args[3] = (u8 *) "-n";
      tmp = format (0, "%d", conf->nchannels);
      conf->eal_init_args[4] = tmp;
    }

  if (no_pci == 0 && geteuid() == 0)
    dpdk_bind_devices_to_uio(conf);

  /*
   * If there are whitelisted devices,
   * add the whitelist option & device list to the dpdk arg list...
   */
  if (conf->eth_if_whitelist)
    {
      unformat_init_string (in, (char *) conf->eth_if_whitelist,
                            vec_len (conf->eth_if_whitelist) - 1);
      fmt = "-w%c";
    }

  /*
   * Otherwise add the blacklisted devices to the dpdk arg list.
   */
  else
    {
      unformat_init_string (in, (char *)conf->eth_if_blacklist,
                            vec_len(conf->eth_if_blacklist) - 1);
      fmt = "-b%c";
    }

  while (unformat_check_input (in) != UNFORMAT_END_OF_INPUT)
    {
      tmp = format (0, fmt, 0);
      vec_add1 (conf->eal_init_args, tmp);
      unformat (in, "%s", &pci_dev_id);
      vec_add1 (conf->eal_init_args, pci_dev_id);
    }

  /* set master-lcore */
  tmp = format (0, "--master-lcore%c", 0);
  vec_add1 (conf->eal_init_args, tmp);
  tmp = format (0, "%u%c", tm->main_lcore, 0);
  vec_add1 (conf->eal_init_args, tmp);

  /* set socket-mem */
  tmp = format (0, "--socket-mem%c", 0);
  vec_add1 (conf->eal_init_args, tmp);
  tmp = format (0, "%s%c", socket_mem, 0);
  vec_add1 (conf->eal_init_args, tmp);

  /* NULL terminate the "argv" vector, in case of stupidity */
  vec_add1 (conf->eal_init_args, 0);
  _vec_len(conf->eal_init_args) -= 1;

  /* Set up DPDK eal and packet mbuf pool early. */

  log_level = (CLIB_DEBUG > 0) ? RTE_LOG_DEBUG : RTE_LOG_NOTICE;

  rte_set_log_level (log_level);

  vm = vlib_get_main ();

  /* make copy of args as rte_eal_init tends to mess up with arg array */
  for (i = 1; i < vec_len(conf->eal_init_args); i++)
    conf->eal_init_args_str = format(conf->eal_init_args_str, "%s ",
                                     conf->eal_init_args[i]);

  ret = rte_eal_init(vec_len(conf->eal_init_args), (char **) conf->eal_init_args);

  /* lazy umount hugepages */
  umount2(DEFAULT_HUGE_DIR, MNT_DETACH);

  if (ret < 0)
    return clib_error_return (0, "rte_eal_init returned %d", ret);

  /* Dump the physical memory layout prior to creating the mbuf_pool */
  fprintf(stdout, "DPDK physical memory layout:\n");
  rte_dump_physmem_layout(stdout);

  /* main thread 1st */
  error = vlib_buffer_pool_create(vm, conf->num_mbufs, rte_socket_id());
  if (error)
    return error;

  for (i = 0; i < RTE_MAX_LCORE; i++)
    {
      error = vlib_buffer_pool_create(vm, conf->num_mbufs,
                                      rte_lcore_to_socket_id(i));
      if (error)
        return error;
    }

  if (conf->use_rss)
    rt->function = dpdk_input_rss_multiarch_select();
  else
    rt->function = dpdk_input_multiarch_select();
 done:
  return error;
}

VLIB_CONFIG_FUNCTION (dpdk_config, "dpdk");

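/*
 * dpdk_update_link_state: refresh the cached link state of a PMD interface
 * via rte_eth_link_get_nowait() and propagate link, duplex and speed
 * changes to the vnet hw interface flags, optionally event-logging them.
 */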
void dpdk_update_link_state (dpdk_device_t * xd, f64 now)
{
  vnet_main_t * vnm = vnet_get_main();
  struct rte_eth_link prev_link = xd->link;
  u32 hw_flags = 0;
  u8 hw_flags_chg = 0;

  /* only update link state for PMD interfaces */
  if (xd->dev_type != VNET_DPDK_DEV_ETH)
    return;

  xd->time_last_link_update = now ? now : xd->time_last_link_update;
  memset(&xd->link, 0, sizeof(xd->link));
  rte_eth_link_get_nowait (xd->device_index, &xd->link);

  if (LINK_STATE_ELOGS)
    {
      vlib_main_t * vm = vlib_get_main();
      ELOG_TYPE_DECLARE(e) = {
        .format =
          "update-link-state: sw_if_index %d, admin_up %d,"
          "old link_state %d new link_state %d",
        .format_args = "i4i1i1i1",
      };

      struct { u32 sw_if_index; u8 admin_up;
               u8 old_link_state; u8 new_link_state;} *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->sw_if_index = xd->vlib_sw_if_index;
      ed->admin_up = xd->admin_up;
      ed->old_link_state = (u8)
        vnet_hw_interface_is_link_up (vnm, xd->vlib_hw_if_index);
      ed->new_link_state = (u8) xd->link.link_status;
    }

  if ((xd->admin_up == 1) &&
      ((xd->link.link_status != 0) ^
       vnet_hw_interface_is_link_up (vnm, xd->vlib_hw_if_index)))
    {
      hw_flags_chg = 1;
      hw_flags |= (xd->link.link_status ?
                   VNET_HW_INTERFACE_FLAG_LINK_UP: 0);
    }

  if (hw_flags_chg || (xd->link.link_duplex != prev_link.link_duplex))
    {
      hw_flags_chg = 1;
      switch (xd->link.link_duplex)
        {
        case ETH_LINK_HALF_DUPLEX:
          hw_flags |= VNET_HW_INTERFACE_FLAG_HALF_DUPLEX;
          break;
        case ETH_LINK_FULL_DUPLEX:
          hw_flags |= VNET_HW_INTERFACE_FLAG_FULL_DUPLEX;
          break;
        default:
          break;
        }
    }
#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
  if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed))
    {
      hw_flags_chg = 1;
      switch (xd->link.link_speed)
        {
        case ETH_SPEED_NUM_10M:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10M;
          break;
        case ETH_SPEED_NUM_100M:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
          break;
        case ETH_SPEED_NUM_1G:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
          break;
        case ETH_SPEED_NUM_10G:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
          break;
        case ETH_SPEED_NUM_40G:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
          break;
        case 0:
          break;
        default:
          clib_warning("unknown link speed %d", xd->link.link_speed);
          break;
        }
    }
#else
  if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed))
    {
      hw_flags_chg = 1;
      switch (xd->link.link_speed)
        {
        case ETH_LINK_SPEED_10:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10M;
          break;
        case ETH_LINK_SPEED_100:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
          break;
        case ETH_LINK_SPEED_1000:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
          break;
        case ETH_LINK_SPEED_10000:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
          break;
        case ETH_LINK_SPEED_40G:
          hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
          break;
        case 0:
          break;
        default:
          clib_warning("unknown link speed %d", xd->link.link_speed);
          break;
        }
    }
#endif
  if (hw_flags_chg)
    {
      if (LINK_STATE_ELOGS)
        {
          vlib_main_t * vm = vlib_get_main();

          ELOG_TYPE_DECLARE(e) = {
            .format = "update-link-state: sw_if_index %d, new flags %d",
            .format_args = "i4i4",
          };

          struct { u32 sw_if_index; u32 flags; } *ed;
          ed = ELOG_DATA (&vm->elog_main, e);
          ed->sw_if_index = xd->vlib_sw_if_index;
          ed->flags = hw_flags;
        }
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, hw_flags);
    }
}

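/*
 * dpdk-process: vlib process node. It runs dpdk_lib_init() once, enables
 * polling on the input node(s), completes bond-interface setup, and then
 * periodically polls device counters, link state and vhost-user sessions.
 */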
static uword
dpdk_process (vlib_main_t * vm,
              vlib_node_runtime_t * rt,
              vlib_frame_t * f)
{
  clib_error_t * error;
  vnet_main_t * vnm = vnet_get_main();
  dpdk_main_t * dm = &dpdk_main;
  ethernet_main_t * em = &ethernet_main;
  dpdk_device_t * xd;
  vlib_thread_main_t * tm = vlib_get_thread_main();
  void *vu_state;
  int i;

  error = dpdk_lib_init (dm);

  /*
   * Turn on the input node if we found some devices to drive
   * and we're not running worker threads or i/o threads
   */

  if (error == 0 && vec_len(dm->devices) > 0)
    {
      if (tm->n_vlib_mains == 1)
        vlib_node_set_state (vm, dpdk_input_node.index,
                             VLIB_NODE_STATE_POLLING);
      else if (tm->main_thread_is_io_node)
        vlib_node_set_state (vm, dpdk_io_input_node.index,
                             VLIB_NODE_STATE_POLLING);
      else if (!dm->have_io_threads)
        for (i=0; i < tm->n_vlib_mains; i++)
          if (vec_len(dm->devices_by_cpu[i]) > 0)
            vlib_node_set_state (vlib_mains[i], dpdk_input_node.index,
                                 VLIB_NODE_STATE_POLLING);
    }

  if (error)
    clib_error_report (error);

  dpdk_vhost_user_process_init(&vu_state);

  dm->io_thread_release = 1;

  f64 now = vlib_time_now (vm);
  vec_foreach (xd, dm->devices)
    {
      dpdk_update_link_state (xd, now);
    }

{ // Extra set up for bond interfaces:
  // 1. Set up MACs for bond interfaces and their slave links, which were set
  //    in dpdk_port_setup() but need to be done again here to take effect.
  // 2. Set max L3 packet size of each bond interface to the lowest value of
  //    its slave links.
  // 3. Set up info for bond interface related CLI support.
  int nports = rte_eth_dev_count();
  if (nports > 0) {
    for (i = 0; i < nports; i++) {
      struct rte_eth_dev_info dev_info;
      rte_eth_dev_info_get(i, &dev_info);
      if (!dev_info.driver_name)
        dev_info.driver_name = dev_info.pci_dev->driver->name;
      ASSERT(dev_info.driver_name);
      if (strncmp(dev_info.driver_name, "rte_bond_pmd", 12) == 0) {
        u8 addr[6];
        u8 slink[16];
        int nlink = rte_eth_bond_slaves_get(i, slink, 16);
        if (nlink > 0) {
          vnet_hw_interface_t * bhi;
          ethernet_interface_t * bei;
          /* Get MAC of 1st slave link */
          rte_eth_macaddr_get(slink[0], (struct ether_addr *)addr);
          /* Set MAC of bonded interface to that of 1st slave link */
          rte_eth_bond_mac_address_set(i, (struct ether_addr *)addr);
          /* Populate MAC of bonded interface in VPP hw tables */
          bhi = vnet_get_hw_interface(
              vnm, dm->devices[i].vlib_hw_if_index);
          bei = pool_elt_at_index(em->interfaces, bhi->hw_instance);
          clib_memcpy(bhi->hw_address, addr, 6);
          clib_memcpy(bei->address, addr, 6);
          /* Init l3 packet size allowed on bonded interface */
          bhi->max_l3_packet_bytes[VLIB_RX] =
          bhi->max_l3_packet_bytes[VLIB_TX] =
              ETHERNET_MAX_PACKET_BYTES - sizeof(ethernet_header_t);
          while (nlink >= 1) { /* for all slave links */
            int slave = slink[--nlink];
            dpdk_device_t * sdev = &dm->devices[slave];
            vnet_hw_interface_t * shi;
            vnet_sw_interface_t * ssi;
            /* Add MAC to all slave links except the first one */
            if (nlink) rte_eth_dev_mac_addr_add(
                slave, (struct ether_addr *)addr, 0);
            /* Set slaves bitmap for bonded interface */
            bhi->bond_info = clib_bitmap_set(
                bhi->bond_info, sdev->vlib_hw_if_index, 1);
            /* Set slave link flags on slave interface */
            shi = vnet_get_hw_interface(vnm, sdev->vlib_hw_if_index);
            ssi = vnet_get_sw_interface(vnm, sdev->vlib_sw_if_index);
            shi->bond_info = VNET_HW_INTERFACE_BOND_INFO_SLAVE;
            ssi->flags |= VNET_SW_INTERFACE_FLAG_BOND_SLAVE;
            /* Set l3 packet size allowed as the lowest of slave */
            if (bhi->max_l3_packet_bytes[VLIB_RX] >
                shi->max_l3_packet_bytes[VLIB_RX])
              bhi->max_l3_packet_bytes[VLIB_RX] =
              bhi->max_l3_packet_bytes[VLIB_TX] =
                  shi->max_l3_packet_bytes[VLIB_RX];
          }
        }
      }
    }
  }
}

  while (1)
    {
      /*
       * check each time through the loop in case intervals are changed
       */
      f64 min_wait = dm->link_state_poll_interval < dm->stat_poll_interval ?
                     dm->link_state_poll_interval : dm->stat_poll_interval;

      vlib_process_wait_for_event_or_clock (vm, min_wait);

      if (dpdk_get_admin_up_down_in_progress())
        /* skip the poll if an admin up down is in progress (on any interface) */
        continue;

      vec_foreach (xd, dm->devices)
        {
          f64 now = vlib_time_now (vm);
          if ((now - xd->time_last_stats_update) >= dm->stat_poll_interval)
            dpdk_update_counters (xd, now);
          if ((now - xd->time_last_link_update) >= dm->link_state_poll_interval)
            dpdk_update_link_state (xd, now);

          if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
            if (dpdk_vhost_user_process_if(vm, xd, vu_state) != 0)
              continue;
        }
    }

  dpdk_vhost_user_process_cleanup(vu_state);

  return 0;
}

VLIB_REGISTER_NODE (dpdk_process_node,static) = {
    .function = dpdk_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "dpdk-process",
    .process_log2_n_stack_bytes = 17,
};

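/*
 * Runtime knobs for the dpdk-process poll loop; both setters validate the
 * requested interval against a minimum before updating dpdk_main.
 */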
int dpdk_set_stat_poll_interval (f64 interval)
{
  if (interval < DPDK_MIN_STATS_POLL_INTERVAL)
    return (VNET_API_ERROR_INVALID_VALUE);

  dpdk_main.stat_poll_interval = interval;

  return 0;
}

int dpdk_set_link_state_poll_interval (f64 interval)
{
  if (interval < DPDK_MIN_LINK_POLL_INTERVAL)
    return (VNET_API_ERROR_INVALID_VALUE);

  dpdk_main.link_state_poll_interval = interval;

  return 0;
}

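/*
 * dpdk_init: VLIB init function. Verifies cache-line alignment of key
 * structures and sets configuration defaults (memory channels, mbuf count,
 * vhost-user coalescing, EFD thresholds, poll intervals, buffer flags)
 * before dpdk_config and the dpdk-process node run.
 */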
clib_error_t *
dpdk_init (vlib_main_t * vm)
{
  dpdk_main_t * dm = &dpdk_main;
  vlib_node_t * ei;
  clib_error_t * error = 0;
  vlib_thread_main_t * tm = vlib_get_thread_main();

  /* verify that structs are cacheline aligned */
  ASSERT(offsetof(dpdk_device_t, cacheline0) == 0);
  ASSERT(offsetof(dpdk_device_t, cacheline1) == CLIB_CACHE_LINE_BYTES);
  ASSERT(offsetof(dpdk_worker_t, cacheline0) == 0);
  ASSERT(offsetof(frame_queue_trace_t, cacheline0) == 0);

  dm->vlib_main = vm;
  dm->vnet_main = vnet_get_main();
  dm->conf = &dpdk_config_main;

  ei = vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
  if (ei == 0)
    return clib_error_return (0, "ethernet-input node AWOL");

  dm->ethernet_input_node_index = ei->index;

  dm->conf->nchannels = 4;
  dm->conf->num_mbufs = dm->conf->num_mbufs ? dm->conf->num_mbufs : NB_MBUF;
  vec_add1 (dm->conf->eal_init_args, (u8 *) "vnet");

  dm->dpdk_device_by_kni_port_id = hash_create (0, sizeof (uword));
  dm->vu_sw_if_index_by_listener_fd = hash_create (0, sizeof (uword));
  dm->vu_sw_if_index_by_sock_fd = hash_create (0, sizeof (uword));

  /* $$$ use n_thread_stacks since it's known-good at this point */
  vec_validate (dm->recycle, tm->n_thread_stacks - 1);

  /* initialize EFD (early fast discard) default settings */
  dm->efd.enabled = DPDK_EFD_DISABLED;
  dm->efd.queue_hi_thresh = ((DPDK_EFD_DEFAULT_DEVICE_QUEUE_HI_THRESH_PCT *
                              DPDK_NB_RX_DESC_10GE)/100);
  dm->efd.consec_full_frames_hi_thresh =
      DPDK_EFD_DEFAULT_CONSEC_FULL_FRAMES_HI_THRESH;

  /* vhost-user coalescence frames defaults */
  dm->conf->vhost_coalesce_frames = 32;
  dm->conf->vhost_coalesce_time = 1e-3;

  /* Default vlib_buffer_t flags, DISABLES tcp/udp checksumming... */
  dm->buffer_flags_template =
    (VLIB_BUFFER_TOTAL_LENGTH_VALID
     | IP_BUFFER_L4_CHECKSUM_COMPUTED
     | IP_BUFFER_L4_CHECKSUM_CORRECT);

  dm->stat_poll_interval = DPDK_STATS_POLL_INTERVAL;
  dm->link_state_poll_interval = DPDK_LINK_POLL_INTERVAL;

  /* init CLI */
  if ((error = vlib_call_init_function (vm, dpdk_cli_init)))
    return error;

  return error;
}

VLIB_INIT_FUNCTION (dpdk_init);