/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * WARNING!
 * This driver is not intended for production use and it is unsupported.
 * It is provided for educational use only.
 * Please use the supported DPDK driver instead.
 */

#if __x86_64__
#include <vppinfra/vector.h>

#ifndef CLIB_HAVE_VEC128
#warning HACK: ixge driver will not really work, missing u32x4
typedef unsigned long long u32x4;
#endif

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/vnet.h>
#include <vnet/devices/nic/ixge.h>
#include <vnet/ethernet/ethernet.h>

#define IXGE_ALWAYS_POLL 0

#define EVENT_SET_FLAGS 0
#define IXGE_HWBP_RACE_ELOG 0

#define PCI_VENDOR_ID_INTEL 0x8086

/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
#define XGE_PHY_DEV_TYPE_PMA_PMD 1
#define XGE_PHY_DEV_TYPE_PHY_XS 4
#define XGE_PHY_ID1 0x2
#define XGE_PHY_ID2 0x3
#define XGE_PHY_CONTROL 0x0
#define XGE_PHY_CONTROL_RESET (1 << 15)

ixge_main_t ixge_main;
static vlib_node_registration_t ixge_input_node;
static vlib_node_registration_t ixge_process_node;

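/* Take the device's software semaphore, which serializes access to
   registers shared between software and firmware: wait for the grant
   bit, then set the register-access bit until it sticks. */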
static void
ixge_semaphore_get (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 i;

  i = 0;
  while (!(r->software_semaphore & (1 << 0)))
    {
      if (i > 0)
        vlib_process_suspend (vm, 100e-6);
      i++;
    }
  do
    {
      r->software_semaphore |= 1 << 1;
    }
  while (!(r->software_semaphore & (1 << 1)));
}

static void
ixge_semaphore_release (ixge_device_t * xd)
{
  ixge_regs_t *r = xd->regs;
  r->software_semaphore &= ~3;
}

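/* Claim resource bits in the software/firmware synchronization
   register.  The firmware's corresponding bits live 5 positions up;
   back off (with a 10 ms suspend) and retry while firmware holds the
   resource. */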
static void
ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 fw_mask = sw_mask << 5;
  u32 m, done = 0;

  while (!done)
    {
      ixge_semaphore_get (xd);
      m = r->software_firmware_sync;
      done = (m & fw_mask) == 0;
      if (done)
        r->software_firmware_sync = m | sw_mask;
      ixge_semaphore_release (xd);
      if (!done)
        vlib_process_suspend (vm, 10e-3);
    }
}

static void
ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
{
  ixge_regs_t *r = xd->regs;
  ixge_semaphore_get (xd);
  r->software_firmware_sync &= ~sw_mask;
  ixge_semaphore_release (xd);
}

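/* Read or write a PHY register via the MAC's MDIO interface (IEEE
   802.3 clause 45): an address cycle followed by a read or write
   command, busy-waiting on the command-busy bit after each step. */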
u32
ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
                         u32 v, u32 is_read)
{
  ixge_regs_t *r = xd->regs;
  const u32 busy_bit = 1 << 30;
  u32 x;

  ASSERT (xd->phy_index < 2);
  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));

  ASSERT (reg_index < (1 << 16));
  ASSERT (dev_type < (1 << 5));
  if (!is_read)
    r->xge_mac.phy_data = v;

  /* Address cycle. */
  x = reg_index | (dev_type << 16)
    | (xd->phys[xd->phy_index].mdio_address << 21);
  r->xge_mac.phy_command = x | busy_bit;
  /* Busy wait timed to take 28e-6 secs.  No suspend. */
  while (r->xge_mac.phy_command & busy_bit)
    ;

  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
  while (r->xge_mac.phy_command & busy_bit)
    ;

  if (is_read)
    v = r->xge_mac.phy_data >> 16;

  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));

  return v;
}

static u32
ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
{
  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0,
                                  /* is_read */ 1);
}

static void
ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
{
  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v,
                                  /* is_read */ 0);
}

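/* Bit-bang i2c glue for reading the SFP module EEPROM: SDA drives
   bit 3 and reads back on bit 2 of i2c_control; SCL drives bit 1 and
   reads back on bit 0. */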
static void
ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
  u32 v;

  v = 0;
  v |= (sda != 0) << 3;
  v |= (scl != 0) << 1;
  xd->regs->i2c_control = v;
}

static void
ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
  u32 v;

  v = xd->regs->i2c_control;
  *sda = (v & (1 << 2)) != 0;
  *scl = (v & (1 << 0)) != 0;
}

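/* Read one 16-bit word from the device EEPROM: write the address plus
   a start bit, busy-wait for the done bit, data returns in the upper
   half of the register. */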
static u16
ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
  ixge_regs_t *r = xd->regs;
  u32 v;
  r->eeprom_read = ((/* start bit */ (1 << 0)) | (address << 2));
  /* Wait for done bit. */
  while (!((v = r->eeprom_read) & (1 << 1)))
    ;
  return v >> 16;
}

static void
ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
{
  u32 tx_disable_bit = 1 << 3;
  if (enable)
    xd->regs->sdp_control &= ~tx_disable_bit;
  else
    xd->regs->sdp_control |= tx_disable_bit;
}

static void
ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
{
  u32 is_10g_bit = 1 << 5;
  if (enable)
    xd->regs->sdp_control |= is_10g_bit;
  else
    xd->regs->sdp_control &= ~is_10g_bit;
}

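/* Scan the EEPROM's SFP init table (starting at word 0x2b) for an
   (id, register-list address) entry matching sfp_type and replay the
   listed values, terminated by 0xffff, into core_analog_config. */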
static clib_error_t *
ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
{
  u16 a, id, reg_values_addr = 0;

  a = ixge_read_eeprom (xd, 0x2b);
  if (a == 0 || a == 0xffff)
    return clib_error_create ("no init sequence in eeprom");

  while (1)
    {
      id = ixge_read_eeprom (xd, ++a);
      if (id == 0xffff)
        break;
      reg_values_addr = ixge_read_eeprom (xd, ++a);
      if (id == sfp_type)
        break;
    }
  if (id != sfp_type)
    return clib_error_create ("failed to find id 0x%x", sfp_type);

  ixge_software_firmware_sync (xd, 1 << 3);
  while (1)
    {
      u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
      if (v == 0xffff)
        break;
      xd->regs->core_analog_config = v;
    }
  ixge_software_firmware_sync_release (xd, 1 << 3);

  /* Make sure laser is off.  We'll turn on the laser when
     the interface is brought up. */
  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);

  return 0;
}

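/* SFP link bring-up/tear-down: force 10G serial (SFI) mode, restart
   autonegotiation twice (waiting for link-partner ability in
   between), then switch the laser on or off to match. */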
static void
ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
{
  u32 v;

  if (is_up)
    {
      /* pma/pmd 10g serial SFI. */
      xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
      xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;

      v = xd->regs->xge_mac.auto_negotiation_control;
      v &= ~(7 << 13);
      v |= (0 << 13);
      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
        ;

      v = xd->regs->xge_mac.auto_negotiation_control;

      /* link mode 10g sfi serdes */
      v &= ~(7 << 13);
      v |= (3 << 13);

      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Dummy read; presumably latches link status. */
      xd->regs->xge_mac.link_status;
    }

  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);

  /* Give time for link partner to notice that we're up. */
  if (is_up && vlib_in_process_context (vlib_get_main ()))
    {
      vlib_process_suspend (vlib_get_main (), 300e-3);
    }
}

always_inline ixge_dma_regs_t *
get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
{
  ixge_regs_t *r = xd->regs;
  ASSERT (qi < 128);
  if (rt == VLIB_RX)
    return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
  else
    return &r->tx_dma[qi];
}

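/* Admin up/down handler: gate receive and transmit DMA, waiting for
   rx queue 0's DMA-enable bit to latch on the way up, then toggle
   the SFP link. */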
static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);

  if (is_up)
    {
      xd->regs->rx_enable |= 1;
      xd->regs->tx_dma_control |= 1;
      dr->control |= 1 << 25;
      while (!(dr->control & (1 << 25)))
        ;
    }
  else
    {
      xd->regs->rx_enable &= ~1;
      xd->regs->tx_dma_control &= ~1;
    }

  ixge_sfp_device_up_down (xd, is_up);

  return /* no error */ 0;
}

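/* SFP path of PHY init: hook up bit-bang i2c, read 128 bytes of
   module EEPROM at i2c address 0x50, and if valid run the
   EEPROM-driven analog configuration. */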
static void
ixge_sfp_phy_init (ixge_device_t * xd)
{
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  i2c_bus_t *ib = &xd->i2c_bus;

  ib->private_data = xd->device_index;
  ib->put_bits = ixge_i2c_put_bits;
  ib->get_bits = ixge_i2c_get_bits;
  vlib_i2c_init (ib);

  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);

  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
    xd->sfp_eeprom.id = SFP_ID_unknown;
  else
    {
      /* FIXME 5 => SR/LR eeprom ID. */
      clib_error_t *e =
        ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
      if (e)
        clib_error_report (e);
    }

  phy->mdio_address = ~0;
}

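/* Generic PHY init: SFP devices take the i2c path above; otherwise
   probe all 32 MDIO addresses for a responding PHY, log its id in
   the event log, then reset it and wait for the reset to clear. */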
static void
ixge_phy_init (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_phy_t *phy = xd->phys + xd->phy_index;

  switch (xd->device_id)
    {
    case IXGE_82599_sfp:
    case IXGE_82599_sfp_em:
    case IXGE_82599_sfp_fcoe:
      /* others? */
      return ixge_sfp_phy_init (xd);

    default:
      break;
    }

  /* Probe address of phy. */
  {
    u32 i, v;

    phy->mdio_address = ~0;
    for (i = 0; i < 32; i++)
      {
        phy->mdio_address = i;
        v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
        if (v != 0xffff && v != 0)
          break;
      }

    /* No PHY found? */
    if (i >= 32)
      return;
  }

  phy->id =
    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));

  {
    ELOG_TYPE_DECLARE (e) = {
      .function = (char *) __FUNCTION__,
      .format = "ixge %d, phy id 0x%d mdio address %d",
      .format_args = "i4i4i4",
    };
    struct
    {
      u32 instance, id, address;
    } *ed;
    ed = ELOG_DATA (&vm->elog_main, e);
    ed->instance = xd->device_index;
    ed->id = phy->id;
    ed->address = phy->mdio_address;
  }

  /* Reset phy. */
  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
                      XGE_PHY_CONTROL_RESET);

  /* Wait for self-clearing reset bit to clear. */
  do
    {
      vlib_process_suspend (vm, 1e-3);
    }
  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
         XGE_PHY_CONTROL_RESET);
}

static u8 *
format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
{
  ixge_rx_from_hw_descriptor_t *d =
    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
  u32 s0 = d->status[0], s2 = d->status[2];
  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
  uword indent = format_get_indent (s);

  s = format (s, "%s-owned",
              (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
              "hw");
  s = format (s, ", length this descriptor %d, l3 offset %d",
              d->n_packet_bytes_this_descriptor,
              IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
    s = format (s, ", end-of-packet");

  s = format (s, "\n%U", format_white_space, indent);

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
    s = format (s, "layer2 error");

  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
    {
      s = format (s, "layer 2 type %d", (s0 & 0x1f));
      return s;
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
    s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
                format_white_space, indent);

  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
    {
      s = format (s, "ip4%s",
                  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
                  "");
      if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
        s = format (s, " checksum %s",
                    (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
                    "bad" : "ok");
    }
  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
    s = format (s, "ip6%s",
                (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
                "");
  is_tcp = is_udp = 0;
  if ((is_ip = (is_ip4 | is_ip6)))
    {
      is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
      is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
      if (is_tcp)
        s = format (s, ", tcp");
      if (is_udp)
        s = format (s, ", udp");
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
    s = format (s, ", tcp checksum %s",
                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
                "ok");
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
    s = format (s, ", udp checksum %s",
                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
                "ok");

  return s;
}

static u8 *
format_ixge_tx_descriptor (u8 * s, va_list * va)
{
  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
  u32 s0 = d->status0, s1 = d->status1;
  uword indent = format_get_indent (s);
  u32 v;

  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
              d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);

  s = format (s, "\n%U", format_white_space, indent);

  if ((v = (s0 >> 0) & 3))
    s = format (s, "reserved 0x%x, ", v);

  if ((v = (s0 >> 2) & 3))
    s = format (s, "mac 0x%x, ", v);

  if ((v = (s0 >> 4) & 0xf) != 3)
    s = format (s, "type 0x%x, ", v);

  s = format (s, "%s%s%s%s%s%s%s%s",
              (s0 & (1 << 8)) ? "eop, " : "",
              (s0 & (1 << 9)) ? "insert-fcs, " : "",
              (s0 & (1 << 10)) ? "reserved26, " : "",
              (s0 & (1 << 11)) ? "report-status, " : "",
              (s0 & (1 << 12)) ? "reserved28, " : "",
              (s0 & (1 << 13)) ? "is-advanced, " : "",
              (s0 & (1 << 14)) ? "vlan-enable, " : "",
              (s0 & (1 << 15)) ? "tx-segmentation, " : "");

  if ((v = s1 & 0xf) != 0)
    s = format (s, "status 0x%x, ", v);

  if ((v = (s1 >> 4) & 0xf))
    s = format (s, "context 0x%x, ", v);

  if ((v = (s1 >> 8) & 0x3f))
    s = format (s, "options 0x%x, ", v);

  return s;
}

typedef struct
{
  ixge_descriptor_t before, after;

  u32 buffer_index;

  u16 device_index;

  u8 queue_index;

  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;

static u8 *
format_ixge_rx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  uword indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s = format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
                t->queue_index);
  }

  s = format (s, "\n%Ubefore: %U",
              format_white_space, indent,
              format_ixge_rx_from_hw_descriptor, &t->before);
  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
              format_white_space, indent,
              t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);

  s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index, format_vlib_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  f = node->format_buffer;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}

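/* Driver error counters; the strings become counter names on the
   ixge input node. */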
#define foreach_ixge_error \
  _ (none, "no error") \
  _ (tx_full_drops, "tx ring full drops") \
  _ (ip4_checksum_error, "ip4 checksum errors") \
  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

typedef enum
{
#define _(f,s) IXGE_ERROR_##f,
  foreach_ixge_error
#undef _
  IXGE_N_ERROR,
} ixge_error_t;

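/* Derive (next node, error, buffer flags) from rx descriptor status:
   ip4/ip6 packets are dispatched straight to ip4/ip6 input unless a
   per-interface next index overrides this, checksum errors force a
   drop, and the flags record hardware l4 checksum results. */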
always_inline void
ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
                                       u32 s00, u32 s02,
                                       u8 * next0, u8 * error0, u32 * flags0)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u32 f0;

  e0 = IXGE_ERROR_none;
  n0 = IXGE_RX_NEXT_ETHERNET_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e0);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;

  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;

  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);

  *error0 = e0;
  *next0 = n0;
  *flags0 = f0;
}

always_inline void
ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
                                       u32 s00, u32 s02,
                                       u32 s10, u32 s12,
                                       u8 * next0, u8 * error0, u32 * flags0,
                                       u8 * next1, u8 * error1, u32 * flags1)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u8 is1_ip4, is1_ip6, n1, e1;
  u32 f0, f1;

  e0 = e1 = IXGE_ERROR_none;
  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;

  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e0);
  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e1);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;

  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;

  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;
  n1 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n1;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;

  *error0 = e0;
  *error1 = e1;

  *next0 = n0;
  *next1 = n1;

  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
  f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
  f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);

  *flags0 = f0;
  *flags1 = f1;
}

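/* Add rx dma traces for a run of descriptors, replaying the same
   start-of-packet bookkeeping the rx handler uses so each trace
   records its packet's next node. */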
static void
ixge_rx_trace (ixge_main_t * xm,
               ixge_device_t * xd,
               ixge_dma_queue_t * dq,
               ixge_descriptor_t * before_descriptors,
               u32 * before_buffers,
               ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_rx_from_hw_descriptor_t *bd;
  ixge_rx_to_hw_descriptor_t *ad;
  u32 *b, n_left, is_sop, next_index_sop;

  n_left = n_descriptors;
  b = before_buffers;
  bd = &before_descriptors->rx_from_hw;
  ad = &after_descriptors->rx_to_hw;
  is_sop = dq->rx.is_start_of_packet;
  next_index_sop = dq->rx.saved_start_of_packet_next_index;

  while (n_left >= 2)
    {
      u32 bi0, bi1, flags0, flags1;
      vlib_buffer_t *b0, *b1;
      ixge_rx_dma_trace_t *t0, *t1;
      u8 next0, error0, next1, error1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      ixge_rx_next_and_error_from_status_x2 (xd,
                                             bd[0].status[0], bd[0].status[2],
                                             bd[1].status[0], bd[1].status[2],
                                             &next0, &error0, &flags0,
                                             &next1, &error1, &flags1);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      next_index_sop = is_sop ? next1 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t1->before.rx_from_hw = bd[1];
      t0->after.rx_to_hw = ad[0];
      t1->after.rx_to_hw = ad[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
              sizeof (t1->buffer.pre_data));

      b += 2;
      bd += 2;
      ad += 2;
    }

  while (n_left >= 1)
    {
      u32 bi0, flags0;
      vlib_buffer_t *b0;
      ixge_rx_dma_trace_t *t0;
      u8 next0, error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      ixge_rx_next_and_error_from_status_x1 (xd,
                                             bd[0].status[0], bd[0].status[2],
                                             &next0, &error0, &flags0);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t0->after.rx_to_hw = ad[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));

      b += 1;
      bd += 1;
      ad += 1;
    }
}

typedef struct
{
  ixge_tx_descriptor_t descriptor;

  u32 buffer_index;

  u16 device_index;

  u8 queue_index;

  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;

static u8 *
format_ixge_tx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  uword indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s = format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
                t->queue_index);
  }

  s = format (s, "\n%Udescriptor: %U",
              format_white_space, indent,
              format_ixge_tx_descriptor, &t->descriptor);

  s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index, format_vlib_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  f = format_ethernet_header_with_length;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}

typedef struct
{
  vlib_node_runtime_t *node;

  u32 is_start_of_packet;

  u32 n_bytes_in_packet;

  ixge_tx_descriptor_t *start_of_packet_descriptor;
} ixge_tx_state_t;

static void
ixge_tx_trace (ixge_main_t * xm,
               ixge_device_t * xd,
               ixge_dma_queue_t * dq,
               ixge_tx_state_t * tx_state,
               ixge_tx_descriptor_t * descriptors,
               u32 * buffers, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = tx_state->node;
  ixge_tx_descriptor_t *d;
  u32 *b, n_left, is_sop;

  n_left = n_descriptors;
  b = buffers;
  d = descriptors;
  is_sop = tx_state->is_start_of_packet;

  while (n_left >= 2)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      ixge_tx_dma_trace_t *t0, *t1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->descriptor = d[0];
      t1->descriptor = d[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
              sizeof (t1->buffer.pre_data));

      b += 2;
      d += 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      ixge_tx_dma_trace_t *t0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->descriptor = d[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));

      b += 1;
      d += 1;
    }
}

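/* Ring index arithmetic, modulo the descriptor ring size. */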
always_inline uword
ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
{
  i32 d = i1 - i0;
  ASSERT (i0 < q->n_descriptors);
  ASSERT (i1 < q->n_descriptors);
  return d < 0 ? q->n_descriptors + d : d;
}

always_inline uword
ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
{
  u32 d = i0 + i1;
  ASSERT (i0 < q->n_descriptors);
  ASSERT (i1 < q->n_descriptors);
  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
  return d;
}

always_inline uword
ixge_tx_descriptor_matches_template (ixge_main_t * xm,
                                     ixge_tx_descriptor_t * d)
{
  u32 cmp;

  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
         ^ xm->tx_descriptor_template.status0);
  if (cmp)
    return 0;
  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
         ^ xm->tx_descriptor_template.status1);
  if (cmp)
    return 0;

  return 1;
}

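/* Enqueue a contiguous (non-wrapping) run of tx descriptors: swap
   each buffer index into the ring (collecting previously transmitted
   buffers for freeing), fill in address/length/status, and patch the
   packet's start-of-packet descriptor with the total byte count once
   its end-of-packet buffer is seen. */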
static uword
ixge_tx_no_wrap (ixge_main_t * xm,
                 ixge_device_t * xd,
                 ixge_dma_queue_t * dq,
                 u32 * buffers,
                 u32 start_descriptor_index,
                 u32 n_descriptors, ixge_tx_state_t * tx_state)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_tx_descriptor_t *d, *d_sop;
  u32 n_left = n_descriptors;
  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
  u32 *to_tx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 is_sop = tx_state->is_start_of_packet;
  u32 len_sop = tx_state->n_bytes_in_packet;
  u16 template_status = xm->tx_descriptor_template.status0;
  u32 descriptor_prefetch_rotor = 0;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index].tx;
  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, fi0, len0;
      u32 bi1, fi1, len1;
      u8 is_eop0, is_eop1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      if ((descriptor_prefetch_rotor & 0x3) == 0)
        CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);

      descriptor_prefetch_rotor += 2;

      bi0 = buffers[0];
      bi1 = buffers[1];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      to_free[0] = fi1 = to_tx[1];
      to_tx[1] = bi1;
      to_free += fi1 != 0;

      buffers += 2;
      n_left -= 2;
      to_tx += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
      is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;
      len1 = b1->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));

      d[0].buffer_address =
        vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
      d[1].buffer_address =
        vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;

      d[0].n_bytes_this_buffer = len0;
      d[1].n_bytes_this_buffer = len1;

      d[0].status0 =
        template_status | (is_eop0 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
      d[1].status0 =
        template_status | (is_eop1 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;

      len_sop = (is_sop ? 0 : len_sop) + len1;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop1 ? d : d_sop;

      is_sop = is_eop1;
    }

  while (n_left > 0)
    {
      vlib_buffer_t *b0;
      u32 bi0, fi0, len0;
      u8 is_eop0;

      bi0 = buffers[0];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      buffers += 1;
      n_left -= 1;
      to_tx += 1;

      b0 = vlib_get_buffer (vm, bi0);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));

      d[0].buffer_address =
        vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;

      d[0].n_bytes_this_buffer = len0;

      d[0].status0 =
        template_status | (is_eop0 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;
    }

  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
    {
      to_tx =
        vec_elt_at_index (dq->descriptor_buffer_indices,
                          start_descriptor_index);
      ixge_tx_trace (xm, xd, dq, tx_state,
                     &dq->descriptors[start_descriptor_index].tx, to_tx,
                     n_descriptors);
    }

  _vec_len (xm->tx_buffers_pending_free) =
    to_free - xm->tx_buffers_pending_free;

  /* When we are done d_sop can point to end of ring.  Wrap it if so. */
  {
    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;

    ASSERT (d_sop - d_start <= dq->n_descriptors);
    d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
  }

  tx_state->is_start_of_packet = is_sop;
  tx_state->start_of_packet_descriptor = d_sop;
  tx_state->n_bytes_in_packet = len_sop;

  return n_descriptors;
}

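/* Device transmit function: reclaim ring space from the head-index
   write-back, drop whole packets that cannot fit in the ring,
   program the new descriptors in at most two non-wrapping runs, bump
   the hardware tail pointer, and free buffers from completed
   transmissions. */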
static uword
ixge_interface_tx (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
  ixge_dma_queue_t *dq;
  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
  u32 queue_index = 0;		/* fixme parameter */
  ixge_tx_state_t tx_state;

  tx_state.node = node;
  tx_state.is_start_of_packet = 1;
  tx_state.start_of_packet_descriptor = 0;
  tx_state.n_bytes_in_packet = 0;

  from = vlib_frame_vector_args (f);

  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);

  dq->head_index = dq->tx.head_index_write_back[0];

  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
  n_left_tx = dq->n_descriptors - 1;
  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);

  _vec_len (xm->tx_buffers_pending_free) = 0;

  n_descriptors_to_tx = f->n_vectors;
  n_tail_drop = 0;
  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
    {
      i32 i, n_ok, i_eop, i_sop;

      i_sop = i_eop = ~0;
      for (i = n_left_tx - 1; i >= 0; i--)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
          if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              if (i_sop != ~0 && i_eop != ~0)
                break;
              i_eop = i;
              i_sop = i + 1;
            }
        }
      if (i == 0)
        n_ok = 0;
      else
        n_ok = i_eop + 1;

      {
        ELOG_TYPE_DECLARE (e) = {
          .function = (char *) __FUNCTION__,
          .format = "ixge %d, ring full to tx %d head %d tail %d",
          .format_args = "i2i2i2i2",
        };
        struct
        {
          u16 instance, to_tx, head, tail;
        } *ed;
        ed = ELOG_DATA (&vm->elog_main, e);
        ed->instance = xd->device_index;
        ed->to_tx = n_descriptors_to_tx;
        ed->head = dq->head_index;
        ed->tail = dq->tail_index;
      }

      if (n_ok < n_descriptors_to_tx)
        {
          n_tail_drop = n_descriptors_to_tx - n_ok;
          vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
          vlib_error_count (vm, ixge_input_node.index,
                            IXGE_ERROR_tx_full_drops, n_tail_drop);
        }

      n_descriptors_to_tx = n_ok;
    }

  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;

  /* Process from tail to end of descriptor ring. */
  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
    {
      u32 n =
        clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
      n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
      from += n;
      n_descriptors_to_tx -= n;
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
        dq->tail_index = 0;
    }

  if (n_descriptors_to_tx > 0)
    {
      u32 n =
        ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
      from += n;
      ASSERT (n == n_descriptors_to_tx);
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
        dq->tail_index = 0;
    }

  /* We should only get full packets. */
  ASSERT (tx_state.is_start_of_packet);

  /* Report status when last descriptor is done. */
  {
    u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
    d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
  }

  /* Give new descriptors to hardware. */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);

    CLIB_MEMORY_BARRIER ();

    dr->tail_index = dq->tail_index;
  }

  /* Free any buffers that are done. */
  {
    u32 n = _vec_len (xm->tx_buffers_pending_free);
    if (n > 0)
      {
        vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
        _vec_len (xm->tx_buffers_pending_free) = 0;
        ASSERT (dq->tx.n_buffers_on_ring >= n);
        dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
      }
  }

  return f->n_vectors;
}

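/* Process a contiguous (non-wrapping) run of rx descriptors:
   unrolled two at a time, each software-owned descriptor hands its
   buffer chain to the next graph node and is refilled with a freshly
   allocated replacement buffer. */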
static uword
ixge_rx_queue_no_wrap (ixge_main_t * xm,
                       ixge_device_t * xd,
                       ixge_dma_queue_t * dq,
                       u32 start_descriptor_index, u32 n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_descriptor_t *d;
  static ixge_descriptor_t *d_trace_save;
  static u32 *d_trace_buffers;
  u32 n_descriptors_left = n_descriptors;
  u32 *to_rx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 *to_add;
  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
  u32 bi_last = dq->rx.saved_last_buffer_index;
  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
  u32 is_sop = dq->rx.is_start_of_packet;
  u32 next_index, n_left_to_next, *to_next;
  u32 n_packets = 0;
  u32 n_bytes = 0;
  u32 n_trace = vlib_get_trace_count (vm, node);
  vlib_buffer_t *b_last, b_dummy;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index];

  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
  next_index = dq->rx.next_index;

  if (n_trace > 0)
    {
      u32 n = clib_min (n_trace, n_descriptors);
      if (d_trace_save)
        {
          _vec_len (d_trace_save) = 0;
          _vec_len (d_trace_buffers) = 0;
        }
      vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
      vec_add (d_trace_buffers, to_rx, n);
    }

  {
    uword l = vec_len (xm->rx_buffers_to_add);

    if (l < n_descriptors_left)
      {
        u32 n_to_alloc = 2 * dq->n_descriptors - l;
        u32 n_allocated;

        vec_resize (xm->rx_buffers_to_add, n_to_alloc);

        _vec_len (xm->rx_buffers_to_add) = l;
        n_allocated = vlib_buffer_alloc_from_free_list
          (vm, xm->rx_buffers_to_add + l, n_to_alloc,
           xm->vlib_buffer_free_list_index);
        _vec_len (xm->rx_buffers_to_add) += n_allocated;

        /* Handle transient allocation failure. */
        if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
          {
            if (n_allocated == 0)
              vlib_error_count (vm, ixge_input_node.index,
                                IXGE_ERROR_rx_alloc_no_physmem, 1);
            else
              vlib_error_count (vm, ixge_input_node.index,
                                IXGE_ERROR_rx_alloc_fail, 1);

            n_descriptors_left = l + n_allocated;
          }
        n_descriptors = n_descriptors_left;
      }

    /* Add buffers from end of vector going backwards. */
    to_add = vec_end (xm->rx_buffers_to_add) - 1;
  }

  while (n_descriptors_left > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_descriptors_left >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
          u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
          u8 is_eop0, error0, next0;
          u8 is_eop1, error1, next1;
          ixge_descriptor_t d0, d1;

          vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
          vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);

          CLIB_PREFETCH (d + 2, 32, STORE);

          d0.as_u32x4 = d[0].as_u32x4;
          d1.as_u32x4 = d[1].as_u32x4;

          s20 = d0.rx_from_hw.status[2];
          s21 = d1.rx_from_hw.status[2];

          s00 = d0.rx_from_hw.status[0];
          s01 = d1.rx_from_hw.status[0];

          if (!((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
            goto found_hw_owned_descriptor_x2;

          bi0 = to_rx[0];
          bi1 = to_rx[1];

          ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
          fi0 = to_add[0];
          fi1 = to_add[-1];

          to_rx[0] = fi0;
          to_rx[1] = fi1;
          to_rx += 2;
          to_add -= 2;

          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
                  vlib_buffer_is_known (vm, bi0));
          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
                  vlib_buffer_is_known (vm, bi1));
          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
                  vlib_buffer_is_known (vm, fi0));
          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
                  vlib_buffer_is_known (vm, fi1));

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

          CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);

          is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
          is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;

          ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
                                                 &next0, &error0, &flags0,
                                                 &next1, &error1, &flags1);

          next0 = is_sop ? next0 : next_index_sop;
          next1 = is_eop0 ? next1 : next0;
          next_index_sop = next1;

          b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
          b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          b0->error = node->errors[error0];
          b1->error = node->errors[error1];

          len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
          len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
          n_bytes += len0 + len1;
          n_packets += is_eop0 + is_eop1;

          /* Give new buffers to hardware. */
          d0.rx_to_hw.tail_address =
            vlib_get_buffer_data_physical_address (vm, fi0);
          d1.rx_to_hw.tail_address =
            vlib_get_buffer_data_physical_address (vm, fi1);
          d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
          d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
          d[0].as_u32x4 = d0.as_u32x4;
          d[1].as_u32x4 = d1.as_u32x4;

          d += 2;
          n_descriptors_left -= 2;

          /* Point to either l2 or l3 header depending on next. */
          l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
            ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
          l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
            ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;

          b0->current_length = len0 - l3_offset0;
          b1->current_length = len1 - l3_offset1;
          b0->current_data = l3_offset0;
          b1->current_data = l3_offset1;

          b_last->next_buffer = is_sop ? ~0 : bi0;
          b0->next_buffer = is_eop0 ? ~0 : bi1;
          bi_last = bi1;
          b_last = b1;

          if (CLIB_DEBUG > 0)
            {
              u32 bi_sop0 = is_sop ? bi0 : bi_sop;
              u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;

              if (is_eop0)
                {
                  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
                                                  /* follow_buffer_next */ 1);
                  ASSERT (!msg);
                }
              if (is_eop1)
                {
                  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
                                                  /* follow_buffer_next */ 1);
                  ASSERT (!msg);
                }
            }
          if (0)		/* "Dave" version */
            {
              u32 bi_sop0 = is_sop ? bi0 : bi_sop;
              u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;

              if (is_eop0)
                {
                  to_next[0] = bi_sop0;
                  to_next++;
                  n_left_to_next--;

                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next,
                                                   bi_sop0, next0);
                }
              if (is_eop1)
                {
                  to_next[0] = bi_sop1;
                  to_next++;
                  n_left_to_next--;

                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next,
                                                   bi_sop1, next1);
                }
              is_sop = is_eop1;
              bi_sop = bi_sop1;
            }
          if (1)		/* "Eliot" version */
            {
              /* Speculatively enqueue to cached next. */
              u8 saved_is_sop = is_sop;
              u32 bi_sop_save = bi_sop;

              bi_sop = saved_is_sop ? bi0 : bi_sop;
              to_next[0] = bi_sop;
              to_next += is_eop0;
              n_left_to_next -= is_eop0;

              bi_sop = is_eop0 ? bi1 : bi_sop;
              to_next[0] = bi_sop;
              to_next += is_eop1;
              n_left_to_next -= is_eop1;

              is_sop = is_eop1;

              if (PREDICT_FALSE (!(next0 == next_index
                                   && next1 == next_index)))
                {
                  /* Undo speculation. */
                  to_next -= is_eop0 + is_eop1;
                  n_left_to_next += is_eop0 + is_eop1;

                  /* Re-do both descriptors being careful about where we enqueue. */
                  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
                  if (is_eop0)
                    {
                      if (next0 != next_index)
                        vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
                      else
                        {
                          to_next[0] = bi_sop;
                          to_next += 1;
                          n_left_to_next -= 1;
                        }
                    }

                  bi_sop = is_eop0 ? bi1 : bi_sop;
                  if (is_eop1)
                    {
                      if (next1 != next_index)
                        vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
                      else
                        {
                          to_next[0] = bi_sop;
                          to_next += 1;
                          n_left_to_next -= 1;
                        }

Damjan Marion00a9dca2016-08-17 17:05:46 +02001640 /* Switch cached next index when next for both packets is the same. */
1641 if (is_eop0 && is_eop1 && next0 == next1)
1642 {
1643 vlib_put_next_frame (vm, node, next_index,
1644 n_left_to_next);
1645 next_index = next0;
1646 vlib_get_next_frame (vm, node, next_index,
1647 to_next, n_left_to_next);
1648 }
1649 }
1650 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001651 }
1652
Damjan Marion00a9dca2016-08-17 17:05:46 +02001653 /* Bail out of dual loop and proceed with single loop. */
Damjan Marionb4d89272016-05-12 22:14:45 +02001654 found_hw_owned_descriptor_x2:
1655
1656 while (n_descriptors_left > 0 && n_left_to_next > 0)
1657 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001658 vlib_buffer_t *b0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001659 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1660 u8 is_eop0, error0, next0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001661 ixge_descriptor_t d0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001662
Damjan Marion00a9dca2016-08-17 17:05:46 +02001663 d0.as_u32x4 = d[0].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001664
1665 s20 = d0.rx_from_hw.status[2];
1666 s00 = d0.rx_from_hw.status[0];
1667
Damjan Marion00a9dca2016-08-17 17:05:46 +02001668 if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001669 goto found_hw_owned_descriptor_x1;
1670
1671 bi0 = to_rx[0];
1672 ASSERT (to_add >= xm->rx_buffers_to_add);
1673 fi0 = to_add[0];
1674
1675 to_rx[0] = fi0;
1676 to_rx += 1;
1677 to_add -= 1;
1678
Damjan Marion00a9dca2016-08-17 17:05:46 +02001679 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1680 vlib_buffer_is_known (vm, bi0));
1681 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1682 vlib_buffer_is_known (vm, fi0));
Damjan Marionb4d89272016-05-12 22:14:45 +02001683
1684 b0 = vlib_get_buffer (vm, bi0);
1685
Damjan Marion00a9dca2016-08-17 17:05:46 +02001686 /*
1687 * Turn this on if you run into
1688 * "bad monkey" contexts, and you want to know exactly
1689 * which nodes they've visited...
1690 */
1691 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001692
1693 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1694 ixge_rx_next_and_error_from_status_x1
Damjan Marion00a9dca2016-08-17 17:05:46 +02001695 (xd, s00, s20, &next0, &error0, &flags0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001696
1697 next0 = is_sop ? next0 : next_index_sop;
1698 next_index_sop = next0;
1699
1700 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1701
1702 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001703 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001704
1705 b0->error = node->errors[error0];
1706
1707 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1708 n_bytes += len0;
1709 n_packets += is_eop0;
1710
1711 /* Give new buffer to hardware. */
1712 d0.rx_to_hw.tail_address =
Damjan Marion00a9dca2016-08-17 17:05:46 +02001713 vlib_get_buffer_data_physical_address (vm, fi0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001714 d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001715 d[0].as_u32x4 = d0.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001716
1717 d += 1;
1718 n_descriptors_left -= 1;
1719
1720 /* Point to either l2 or l3 header depending on next. */
1721 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001722 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001723 b0->current_length = len0 - l3_offset0;
1724 b0->current_data = l3_offset0;
1725
1726 b_last->next_buffer = is_sop ? ~0 : bi0;
1727 bi_last = bi0;
1728 b_last = b0;
1729
1730 bi_sop = is_sop ? bi0 : bi_sop;
1731
1732 if (CLIB_DEBUG > 0 && is_eop0)
1733 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001734 u8 *msg =
1735 vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1736 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001737 }
1738
Damjan Marion00a9dca2016-08-17 17:05:46 +02001739 if (0) /* "Dave" version */
1740 {
1741 if (is_eop0)
1742 {
1743 to_next[0] = bi_sop;
1744 to_next++;
1745 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001746
Damjan Marion00a9dca2016-08-17 17:05:46 +02001747 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1748 to_next, n_left_to_next,
1749 bi_sop, next0);
1750 }
1751 }
1752 if (1) /* "Eliot" version */
1753 {
1754 if (PREDICT_TRUE (next0 == next_index))
1755 {
1756 to_next[0] = bi_sop;
1757 to_next += is_eop0;
1758 n_left_to_next -= is_eop0;
1759 }
1760 else
1761 {
1762 if (next0 != next_index && is_eop0)
1763 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
Damjan Marionb4d89272016-05-12 22:14:45 +02001764
Damjan Marion00a9dca2016-08-17 17:05:46 +02001765 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1766 next_index = next0;
1767 vlib_get_next_frame (vm, node, next_index,
1768 to_next, n_left_to_next);
1769 }
1770 }
1771 is_sop = is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001772 }
1773 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1774 }
1775
Damjan Marion00a9dca2016-08-17 17:05:46 +02001776found_hw_owned_descriptor_x1:
Damjan Marionb4d89272016-05-12 22:14:45 +02001777 if (n_descriptors_left > 0)
1778 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1779
1780 _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1781
1782 {
1783 u32 n_done = n_descriptors - n_descriptors_left;
1784
1785 if (n_trace > 0 && n_done > 0)
1786 {
1787 u32 n = clib_min (n_trace, n_done);
1788 ixge_rx_trace (xm, xd, dq,
1789 d_trace_save,
1790 d_trace_buffers,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001791 &dq->descriptors[start_descriptor_index], n);
Damjan Marionb4d89272016-05-12 22:14:45 +02001792 vlib_set_trace_count (vm, node, n_trace - n);
1793 }
1794 if (d_trace_save)
1795 {
1796 _vec_len (d_trace_save) = 0;
1797 _vec_len (d_trace_buffers) = 0;
1798 }
1799
    /* Don't keep a reference to b_last if we don't have to.
       Otherwise we can overwrite a next_buffer pointer after having
       already enqueued a packet. */
    if (is_sop)
      {
	b_last->next_buffer = ~0;
	bi_last = ~0;
      }

    dq->rx.n_descriptors_done_this_call = n_done;
    dq->rx.n_descriptors_done_total += n_done;
    dq->rx.is_start_of_packet = is_sop;
    dq->rx.saved_start_of_packet_buffer_index = bi_sop;
    dq->rx.saved_last_buffer_index = bi_last;
    dq->rx.saved_start_of_packet_next_index = next_index_sop;
    dq->rx.next_index = next_index;
    dq->rx.n_bytes += n_bytes;

    return n_packets;
  }
}

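/*
 * Note on ring arithmetic: ixge_rx_queue () below never asks the
 * no-wrap routine above to cross the end of the descriptor ring; a
 * wrapped range [sw_head, hw_head) is processed as two contiguous
 * calls, with ixge_ring_add () doing the modular index update.  A
 * worked example (assuming, purely for illustration, a 1024-entry
 * ring):
 *
 *   sw_head = 1020, n_descriptors_done_this_call = 8
 *   => new sw_head = ixge_ring_add (dq, 1020, 8) = 4
 */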
static uword
ixge_rx_queue (ixge_main_t * xm,
	       ixge_device_t * xd,
	       vlib_node_runtime_t * node, u32 queue_index)
{
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
  uword n_packets = 0;
  u32 hw_head_index, sw_head_index;

  /* One time initialization. */
  if (!dq->rx.node)
    {
      dq->rx.node = node;
      dq->rx.is_start_of_packet = 1;
      dq->rx.saved_start_of_packet_buffer_index = ~0;
      dq->rx.saved_last_buffer_index = ~0;
    }

  dq->rx.next_index = node->cached_next_index;

  dq->rx.n_descriptors_done_total = 0;
  dq->rx.n_descriptors_done_this_call = 0;
  dq->rx.n_bytes = 0;

  /* Fetch head from hardware and compare to where we think we are. */
  hw_head_index = dr->head_index;
  sw_head_index = dq->head_index;

  if (hw_head_index == sw_head_index)
    goto done;

  if (hw_head_index < sw_head_index)
    {
      u32 n_tried = dq->n_descriptors - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);

      if (dq->rx.n_descriptors_done_this_call != n_tried)
	goto done;
    }
  if (hw_head_index >= sw_head_index)
    {
      u32 n_tried = hw_head_index - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);
    }

done:
  dq->head_index = sw_head_index;
  dq->tail_index =
    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);

  /* Give tail back to hardware. */
  CLIB_MEMORY_BARRIER ();

  dr->tail_index = dq->tail_index;

  vlib_increment_combined_counter (vnet_main.interface_main.
				   combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX,
				   0 /* cpu_index */,
				   xd->vlib_sw_if_index, n_packets,
				   dq->rx.n_bytes);

  return n_packets;
}

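/*
 * Non-queue interrupt causes arrive in status bits 16..31.  Bit 20 is
 * the link-status-change cause and is handled specially below; every
 * other cause is merely event-logged, with the enum_strings table
 * naming bits 16..31 in order.
 */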
static void
ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;

  if (i != 20)
    {
      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format = "ixge %d, %s",
	.format_args = "i1t1",
	.n_enum_strings = 16,
	.enum_strings = {
	  "flow director",
	  "rx miss",
	  "pci exception",
	  "mailbox",
	  "link status change",
	  "linksec key exchange",
	  "manageability event",
	  "reserved23",
	  "sdp0",
	  "sdp1",
	  "sdp2",
	  "sdp3",
	  "ecc",
	  "descriptor handler error",
	  "tcp timer",
	  "other",
	},
      };
      struct
      {
	u8 instance;
	u8 index;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->index = i - 16;
    }
  else
    {
      u32 v = r->xge_mac.link_status;
      uword is_up = (v & (1 << 30)) != 0;

      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format = "ixge %d, link status change 0x%x",
	.format_args = "i4i4",
      };
      struct
      {
	u32 instance, link_status;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->link_status = v;
      xd->link_status_at_last_link_change = v;

      vlib_process_signal_event (vm, ixge_process_node.index,
				 EVENT_SET_FLAGS,
				 ((is_up << 31) | xd->vlib_hw_if_index));
    }
}

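/*
 * clean_block (b, t, n_left) copies the non-zero entries of
 * b[0..n_left-1] to t, zeroing b as it goes, and returns the number of
 * entries written; zero entries mark descriptor slots with no buffer
 * index to free.  A worked example:
 *
 *   b = { 5, 0, 7, 3 }  ->  b = { 0, 0, 0, 0 }, t = { 5, 7, 3 }, returns 3
 */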
always_inline u32
clean_block (u32 * b, u32 * t, u32 n_left)
{
  u32 *t0 = t;

  while (n_left >= 4)
    {
      u32 bi0, bi1, bi2, bi3;

      t[0] = bi0 = b[0];
      b[0] = 0;
      t += bi0 != 0;

      t[0] = bi1 = b[1];
      b[1] = 0;
      t += bi1 != 0;

      t[0] = bi2 = b[2];
      b[2] = 0;
      t += bi2 != 0;

      t[0] = bi3 = b[3];
      b[3] = 0;
      t += bi3 != 0;

      b += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      u32 bi0;

      t[0] = bi0 = b[0];
      b[0] = 0;
      t += bi0 != 0;
      b += 1;
      n_left -= 1;
    }

  return t - t0;
}

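/*
 * TX descriptor ring cleanup.  The device reports its progress by
 * DMA-ing the head index to dq->tx.head_index_write_back; every
 * descriptor between the old software head and that reported head has
 * been transmitted, so the buffers recorded for those slots can be
 * freed.  The range to clean may wrap around the ring, hence the two
 * clean_block () calls below.
 */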
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
  u32 n_clean, *b, *t, *t0;
  i32 n_hw_owned_descriptors;
  i32 first_to_clean, last_to_clean;
  u64 hwbp_race = 0;

  /* Handle case where head write back pointer update
   * arrives after the interrupt during high PCI bus loads.
   */
  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
    {
      hwbp_race++;
      if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
	{
	  ELOG_TYPE_DECLARE (e) = {
	    .function = (char *) __FUNCTION__,
	    .format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
	    .format_args = "i4i4i4i4",
	  };
	  struct
	  {
	    u32 instance, head_index, tail_index, n_buffers_on_ring;
	  } *ed;
	  ed = ELOG_DATA (&vm->elog_main, e);
	  ed->instance = xd->device_index;
	  ed->head_index = dq->head_index;
	  ed->tail_index = dq->tail_index;
	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
	}
    }

  dq->head_index = dq->tx.head_index_write_back[0];
  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;

  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
    {
      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
	.format_args = "i4i4i4i4i4",
      };
      struct
      {
	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->head_index = dq->head_index;
      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
      ed->n_clean = n_clean;
      ed->retries = hwbp_race;
    }

  /*
   * This function used to wait until hardware owned zero descriptors.
   * At high PPS rates, that doesn't happen until the TX ring is
   * completely full of descriptors which need to be cleaned up.
   * That, in turn, causes TX ring-full drops and/or long RX service
   * interruptions.
   */
  if (n_clean == 0)
    return;

  /* Clean the n_clean descriptors prior to the reported hardware head */
  last_to_clean = dq->head_index - 1;
  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
    last_to_clean;

  first_to_clean = (last_to_clean) - (n_clean - 1);
  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
    first_to_clean;

  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
  t0 = t = xm->tx_buffers_pending_free;
  b = dq->descriptor_buffer_indices + first_to_clean;

  /* Wrap case: clean from first to end, then start to last */
  if (first_to_clean > last_to_clean)
    {
      t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
      first_to_clean = 0;
      b = dq->descriptor_buffer_indices;
    }

  /* Typical case: clean from first to last */
  if (first_to_clean <= last_to_clean)
    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);

  if (t > t0)
    {
      u32 n = t - t0;
      vlib_buffer_free_no_next (vm, t0, n);
      ASSERT (dq->tx.n_buffers_on_ring >= n);
      dq->tx.n_buffers_on_ring -= n;
      _vec_len (xm->tx_buffers_pending_free) = 0;
    }
}

/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
always_inline uword
ixge_interrupt_is_rx_queue (uword i)
{
  return i < 8;
}

always_inline uword
ixge_interrupt_is_tx_queue (uword i)
{
  return i >= 8 && i < 16;
}

always_inline uword
ixge_tx_queue_to_interrupt (uword i)
{
  return 8 + i;
}

always_inline uword
ixge_rx_queue_to_interrupt (uword i)
{
  return 0 + i;
}

always_inline uword
ixge_interrupt_rx_queue (uword i)
{
  ASSERT (ixge_interrupt_is_rx_queue (i));
  return i - 0;
}

always_inline uword
ixge_interrupt_tx_queue (uword i)
{
  ASSERT (ixge_interrupt_is_tx_queue (i));
  return i - 8;
}

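/*
 * Example of the mapping above: interrupt status bit 3 would dispatch
 * to RX queue 3 and bit 11 to TX queue 3.  This driver only programs
 * queue 0 in each direction (see ixge_device_init () below).
 */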
static uword
ixge_device_input (ixge_main_t * xm,
		   ixge_device_t * xd, vlib_node_runtime_t * node)
{
  ixge_regs_t *r = xd->regs;
  u32 i, s;
  uword n_rx_packets = 0;

  s = r->interrupt.status_write_1_to_set;
  if (s)
    r->interrupt.status_write_1_to_clear = s;

  /* *INDENT-OFF* */
  foreach_set_bit (i, s, ({
    if (ixge_interrupt_is_rx_queue (i))
      n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));

    else if (ixge_interrupt_is_tx_queue (i))
      ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));

    else
      ixge_interrupt (xm, xd, i);
  }));
  /* *INDENT-ON* */

  return n_rx_packets;
}

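/*
 * ixge-input runs in one of two modes.  In interrupt mode,
 * ixge_pci_intr_handler () (near the end of this file) sets the
 * interrupting device's bit in node->runtime_data[0], and only those
 * devices are serviced; in polling mode every device is serviced on
 * each dispatch.
 */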
static uword
ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword n_rx_packets = 0;

  if (node->state == VLIB_NODE_STATE_INTERRUPT)
    {
      uword i;

      /* Loop over devices with interrupts. */
      /* *INDENT-OFF* */
      foreach_set_bit (i, node->runtime_data[0], ({
	xd = vec_elt_at_index (xm->devices, i);
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts since we're going to stay in interrupt mode. */
	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }));
      /* *INDENT-ON* */

      /* Clear mask of devices with pending interrupts. */
      node->runtime_data[0] = 0;
    }
  else
    {
      /* Poll all devices for input/interrupts. */
      vec_foreach (xd, xm->devices)
      {
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts when switching out of polling mode. */
	if (node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }
    }

  return n_rx_packets;
}

static char *ixge_error_strings[] = {
#define _(n,s) s,
  foreach_ixge_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
  .function = ixge_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "ixge-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ixge_rx_dma_trace,

  .n_errors = IXGE_N_ERROR,
  .error_strings = ixge_error_strings,

  .n_next_nodes = IXGE_RX_N_NEXT,
  .next_nodes = {
    [IXGE_RX_NEXT_DROP] = "error-drop",
    [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
    [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
CLIB_MULTIARCH_SELECT_FN (ixge_input)
/* *INDENT-ON* */

static u8 *
format_ixge_device_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
  return format (s, "TenGigabitEthernet%U",
		 format_vlib_pci_handle, &xd->pci_device.bus_address);
}

#define IXGE_COUNTER_IS_64_BIT (1 << 0)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)

static u8 ixge_counter_flags[] = {
#define _(a,f) 0,
#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
  foreach_ixge_counter
#undef _
#undef _64
};

static void
ixge_update_counters (ixge_device_t * xd)
{
  /* Byte offset for counter registers. */
  static u32 reg_offsets[] = {
#define _(a,f) (a) / sizeof (u32),
#define _64(a,f) _(a,f)
    foreach_ixge_counter
#undef _
#undef _64
  };
  volatile u32 *r = (volatile u32 *) xd->regs;
  int i;

  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
    {
      u32 o = reg_offsets[i];
      xd->counters[i] += r[o];
      if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
	r[o] = 0;
      if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
    }
}

static u8 *
format_ixge_device_id (u8 * s, va_list * args)
{
  u32 device_id = va_arg (*args, u32);
  char *t = 0;
  switch (device_id)
    {
#define _(f,n) case n: t = #f; break;
      foreach_ixge_pci_device_id;
#undef _
    default:
      t = 0;
      break;
    }
  if (t == 0)
    s = format (s, "unknown 0x%x", device_id);
  else
    s = format (s, "%s", t);
  return s;
}

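/* Decode xge_mac.link_status: bit 30 is link up/down, bits 27:26 the
   link mode and bits 29:28 the link speed, per the tables below. */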
static u8 *
format_ixge_link_status (u8 * s, va_list * args)
{
  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
  u32 v = xd->link_status_at_last_link_change;

  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");

  {
    char *modes[] = {
      "1g", "10g parallel", "10g serial", "autoneg",
    };
    char *speeds[] = {
      "unknown", "100m", "1g", "10g",
    };
    s = format (s, ", mode %s, speed %s",
		modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
  }

  return s;
}

static u8 *
format_ixge_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  uword indent = format_get_indent (s);

  ixge_update_counters (xd);
  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;

  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
	      format_ixge_device_id, xd->device_id,
	      format_white_space, indent + 2, format_ixge_link_status, xd);

  {
    s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
		format_vlib_pci_link_speed, &xd->pci_device);
  }

  s = format (s, "\n%U", format_white_space, indent + 2);
  if (phy->mdio_address != ~0)
    s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
  else if (xd->sfp_eeprom.id == SFP_ID_sfp)
    s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
  else
    s = format (s, "PHY not found");

  /* FIXME */
  {
    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
    u32 hw_head_index = dr->head_index;
    u32 sw_head_index = dq->head_index;
    u32 nitems;

    nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
    s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
		format_white_space, indent + 2, nitems, dq->n_descriptors);

    s = format (s, "\n%U%d buffers in driver rx cache",
		format_white_space, indent + 2,
		vec_len (xm->rx_buffers_to_add));

    s = format (s, "\n%U%d buffers on tx queue 0 ring",
		format_white_space, indent + 2,
		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
  }
  {
    u32 i;
    u64 v;
    static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
      foreach_ixge_counter
#undef _
#undef _64
    };

    for (i = 0; i < ARRAY_LEN (names); i++)
      {
	v = xd->counters[i] - xd->counters_last_clear[i];
	if (v != 0)
	  s = format (s, "\n%U%-40U%16Ld",
		      format_white_space, indent + 2,
		      format_c_identifier, names[i], v);
      }
  }

  return s;
}

static void
ixge_clear_hw_interface_counters (u32 instance)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
  ixge_update_counters (xd);
  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
}

/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  ixge_main_t *xm = &ixge_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
}


/* *INDENT-OFF* */
VNET_DEVICE_CLASS (ixge_device_class) = {
  .name = "ixge",
  .tx_function = ixge_interface_tx,
  .format_device_name = format_ixge_device_name,
  .format_device = format_ixge_device,
  .format_tx_trace = format_ixge_tx_dma_trace,
  .clear_counters = ixge_clear_hw_interface_counters,
  .admin_up_down_function = ixge_interface_admin_up_down,
  .rx_redirect_to_node = ixge_set_interface_next_node,
  .flatten_output_chains = 1,
};
/* *INDENT-ON* */

#define IXGE_N_BYTES_IN_RX_BUFFER  (2048)	// DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
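/*
 * The RX buffer size is programmed into the rx_split_control register
 * in 1 kbyte units (see ixge_dma_init () below), so the value above is
 * rounded up to a multiple of 1024 and, per the ASSERT there, must stay
 * below 32 kbytes.
 */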

static clib_error_t *
ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq;
  clib_error_t *error = 0;

  vec_validate (xd->dma_queues[rt], queue_index);
  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);

  if (!xm->n_descriptors_per_cache_line)
    xm->n_descriptors_per_cache_line =
      CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);

  if (!xm->n_bytes_in_rx_buffer)
    xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
  xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
  if (!xm->vlib_buffer_free_list_index)
    {
      xm->vlib_buffer_free_list_index =
	vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
					     "ixge rx");
      ASSERT (xm->vlib_buffer_free_list_index != 0);
    }

  if (!xm->n_descriptors[rt])
    xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;

  dq->queue_index = queue_index;
  dq->n_descriptors =
    round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
  dq->head_index = dq->tail_index = 0;

  dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
						dq->n_descriptors *
						sizeof (dq->descriptors[0]),
						128 /* per chip spec */ );
  if (error)
    return error;

  memset (dq->descriptors, 0,
	  dq->n_descriptors * sizeof (dq->descriptors[0]));
  vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);

  if (rt == VLIB_RX)
    {
      u32 n_alloc, i;

      n_alloc = vlib_buffer_alloc_from_free_list
	(vm, dq->descriptor_buffer_indices,
	 vec_len (dq->descriptor_buffer_indices),
	 xm->vlib_buffer_free_list_index);
      ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
      for (i = 0; i < n_alloc; i++)
	{
	  vlib_buffer_t *b =
	    vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
	  dq->descriptors[i].rx_to_hw.tail_address =
	    vlib_physmem_virtual_to_physical (vm, b->data);
	}
    }
  else
    {
      u32 i;

      dq->tx.head_index_write_back =
	vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);

      for (i = 0; i < dq->n_descriptors; i++)
	dq->descriptors[i].tx = xm->tx_descriptor_template;

      vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
    }

  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
    u64 a;

    a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
    dr->descriptor_address[0] = a & 0xFFFFFFFF;
    dr->descriptor_address[1] = a >> (u64) 32;
    dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
    dq->head_index = dq->tail_index = 0;

    if (rt == VLIB_RX)
      {
	ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
	dr->rx_split_control =
	  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
	   | ( /* lo free descriptor threshold (units of 64 descriptors) */
	      (1 << 22)) | ( /* descriptor type: advanced one buffer */
			    (1 << 25)) | ( /* drop if no descriptors available */
					  (1 << 28)));

	/* Give hardware all but last 16 cache lines' worth of descriptors. */
	dq->tail_index = dq->n_descriptors -
	  16 * xm->n_descriptors_per_cache_line;
      }
    else
      {
	/* Make sure it's initialized before hardware can get to it. */
	dq->tx.head_index_write_back[0] = dq->head_index;

	a =
	  vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
	dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
	dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
      }

    /* DMA on 82599 does not work with [13] rx data write relaxed ordering
       and [12] undocumented set. */
    if (rt == VLIB_RX)
      dr->dca_control &= ~((1 << 13) | (1 << 12));

    CLIB_MEMORY_BARRIER ();

    if (rt == VLIB_TX)
      {
	xd->regs->tx_dma_control |= (1 << 0);
	dr->control |= ((32 << 0)	/* prefetch threshold */
			| (64 << 8)	/* host threshold */
			| (0 << 16) /* writeback threshold */ );
      }

    /* Enable this queue and wait for hardware to initialize
       before adding to tail. */
    if (rt == VLIB_TX)
      {
	dr->control |= 1 << 25;
	while (!(dr->control & (1 << 25)))
	  ;
      }

    /* Set head/tail indices and enable DMA. */
    dr->head_index = dq->head_index;
    dr->tail_index = dq->tail_index;
  }

  return error;
}

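/* Turn unicast promiscuous mode (filter_control bit 9) on or off and
   return the previous filter_control contents. */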
static u32
ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  ixge_device_t *xd;
  ixge_regs_t *r;
  u32 old;
  ixge_main_t *xm = &ixge_main;

  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  r = xd->regs;

  old = r->filter_control;

  if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
    r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
  else
    r->filter_control = old & ~(1 << 9);

  return old;
}


static void
ixge_device_init (ixge_main_t * xm)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_device_t *xd;

  /* Reset chip(s). */
  vec_foreach (xd, xm->devices)
  {
    ixge_regs_t *r = xd->regs;
    const u32 reset_bit = (1 << 26) | (1 << 3);

    r->control |= reset_bit;

    /* No need to suspend. Timed to take ~1e-6 secs */
    while (r->control & reset_bit)
      ;

    /* Software loaded. */
    r->extended_control |= (1 << 28);

    ixge_phy_init (xd);

    /* Register ethernet interface. */
    {
      u8 addr8[6];
      u32 i, addr32[2];
      clib_error_t *error;

      addr32[0] = r->rx_ethernet_address0[0][0];
      addr32[1] = r->rx_ethernet_address0[0][1];
      for (i = 0; i < 6; i++)
	addr8[i] = addr32[i / 4] >> ((i % 4) * 8);

      error = ethernet_register_interface
	(vnm, ixge_device_class.index, xd->device_index,
	 /* ethernet address */ addr8,
	 &xd->vlib_hw_if_index, ixge_flag_change);
      if (error)
	clib_error_report (error);
    }

    {
      vnet_sw_interface_t *sw =
	vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
      xd->vlib_sw_if_index = sw->sw_if_index;
    }

    ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);

    xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;

    ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);

    /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
    r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
				      ixge_rx_queue_to_interrupt (0)) << 0);

    r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
				       ixge_tx_queue_to_interrupt (0)) << 8);

    /* There is no use in getting too many interrupts: limit them to one
       per 3/4 ring's worth of minimum-sized packets at line rate.
       Left disabled, since the kernel/vlib main loop already provides
       an adequate interrupt limiting scheme. */
    if (0)
      {
	f64 line_rate_max_pps =
	  10e9 / (8 * (64 + /* interframe padding */ 20));
	ixge_throttle_queue_interrupt (r, 0,
				       .75 * xm->n_descriptors[VLIB_RX] /
				       line_rate_max_pps);
      }

    /* Accept all multicast and broadcast packets. Should really add them
       to the dst_ethernet_address register array. */
    r->filter_control |= (1 << 10) | (1 << 8);

    /* Enable frames up to size in mac frame size register. */
    r->xge_mac.control |= 1 << 2;
    r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;

    /* Enable all interrupts. */
    if (!IXGE_ALWAYS_POLL)
      r->interrupt.enable_write_1_to_set = ~0;
  }
}

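/*
 * Slow-path process: debounces link-status events signaled by
 * ixge_interrupt () for 1 ms before reporting them to vnet, and polls
 * the hardware counters every 30 seconds so the 36-bit counters cannot
 * wrap unnoticed.
 */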
static uword
ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword event_type, *event_data = 0;
  f64 timeout, link_debounce_deadline;

  ixge_device_init (xm);

  /* Clear all counters. */
  vec_foreach (xd, xm->devices)
  {
    ixge_update_counters (xd);
    memset (xd->counters, 0, sizeof (xd->counters));
  }

  timeout = 30.0;
  link_debounce_deadline = 1e70;

  while (1)
    {
      /* 36 bit stat counters could overflow in ~50 secs.
         We poll every 30 secs to be conservative. */
      vlib_process_wait_for_event_or_clock (vm, timeout);

      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case EVENT_SET_FLAGS:
	  /* 1 ms */
	  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
	  timeout = 1e-3;
	  break;

	case ~0:
	  /* No events found: timer expired. */
	  if (vlib_time_now (vm) > link_debounce_deadline)
	    {
	      vec_foreach (xd, xm->devices)
	      {
		ixge_regs_t *r = xd->regs;
		u32 v = r->xge_mac.link_status;
		uword is_up = (v & (1 << 30)) != 0;

		vnet_hw_interface_set_flags
		  (vnm, xd->vlib_hw_if_index,
		   is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
	      }
	      link_debounce_deadline = 1e70;
	      timeout = 30.0;
	    }
	  break;

	default:
	  ASSERT (0);
	}

      if (event_data)
	_vec_len (event_data) = 0;

      /* Query stats every 30 secs. */
      {
	f64 now = vlib_time_now (vm);
	if (now - xm->time_last_stats_update > 30)
	  {
	    xm->time_last_stats_update = now;
	    vec_foreach (xd, xm->devices) ixge_update_counters (xd);
	  }
      }
    }

  return 0;
}

static vlib_node_registration_t ixge_process_node = {
  .function = ixge_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ixge-process",
};

clib_error_t *
ixge_init (vlib_main_t * vm)
{
  ixge_main_t *xm = &ixge_main;
  clib_error_t *error;

  xm->vlib_main = vm;
  memset (&xm->tx_descriptor_template, 0,
	  sizeof (xm->tx_descriptor_template));
  memset (&xm->tx_descriptor_template_mask, 0,
	  sizeof (xm->tx_descriptor_template_mask));
  xm->tx_descriptor_template.status0 =
    (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
     IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
     IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
  xm->tx_descriptor_template_mask.status0 = 0xffff;
  xm->tx_descriptor_template_mask.status1 = 0x00003fff;

  xm->tx_descriptor_template_mask.status0 &=
    ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
      | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
  xm->tx_descriptor_template_mask.status1 &=
    ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);

  error = vlib_call_init_function (vm, pci_bus_init);

  return error;
}

VLIB_INIT_FUNCTION (ixge_init);

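/*
 * PCI interrupt handler: record which device interrupted in the input
 * node's runtime data and mark the node pending; the actual work is
 * done later by ixge_input () in interrupt mode.
 */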
static void
ixge_pci_intr_handler (vlib_pci_device_t * dev)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;

  vlib_node_set_interrupt_pending (vm, ixge_input_node.index);

  /* Let node know which device is interrupting. */
  {
    vlib_node_runtime_t *rt =
      vlib_node_get_runtime (vm, ixge_input_node.index);
    rt->runtime_data[0] |= 1 << dev->private_data;
  }
}

static clib_error_t *
ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
{
  ixge_main_t *xm = &ixge_main;
  clib_error_t *error;
  void *r;
  ixge_device_t *xd;

  /* Device found: make sure we have dma memory. */
  if (unix_physmem_is_fake (vm))
    return clib_error_return (0, "no physical memory available");

  error = vlib_pci_map_resource (dev, 0, &r);
  if (error)
    return error;

  vec_add2 (xm->devices, xd, 1);

  if (vec_len (xm->devices) == 1)
    {
      ixge_input_node.function = ixge_input_multiarch_select ();
    }

  xd->pci_device = dev[0];
  xd->device_id = xd->pci_device.config0.header.device_id;
  xd->regs = r;
  xd->device_index = xd - xm->devices;
  xd->pci_function = dev->bus_address.function;
  xd->per_interface_next_index = ~0;

  /* Chip found so enable node. */
  {
    vlib_node_set_state (vm, ixge_input_node.index,
			 (IXGE_ALWAYS_POLL
			  ? VLIB_NODE_STATE_POLLING
			  : VLIB_NODE_STATE_INTERRUPT));

    dev->private_data = xd->device_index;
  }

  if (vec_len (xm->devices) == 1)
    {
      vlib_register_node (vm, &ixge_process_node);
      xm->process_node_index = ixge_process_node.index;
    }

  error = vlib_pci_bus_master_enable (dev);

  if (error)
    return error;

  return vlib_pci_intr_enable (dev);
}

/* *INDENT-OFF* */
PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
  .init_function = ixge_pci_init,
  .interrupt_handler = ixge_pci_intr_handler,
  .supported_devices = {
#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
    foreach_ixge_pci_device_id
#undef _
    { 0 },
  },
};
/* *INDENT-ON* */

void
ixge_set_next_node (ixge_rx_next_t next, char *name)
{
  vlib_node_registration_t *r = &ixge_input_node;

  switch (next)
    {
    case IXGE_RX_NEXT_IP4_INPUT:
    case IXGE_RX_NEXT_IP6_INPUT:
    case IXGE_RX_NEXT_ETHERNET_INPUT:
      r->next_nodes[next] = name;
      break;

    default:
      clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
      break;
    }
}
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */