blob: bb91a368488584a3e30896d12097d5a3b272b629 [file] [log] [blame]
Damjan Marionb4d89272016-05-12 22:14:45 +02001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/*
17 * WARNING!
18 * This driver is not intended for production use and it is unsupported.
19 * It is provided for educational use only.
20 * Please use supported DPDK driver instead.
21 */
22
Gabriel Ganneb81831d2017-12-05 17:33:37 +010023#if __x86_64__ || __i386__ || __aarch64__
Damjan Marionb4d89272016-05-12 22:14:45 +020024#include <vppinfra/vector.h>
25
26#ifndef CLIB_HAVE_VEC128
27#warning HACK: ixge driver wont really work, missing u32x4
28typedef unsigned long long u32x4;
29#endif
30
31#include <vlib/vlib.h>
32#include <vlib/unix/unix.h>
33#include <vlib/pci/pci.h>
34#include <vnet/vnet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010035#include <ixge/ixge.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020036#include <vnet/ethernet/ethernet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010037#include <vnet/plugin/plugin.h>
38#include <vpp/app/version.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020039
40#define IXGE_ALWAYS_POLL 0
41
42#define EVENT_SET_FLAGS 0
43#define IXGE_HWBP_RACE_ELOG 0
44
45#define PCI_VENDOR_ID_INTEL 0x8086
46
47/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48#define XGE_PHY_DEV_TYPE_PMA_PMD 1
49#define XGE_PHY_DEV_TYPE_PHY_XS 4
50#define XGE_PHY_ID1 0x2
51#define XGE_PHY_ID2 0x3
52#define XGE_PHY_CONTROL 0x0
53#define XGE_PHY_CONTROL_RESET (1 << 15)
54
55ixge_main_t ixge_main;
56static vlib_node_registration_t ixge_input_node;
57static vlib_node_registration_t ixge_process_node;
58
/* Acquire the device's software semaphore register.
   Spins (with process suspends after the first miss) until bit 0 of the
   software_semaphore register reads set, then sets bit 1 and re-reads it
   until the set sticks.  NOTE(review): presumably this is the 82599 SWSM
   SMBI/SWESMBI handshake — confirm against the datasheet.
   Must be called from a vlib process context (uses vlib_process_suspend). */
static void
ixge_semaphore_get (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 i;

  i = 0;
  /* Wait for bit 0 to read as set; yield 100us per retry after the
     first immediate check. */
  while (!(r->software_semaphore & (1 << 0)))
    {
      if (i > 0)
	vlib_process_suspend (vm, 100e-6);
      i++;
    }
  /* Claim bit 1 and confirm the write took effect. */
  do
    {
      r->software_semaphore |= 1 << 1;
    }
  while (!(r->software_semaphore & (1 << 1)));
}
80
Damjan Marion00a9dca2016-08-17 17:05:46 +020081static void
82ixge_semaphore_release (ixge_device_t * xd)
Damjan Marionb4d89272016-05-12 22:14:45 +020083{
Damjan Marion00a9dca2016-08-17 17:05:46 +020084 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +020085 r->software_semaphore &= ~3;
86}
87
/* Acquire software/firmware ownership bits in the sync register.
   sw_mask: software resource claim bits; the corresponding firmware
   bits are the same mask shifted left by 5.  Retries every 10ms while
   firmware holds any of the requested resources.  The semaphore is held
   only around the read-modify-write of the sync register. */
static void
ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 fw_mask = sw_mask << 5;	/* firmware's claim bits mirror software's. */
  u32 m, done = 0;

  while (!done)
    {
      ixge_semaphore_get (xd);
      m = r->software_firmware_sync;
      /* Free only if firmware holds none of the wanted resources. */
      done = (m & fw_mask) == 0;
      if (done)
	r->software_firmware_sync = m | sw_mask;
      ixge_semaphore_release (xd);
      if (!done)
	vlib_process_suspend (vm, 10e-3);
    }
}
109
Damjan Marion00a9dca2016-08-17 17:05:46 +0200110static void
111ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
Damjan Marionb4d89272016-05-12 22:14:45 +0200112{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200113 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200114 ixge_semaphore_get (xd);
115 r->software_firmware_sync &= ~sw_mask;
116 ixge_semaphore_release (xd);
117}
118
/* Read or write a PHY register over MDIO (IEEE 802.3 clause 45 style:
   address cycle followed by a read or write cycle).
   dev_type:  clause-45 MMD device type (< 32).
   reg_index: 16-bit register address.
   v:         value to write (ignored for reads).
   is_read:   non-zero => read; returns the 16-bit value read.
   For writes, returns v unchanged.  Takes the per-PHY software/firmware
   sync resource for the duration of the transaction. */
u32
ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
			 u32 v, u32 is_read)
{
  ixge_regs_t *r = xd->regs;
  const u32 busy_bit = 1 << 30;
  u32 x;

  ASSERT (xd->phy_index < 2);
  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));

  ASSERT (reg_index < (1 << 16));
  ASSERT (dev_type < (1 << 5));
  /* For a write, stage the data before issuing the command. */
  if (!is_read)
    r->xge_mac.phy_data = v;

  /* Address cycle. */
  x =
    reg_index | (dev_type << 16) |
    (xd->phys[xd->phy_index].mdio_address << 21);
  r->xge_mac.phy_command = x | busy_bit;
  /* Busy wait timed to take 28e-6 secs. No suspend. */
  while (r->xge_mac.phy_command & busy_bit)
    ;

  /* Data cycle: op-code 2 = read, 1 = write (bits 27:26). */
  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
  while (r->xge_mac.phy_command & busy_bit)
    ;

  if (is_read)
    v = r->xge_mac.phy_data >> 16;	/* read data lives in upper 16 bits. */

  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));

  return v;
}
155
Damjan Marion00a9dca2016-08-17 17:05:46 +0200156static u32
157ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
Damjan Marionb4d89272016-05-12 22:14:45 +0200158{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200159 return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160 1);
161}
162
163static void
164ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165{
166 (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167 0);
168}
169
170static void
171ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172{
173 ixge_main_t *xm = &ixge_main;
174 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200175 u32 v;
176
177 v = 0;
178 v |= (sda != 0) << 3;
179 v |= (scl != 0) << 1;
180 xd->regs->i2c_control = v;
181}
182
Damjan Marion00a9dca2016-08-17 17:05:46 +0200183static void
184ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
Damjan Marionb4d89272016-05-12 22:14:45 +0200185{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200186 ixge_main_t *xm = &ixge_main;
187 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200188 u32 v;
189
190 v = xd->regs->i2c_control;
191 *sda = (v & (1 << 2)) != 0;
192 *scl = (v & (1 << 0)) != 0;
193}
194
/* Read one 16-bit word from the NIC's EEPROM via the eeprom_read
   register: write start bit + word address, busy-wait for the done bit
   (bit 1), and return the data from the upper 16 bits.  Busy-waits with
   no suspend — callers expect this to be fast. */
static u16
ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
  ixge_regs_t *r = xd->regs;
  u32 v;
  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
  /* Wait for done bit. */
  while (!((v = r->eeprom_read) & (1 << 1)))
    ;
  return v >> 16;
}
206
207static void
208ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
209{
210 u32 tx_disable_bit = 1 << 3;
211 if (enable)
212 xd->regs->sdp_control &= ~tx_disable_bit;
213 else
214 xd->regs->sdp_control |= tx_disable_bit;
215}
216
217static void
218ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
219{
220 u32 is_10g_bit = 1 << 5;
221 if (enable)
222 xd->regs->sdp_control |= is_10g_bit;
223 else
224 xd->regs->sdp_control &= ~is_10g_bit;
225}
226
/* Run the SFP-module init sequence stored in the NIC's EEPROM.
   EEPROM word 0x2b points at a list of (module id, register-values
   address) pairs terminated by 0xffff; the values at the matching
   entry's address are written one by one into core_analog_config until
   an 0xffff terminator.  Returns a clib error if the list is absent or
   sfp_type is not found; leaves the laser off and 10g mode selected. */
static clib_error_t *
ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
{
  u16 a, id, reg_values_addr = 0;

  /* Word 0x2b is the list pointer; 0 / 0xffff mean "no sequence". */
  a = ixge_read_eeprom (xd, 0x2b);
  if (a == 0 || a == 0xffff)
    return clib_error_create ("no init sequence in eeprom");

  /* Scan (id, address) pairs until terminator or a match. */
  while (1)
    {
      id = ixge_read_eeprom (xd, ++a);
      if (id == 0xffff)
	break;
      reg_values_addr = ixge_read_eeprom (xd, ++a);
      if (id == sfp_type)
	break;
    }
  if (id != sfp_type)
    return clib_error_create ("failed to find id 0x%x", sfp_type);

  /* Hold SW/FW sync resource bit 3 while writing analog config values. */
  ixge_software_firmware_sync (xd, 1 << 3);
  while (1)
    {
      u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
      if (v == 0xffff)
	break;
      xd->regs->core_analog_config = v;
    }
  ixge_software_firmware_sync_release (xd, 1 << 3);

  /* Make sure laser is off. We'll turn on the laser when
     the interface is brought up. */
  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);

  return 0;
}
265
/* Bring the SFP link up or down.
   On up: program the autoneg registers (first PMA/PMD 10G serial SFI,
   then — after the link partner responds — 10G SFI serdes link mode,
   restarting autoneg each time), then enable the laser and give the
   link partner time to notice.  On down: just turn the laser off.
   NOTE(review): the busy-wait on link_partner_ability has no timeout —
   with no partner present this spins forever. */
static void
ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
{
  u32 v;

  if (is_up)
    {
      /* pma/pmd 10g serial SFI. */
      xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
      xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;

      v = xd->regs->xge_mac.auto_negotiation_control;
      v &= ~(7 << 13);
      v |= (0 << 13);
      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Busy-wait until the link partner advertises ability bits. */
      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
	;

      v = xd->regs->xge_mac.auto_negotiation_control;

      /* link mode 10g sfi serdes */
      v &= ~(7 << 13);
      v |= (3 << 13);

      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Dummy read of link status — presumably to latch/clear it;
         confirm against the 82599 datasheet. */
      xd->regs->xge_mac.link_status;
    }

  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);

  /* Give time for link partner to notice that we're up. */
  if (is_up && vlib_in_process_context (vlib_get_main ()))
    {
      vlib_process_suspend (vlib_get_main (), 300e-3);
    }
}
308
309always_inline ixge_dma_regs_t *
310get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
311{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200312 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200313 ASSERT (qi < 128);
314 if (rt == VLIB_RX)
315 return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316 else
317 return &r->tx_dma[qi];
318}
319
/* vnet admin up/down callback for an ixge interface.
   Up: enable rx, enable tx DMA, set rx queue 0's DMA enable bit
   (control bit 25) and busy-wait until hardware acknowledges it.
   Down: clear the rx/tx enables.  Always forwards the new state to the
   SFP layer.  Returns 0 (no error path). */
static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);

  if (is_up)
    {
      xd->regs->rx_enable |= 1;
      xd->regs->tx_dma_control |= 1;
      /* Enable rx queue 0 DMA and wait for the bit to read back set. */
      dr->control |= 1 << 25;
      while (!(dr->control & (1 << 25)))
	;
    }
  else
    {
      xd->regs->rx_enable &= ~1;
      xd->regs->tx_dma_control &= ~1;
    }

  ixge_sfp_device_up_down (xd, is_up);

  return /* no error */ 0;
}
347
/* Initialize an SFP-based port: set up the bit-banged i2c bus, read the
   module's 128-byte EEPROM at i2c address 0x50, and if it's valid run
   the EEPROM-driven analog init sequence.  On i2c timeout or invalid
   EEPROM contents the module id is marked unknown.  SFP ports have no
   MDIO PHY, so mdio_address is set to ~0. */
static void
ixge_sfp_phy_init (ixge_device_t * xd)
{
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  i2c_bus_t *ib = &xd->i2c_bus;

  ib->private_data = xd->device_index;
  ib->put_bits = ixge_i2c_put_bits;
  ib->get_bits = ixge_i2c_get_bits;
  vlib_i2c_init (ib);

  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);

  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
    xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
  else
    {
      /* FIXME 5 => SR/LR eeprom ID. */
      clib_error_t *e =
	ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
      if (e)
	clib_error_report (e);
    }

  /* No MDIO-addressable PHY on SFP ports. */
  phy->mdio_address = ~0;
}
374
/* Initialize the port's PHY.  SFP device variants are delegated to
   ixge_sfp_phy_init.  Otherwise: probe all 32 MDIO addresses for a PHY
   (valid ID1 is neither 0 nor 0xffff), record its 32-bit id, log an
   elog event, then reset the PHY and wait for the self-clearing reset
   bit to clear.  Silently returns if no PHY is found. */
static void
ixge_phy_init (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_phy_t *phy = xd->phys + xd->phy_index;

  switch (xd->device_id)
    {
    case IXGE_82599_sfp:
    case IXGE_82599_sfp_em:
    case IXGE_82599_sfp_fcoe:
      /* others? */
      return ixge_sfp_phy_init (xd);

    default:
      break;
    }

  /* Probe address of phy. */
  {
    u32 i, v;

    phy->mdio_address = ~0;
    for (i = 0; i < 32; i++)
      {
	phy->mdio_address = i;
	v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
	if (v != 0xffff && v != 0)
	  break;
      }

    /* No PHY found? */
    if (i >= 32)
      return;
  }

  /* PHY id = ID1 in the upper half, ID2 in the lower half. */
  phy->id =
    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));

  {
    ELOG_TYPE_DECLARE (e) = {
      .function = (char *) __FUNCTION__,
      .format = "ixge %d, phy id 0x%d mdio address %d",
      .format_args = "i4i4i4",
    };
    struct
    {
      u32 instance, id, address;
    } *ed;
    ed = ELOG_DATA (&vm->elog_main, e);
    ed->instance = xd->device_index;
    ed->id = phy->id;
    ed->address = phy->mdio_address;
  }

  /* Reset phy. */
  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
		      XGE_PHY_CONTROL_RESET);

  /* Wait for self-clearing reset bit to clear. */
  do
    {
      vlib_process_suspend (vm, 1e-3);
    }
  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
	 XGE_PHY_CONTROL_RESET);
}
443
Damjan Marion00a9dca2016-08-17 17:05:46 +0200444static u8 *
445format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
Damjan Marionb4d89272016-05-12 22:14:45 +0200446{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200447 ixge_rx_from_hw_descriptor_t *d =
448 va_arg (*va, ixge_rx_from_hw_descriptor_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +0200449 u32 s0 = d->status[0], s2 = d->status[2];
450 u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
Christophe Fontained3c008d2017-10-02 18:10:54 +0200451 u32 indent = format_get_indent (s);
Damjan Marionb4d89272016-05-12 22:14:45 +0200452
453 s = format (s, "%s-owned",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200454 (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
455 "hw");
456 s =
457 format (s, ", length this descriptor %d, l3 offset %d",
458 d->n_packet_bytes_this_descriptor,
459 IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
Damjan Marionb4d89272016-05-12 22:14:45 +0200460 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
461 s = format (s, ", end-of-packet");
462
463 s = format (s, "\n%U", format_white_space, indent);
464
465 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
466 s = format (s, "layer2 error");
467
468 if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
469 {
470 s = format (s, "layer 2 type %d", (s0 & 0x1f));
471 return s;
472 }
473
474 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
475 s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476 format_white_space, indent);
477
478 if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479 {
480 s = format (s, "ip4%s",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200481 (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482 "");
Damjan Marionb4d89272016-05-12 22:14:45 +0200483 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
484 s = format (s, " checksum %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200485 (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
486 "bad" : "ok");
Damjan Marionb4d89272016-05-12 22:14:45 +0200487 }
488 if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489 s = format (s, "ip6%s",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200490 (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491 "");
Damjan Marionb4d89272016-05-12 22:14:45 +0200492 is_tcp = is_udp = 0;
493 if ((is_ip = (is_ip4 | is_ip6)))
494 {
495 is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496 is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497 if (is_tcp)
498 s = format (s, ", tcp");
499 if (is_udp)
500 s = format (s, ", udp");
501 }
502
503 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
504 s = format (s, ", tcp checksum %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200505 (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
506 "ok");
Damjan Marionb4d89272016-05-12 22:14:45 +0200507 if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
508 s = format (s, ", udp checksum %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200509 (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
510 "ok");
Damjan Marionb4d89272016-05-12 22:14:45 +0200511
512 return s;
513}
514
Damjan Marion00a9dca2016-08-17 17:05:46 +0200515static u8 *
516format_ixge_tx_descriptor (u8 * s, va_list * va)
Damjan Marionb4d89272016-05-12 22:14:45 +0200517{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200518 ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +0200519 u32 s0 = d->status0, s1 = d->status1;
Christophe Fontained3c008d2017-10-02 18:10:54 +0200520 u32 indent = format_get_indent (s);
Damjan Marionb4d89272016-05-12 22:14:45 +0200521 u32 v;
522
523 s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
Damjan Marion00a9dca2016-08-17 17:05:46 +0200524 d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
Damjan Marionb4d89272016-05-12 22:14:45 +0200525
526 s = format (s, "\n%U", format_white_space, indent);
527
528 if ((v = (s0 >> 0) & 3))
529 s = format (s, "reserved 0x%x, ", v);
530
531 if ((v = (s0 >> 2) & 3))
532 s = format (s, "mac 0x%x, ", v);
533
534 if ((v = (s0 >> 4) & 0xf) != 3)
535 s = format (s, "type 0x%x, ", v);
536
537 s = format (s, "%s%s%s%s%s%s%s%s",
538 (s0 & (1 << 8)) ? "eop, " : "",
539 (s0 & (1 << 9)) ? "insert-fcs, " : "",
540 (s0 & (1 << 10)) ? "reserved26, " : "",
541 (s0 & (1 << 11)) ? "report-status, " : "",
542 (s0 & (1 << 12)) ? "reserved28, " : "",
543 (s0 & (1 << 13)) ? "is-advanced, " : "",
544 (s0 & (1 << 14)) ? "vlan-enable, " : "",
545 (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546
547 if ((v = s1 & 0xf) != 0)
548 s = format (s, "status 0x%x, ", v);
549
550 if ((v = (s1 >> 4) & 0xf))
551 s = format (s, "context 0x%x, ", v);
552
553 if ((v = (s1 >> 8) & 0x3f))
554 s = format (s, "options 0x%x, ", v);
555
556 return s;
557}
558
/* Packet-trace record captured per rx descriptor. */
typedef struct
{
  /* Descriptor contents before (as written by hardware) and after
     (as re-armed for hardware) processing. */
  ixge_descriptor_t before, after;

  /* vlib buffer index the descriptor pointed at. */
  u32 buffer_index;

  /* ixge device (index into ixge_main.devices). */
  u16 device_index;

  /* rx queue the descriptor came from. */
  u8 queue_index;

  /* Non-zero if this descriptor starts a packet. */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;
574
Damjan Marion00a9dca2016-08-17 17:05:46 +0200575static u8 *
576format_ixge_rx_dma_trace (u8 * s, va_list * va)
Damjan Marionb4d89272016-05-12 22:14:45 +0200577{
578 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
Damjan Marion00a9dca2016-08-17 17:05:46 +0200579 vlib_node_t *node = va_arg (*va, vlib_node_t *);
580 vnet_main_t *vnm = vnet_get_main ();
581 ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582 ixge_main_t *xm = &ixge_main;
583 ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
584 format_function_t *f;
Christophe Fontained3c008d2017-10-02 18:10:54 +0200585 u32 indent = format_get_indent (s);
Damjan Marionb4d89272016-05-12 22:14:45 +0200586
587 {
Damjan Marion00a9dca2016-08-17 17:05:46 +0200588 vnet_sw_interface_t *sw =
589 vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
590 s =
591 format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592 t->queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +0200593 }
594
595 s = format (s, "\n%Ubefore: %U",
596 format_white_space, indent,
597 format_ixge_rx_from_hw_descriptor, &t->before);
598 s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599 format_white_space, indent,
Damjan Marion00a9dca2016-08-17 17:05:46 +0200600 t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
Damjan Marionb4d89272016-05-12 22:14:45 +0200601
602 s = format (s, "\n%Ubuffer 0x%x: %U",
603 format_white_space, indent,
Damjan Marionbd846cd2017-11-21 13:12:41 +0100604 t->buffer_index, format_vnet_buffer, &t->buffer);
Damjan Marionb4d89272016-05-12 22:14:45 +0200605
Damjan Marion00a9dca2016-08-17 17:05:46 +0200606 s = format (s, "\n%U", format_white_space, indent);
Damjan Marionb4d89272016-05-12 22:14:45 +0200607
608 f = node->format_buffer;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200609 if (!f || !t->is_start_of_packet)
Damjan Marionb4d89272016-05-12 22:14:45 +0200610 f = format_hex_bytes;
611 s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612
613 return s;
614}
615
/* Error counters for the ixge input node: each _(symbol, description)
   pair expands to an IXGE_ERROR_<symbol> enum value below and to the
   counter's human-readable string elsewhere. */
#define foreach_ixge_error \
  _ (none, "no error") \
  _ (tx_full_drops, "tx ring full drops") \
  _ (ip4_checksum_error, "ip4 checksum errors") \
  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

typedef enum
{
#define _(f,s) IXGE_ERROR_##f,
  foreach_ixge_error
#undef _
  IXGE_N_ERROR,			/* number of error counters */
} ixge_error_t;
630
631always_inline void
Damjan Marion00a9dca2016-08-17 17:05:46 +0200632ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
633 u32 s00, u32 s02,
Damjan Marionb4d89272016-05-12 22:14:45 +0200634 u8 * next0, u8 * error0, u32 * flags0)
635{
636 u8 is0_ip4, is0_ip6, n0, e0;
637 u32 f0;
638
639 e0 = IXGE_ERROR_none;
640 n0 = IXGE_RX_NEXT_ETHERNET_INPUT;
641
642 is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
643 n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644
645 e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200646 ? IXGE_ERROR_ip4_checksum_error : e0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200647
648 is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649 n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650
651 n0 = (xd->per_interface_next_index != ~0) ?
652 xd->per_interface_next_index : n0;
653
654 /* Check for error. */
655 n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656
657 f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
658 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200659 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200660
661 f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
662 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200663 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200664
665 *error0 = e0;
666 *next0 = n0;
667 *flags0 = f0;
668}
669
670always_inline void
Damjan Marion00a9dca2016-08-17 17:05:46 +0200671ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
672 u32 s00, u32 s02,
Damjan Marionb4d89272016-05-12 22:14:45 +0200673 u32 s10, u32 s12,
674 u8 * next0, u8 * error0, u32 * flags0,
675 u8 * next1, u8 * error1, u32 * flags1)
676{
677 u8 is0_ip4, is0_ip6, n0, e0;
678 u8 is1_ip4, is1_ip6, n1, e1;
679 u32 f0, f1;
680
681 e0 = e1 = IXGE_ERROR_none;
682 n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683
684 is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
685 is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
686
687 n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688 n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689
690 e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200691 ? IXGE_ERROR_ip4_checksum_error : e0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200692 e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200693 ? IXGE_ERROR_ip4_checksum_error : e1);
Damjan Marionb4d89272016-05-12 22:14:45 +0200694
695 is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696 is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697
698 n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699 n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700
701 n0 = (xd->per_interface_next_index != ~0) ?
702 xd->per_interface_next_index : n0;
703 n1 = (xd->per_interface_next_index != ~0) ?
704 xd->per_interface_next_index : n1;
705
706 /* Check for error. */
707 n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708 n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709
710 *error0 = e0;
711 *error1 = e1;
712
713 *next0 = n0;
714 *next1 = n1;
715
716 f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
717 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200718 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200719 f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
720 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200721 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200722
723 f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
724 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200725 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200726 f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
727 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200728 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200729
730 *flags0 = f0;
731 *flags1 = f1;
732}
733
/* Record packet traces for a batch of processed rx descriptors.
   before_descriptors/before_buffers: descriptor contents and buffer
   indices as written by hardware; after_descriptors: the descriptors as
   re-armed for hardware.  Walks descriptors two at a time (then one),
   classifying each to find the trace's next node, and copies descriptor
   state plus the leading payload bytes into an ixge_rx_dma_trace_t per
   buffer.  Start-of-packet tracking continues across calls via
   dq->rx.is_start_of_packet / saved_start_of_packet_next_index. */
static void
ixge_rx_trace (ixge_main_t * xm,
	       ixge_device_t * xd,
	       ixge_dma_queue_t * dq,
	       ixge_descriptor_t * before_descriptors,
	       u32 * before_buffers,
	       ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_rx_from_hw_descriptor_t *bd;
  ixge_rx_to_hw_descriptor_t *ad;
  u32 *b, n_left, is_sop, next_index_sop;

  n_left = n_descriptors;
  b = before_buffers;
  bd = &before_descriptors->rx_from_hw;
  ad = &after_descriptors->rx_to_hw;
  is_sop = dq->rx.is_start_of_packet;
  next_index_sop = dq->rx.saved_start_of_packet_next_index;

  /* Dual loop: trace two descriptors per iteration. */
  while (n_left >= 2)
    {
      u32 bi0, bi1, flags0, flags1;
      vlib_buffer_t *b0, *b1;
      ixge_rx_dma_trace_t *t0, *t1;
      u8 next0, error0, next1, error1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      ixge_rx_next_and_error_from_status_x2 (xd,
					     bd[0].status[0], bd[0].status[2],
					     bd[1].status[0], bd[1].status[2],
					     &next0, &error0, &flags0,
					     &next1, &error1, &flags1);

      /* The whole chained packet is traced under its first buffer's
         next node; non-sop buffers reuse the saved sop next index. */
      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      next_index_sop = is_sop ? next1 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t1->before.rx_from_hw = bd[1];
      t0->after.rx_to_hw = ad[0];
      t1->after.rx_to_hw = ad[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      /* Copy buffer metadata (without pre_data) and the first payload
         bytes into the trace record. */
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
	      sizeof (t1->buffer.pre_data));

      b += 2;
      bd += 2;
      ad += 2;
    }

  /* Single loop: trace any remaining descriptor. */
  while (n_left >= 1)
    {
      u32 bi0, flags0;
      vlib_buffer_t *b0;
      ixge_rx_dma_trace_t *t0;
      u8 next0, error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      ixge_rx_next_and_error_from_status_x1 (xd,
					     bd[0].status[0], bd[0].status[2],
					     &next0, &error0, &flags0);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t0->after.rx_to_hw = ad[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));

      b += 1;
      bd += 1;
      ad += 1;
    }
}
845
/* Packet-trace record captured per tx descriptor. */
typedef struct
{
  /* Descriptor as queued to hardware. */
  ixge_tx_descriptor_t descriptor;

  /* vlib buffer index the descriptor pointed at. */
  u32 buffer_index;

  /* ixge device (index into ixge_main.devices). */
  u16 device_index;

  /* tx queue the descriptor was queued on. */
  u8 queue_index;

  /* Non-zero if this descriptor starts a packet. */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;
861
Damjan Marion00a9dca2016-08-17 17:05:46 +0200862static u8 *
863format_ixge_tx_dma_trace (u8 * s, va_list * va)
Damjan Marionb4d89272016-05-12 22:14:45 +0200864{
865 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
Damjan Marion00a9dca2016-08-17 17:05:46 +0200867 ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868 vnet_main_t *vnm = vnet_get_main ();
869 ixge_main_t *xm = &ixge_main;
870 ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
871 format_function_t *f;
Christophe Fontained3c008d2017-10-02 18:10:54 +0200872 u32 indent = format_get_indent (s);
Damjan Marionb4d89272016-05-12 22:14:45 +0200873
874 {
Damjan Marion00a9dca2016-08-17 17:05:46 +0200875 vnet_sw_interface_t *sw =
876 vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
877 s =
878 format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879 t->queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +0200880 }
881
882 s = format (s, "\n%Udescriptor: %U",
883 format_white_space, indent,
884 format_ixge_tx_descriptor, &t->descriptor);
885
886 s = format (s, "\n%Ubuffer 0x%x: %U",
887 format_white_space, indent,
Damjan Marionbd846cd2017-11-21 13:12:41 +0100888 t->buffer_index, format_vnet_buffer, &t->buffer);
Damjan Marionb4d89272016-05-12 22:14:45 +0200889
Damjan Marion00a9dca2016-08-17 17:05:46 +0200890 s = format (s, "\n%U", format_white_space, indent);
Damjan Marionb4d89272016-05-12 22:14:45 +0200891
892 f = format_ethernet_header_with_length;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200893 if (!f || !t->is_start_of_packet)
Damjan Marionb4d89272016-05-12 22:14:45 +0200894 f = format_hex_bytes;
895 s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896
897 return s;
898}
899
/* TX state carried across ixge_tx_no_wrap calls so that a packet whose
   buffer chain straddles the ring wrap point is accounted correctly. */
typedef struct
{
  /* Node runtime of the caller; used for tracing. */
  vlib_node_runtime_t *node;

  /* Non-zero when the next buffer to be queued starts a new packet. */
  u32 is_start_of_packet;

  /* Byte count accumulated so far for the in-progress packet. */
  u32 n_bytes_in_packet;

  /* Descriptor that began the in-progress packet; its status1 receives
     the final packet byte count. */
  ixge_tx_descriptor_t *start_of_packet_descriptor;
} ixge_tx_state_t;
910
911static void
912ixge_tx_trace (ixge_main_t * xm,
913 ixge_device_t * xd,
914 ixge_dma_queue_t * dq,
915 ixge_tx_state_t * tx_state,
916 ixge_tx_descriptor_t * descriptors,
Damjan Marion00a9dca2016-08-17 17:05:46 +0200917 u32 * buffers, uword n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +0200918{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200919 vlib_main_t *vm = xm->vlib_main;
920 vlib_node_runtime_t *node = tx_state->node;
921 ixge_tx_descriptor_t *d;
922 u32 *b, n_left, is_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +0200923
924 n_left = n_descriptors;
925 b = buffers;
926 d = descriptors;
927 is_sop = tx_state->is_start_of_packet;
928
929 while (n_left >= 2)
930 {
931 u32 bi0, bi1;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200932 vlib_buffer_t *b0, *b1;
933 ixge_tx_dma_trace_t *t0, *t1;
Damjan Marionb4d89272016-05-12 22:14:45 +0200934
935 bi0 = b[0];
936 bi1 = b[1];
937 n_left -= 2;
938
939 b0 = vlib_get_buffer (vm, bi0);
940 b1 = vlib_get_buffer (vm, bi1);
941
942 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943 t0->is_start_of_packet = is_sop;
944 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945
946 t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947 t1->is_start_of_packet = is_sop;
948 is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949
950 t0->queue_index = dq->queue_index;
951 t1->queue_index = dq->queue_index;
952 t0->device_index = xd->device_index;
953 t1->device_index = xd->device_index;
954 t0->descriptor = d[0];
955 t1->descriptor = d[1];
956 t0->buffer_index = bi0;
957 t1->buffer_index = bi1;
958 memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959 memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960 memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961 sizeof (t0->buffer.pre_data));
962 memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963 sizeof (t1->buffer.pre_data));
964
965 b += 2;
966 d += 2;
967 }
968
969 while (n_left >= 1)
970 {
971 u32 bi0;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200972 vlib_buffer_t *b0;
973 ixge_tx_dma_trace_t *t0;
Damjan Marionb4d89272016-05-12 22:14:45 +0200974
975 bi0 = b[0];
976 n_left -= 1;
977
978 b0 = vlib_get_buffer (vm, bi0);
979
980 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981 t0->is_start_of_packet = is_sop;
982 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983
984 t0->queue_index = dq->queue_index;
985 t0->device_index = xd->device_index;
986 t0->descriptor = d[0];
987 t0->buffer_index = bi0;
988 memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989 memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990 sizeof (t0->buffer.pre_data));
991
992 b += 1;
993 d += 1;
994 }
995}
996
997always_inline uword
998ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
999{
1000 i32 d = i1 - i0;
1001 ASSERT (i0 < q->n_descriptors);
1002 ASSERT (i1 < q->n_descriptors);
1003 return d < 0 ? q->n_descriptors + d : d;
1004}
1005
1006always_inline uword
1007ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1008{
1009 u32 d = i0 + i1;
1010 ASSERT (i0 < q->n_descriptors);
1011 ASSERT (i1 < q->n_descriptors);
1012 d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013 return d;
1014}
1015
1016always_inline uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02001017ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1018 ixge_tx_descriptor_t * d)
Damjan Marionb4d89272016-05-12 22:14:45 +02001019{
1020 u32 cmp;
1021
1022 cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1023 ^ xm->tx_descriptor_template.status0);
1024 if (cmp)
1025 return 0;
1026 cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1027 ^ xm->tx_descriptor_template.status1);
1028 if (cmp)
1029 return 0;
1030
1031 return 1;
1032}
1033
/* Fill a contiguous (non-wrapping) span of the TX descriptor ring with
   n_descriptors buffers starting at start_descriptor_index.  Buffers
   previously occupying those slots are queued on
   xm->tx_buffers_pending_free for later release.  Packet boundary state
   (start-of-packet descriptor and running byte count) is carried in/out
   through tx_state so chained packets may straddle calls.
   Returns n_descriptors. */
static uword
ixge_tx_no_wrap (ixge_main_t * xm,
		 ixge_device_t * xd,
		 ixge_dma_queue_t * dq,
		 u32 * buffers,
		 u32 start_descriptor_index,
		 u32 n_descriptors, ixge_tx_state_t * tx_state)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_tx_descriptor_t *d, *d_sop;
  u32 n_left = n_descriptors;
  /* Freed-buffer list grows at the end of the pending-free vector. */
  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
  /* Shadow array mapping ring slot -> buffer index currently in it. */
  u32 *to_tx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 is_sop = tx_state->is_start_of_packet;
  u32 len_sop = tx_state->n_bytes_in_packet;
  u16 template_status = xm->tx_descriptor_template.status0;
  u32 descriptor_prefetch_rotor = 0;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index].tx;
  /* If mid-packet, the SOP descriptor came from a previous call. */
  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;

  /* Main loop: two descriptors per iteration (needs >= 4 left so the
     buffer prefetches below stay in bounds). */
  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, fi0, len0;
      u32 bi1, fi1, len1;
      u8 is_eop0, is_eop1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      /* Prefetch descriptors one cache line ahead, every other pass. */
      if ((descriptor_prefetch_rotor & 0x3) == 0)
	CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);

      descriptor_prefetch_rotor += 2;

      bi0 = buffers[0];
      bi1 = buffers[1];

      /* Swap new buffer into the slot; collect the old one for freeing.
         The conditional pointer bump skips never-used (zero) slots. */
      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      to_free[0] = fi1 = to_tx[1];
      to_tx[1] = bi1;
      to_free += fi1 != 0;

      buffers += 2;
      n_left -= 2;
      to_tx += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      /* End of packet when the buffer has no chained successor. */
      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
      is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;
      len1 = b1->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[1].buffer_address = vlib_buffer_get_pa (vm, b1);

      d[0].n_bytes_this_buffer = len0;
      d[1].n_bytes_this_buffer = len1;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
      d[1].status0 =
	template_status | (is_eop1 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      /* Accumulate packet length; rewrite it into the start-of-packet
         descriptor's status1 each time so it ends up with the total. */
      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;

      len_sop = (is_sop ? 0 : len_sop) + len1;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop1 ? d : d_sop;

      is_sop = is_eop1;
    }

  /* Single-descriptor cleanup loop. */
  while (n_left > 0)
    {
      vlib_buffer_t *b0;
      u32 bi0, fi0, len0;
      u8 is_eop0;

      bi0 = buffers[0];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      buffers += 1;
      n_left -= 1;
      to_tx += 1;

      b0 = vlib_get_buffer (vm, bi0);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[0].n_bytes_this_buffer = len0;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;
    }

  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
    {
      to_tx =
	vec_elt_at_index (dq->descriptor_buffer_indices,
			  start_descriptor_index);
      ixge_tx_trace (xm, xd, dq, tx_state,
		     &dq->descriptors[start_descriptor_index].tx, to_tx,
		     n_descriptors);
    }

  /* Commit how many old buffers were actually collected. */
  _vec_len (xm->tx_buffers_pending_free) =
    to_free - xm->tx_buffers_pending_free;

  /* When we are done d_sop can point to end of ring.  Wrap it if so. */
  {
    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;

    ASSERT (d_sop - d_start <= dq->n_descriptors);
    d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
  }

  /* Hand packet-boundary state back to the caller. */
  tx_state->is_start_of_packet = is_sop;
  tx_state->start_of_packet_descriptor = d_sop;
  tx_state->n_bytes_in_packet = len_sop;

  return n_descriptors;
}
1197
/* Interface TX function: place the frame's buffers on TX DMA queue 0,
   dropping whole packets from the tail if the ring lacks room, then ring
   the hardware tail doorbell and free completed buffers.
   Returns f->n_vectors (drops are accounted via the error counter). */
static uword
ixge_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
  ixge_dma_queue_t *dq;
  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
  u32 queue_index = 0;		/* fixme parameter */
  ixge_tx_state_t tx_state;

  tx_state.node = node;
  tx_state.is_start_of_packet = 1;
  tx_state.start_of_packet_descriptor = 0;
  tx_state.n_bytes_in_packet = 0;

  from = vlib_frame_vector_args (f);

  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);

  /* Refresh head from the write-back area the NIC updates. */
  dq->head_index = dq->tx.head_index_write_back[0];

  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
  n_left_tx = dq->n_descriptors - 1;
  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);

  _vec_len (xm->tx_buffers_pending_free) = 0;

  n_descriptors_to_tx = f->n_vectors;
  n_tail_drop = 0;
  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
    {
      i32 i, n_ok, i_eop, i_sop;

      /* Ring is too full: scan backwards from the last slot that fits
         to find the last complete packet boundary (end-of-packet =
         buffer without a next-buffer chain). */
      i_sop = i_eop = ~0;
      for (i = n_left_tx - 1; i >= 0; i--)
	{
	  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
	  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      if (i_sop != ~0 && i_eop != ~0)
		break;
	      i_eop = i;
	      i_sop = i + 1;
	    }
	}
      /* NOTE(review): when the scan runs to completion i == -1, and when
         it stops at i == 0 everything is dropped even if i_eop is valid;
         i_sop is never read after the loop — looks intentional but
         confirm against upstream history. */
      if (i == 0)
	n_ok = 0;
      else
	n_ok = i_eop + 1;

      {
	/* Event-log the ring-full condition for post-mortem analysis. */
	ELOG_TYPE_DECLARE (e) =
	{
	  .function = (char *) __FUNCTION__,.format =
	    "ixge %d, ring full to tx %d head %d tail %d",.format_args =
	    "i2i2i2i2",};
	struct
	{
	  u16 instance, to_tx, head, tail;
	} *ed;
	ed = ELOG_DATA (&vm->elog_main, e);
	ed->instance = xd->device_index;
	ed->to_tx = n_descriptors_to_tx;
	ed->head = dq->head_index;
	ed->tail = dq->tail_index;
      }

      if (n_ok < n_descriptors_to_tx)
	{
	  /* Queue the overflow buffers for freeing and count the drop. */
	  n_tail_drop = n_descriptors_to_tx - n_ok;
	  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
	  vlib_error_count (vm, ixge_input_node.index,
			    IXGE_ERROR_tx_full_drops, n_tail_drop);
	}

      n_descriptors_to_tx = n_ok;
    }

  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;

  /* Process from tail to end of descriptor ring. */
  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
    {
      u32 n =
	clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
      n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
      from += n;
      n_descriptors_to_tx -= n;
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* Wrapped around: continue from the start of the ring. */
  if (n_descriptors_to_tx > 0)
    {
      u32 n =
	ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
      from += n;
      ASSERT (n == n_descriptors_to_tx);
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* We should only get full packets. */
  ASSERT (tx_state.is_start_of_packet);

  /* Report status when last descriptor is done. */
  {
    u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
    d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
  }

  /* Give new descriptors to hardware. */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);

    /* Descriptor writes must be visible before the tail doorbell. */
    CLIB_MEMORY_BARRIER ();

    dr->tail_index = dq->tail_index;
  }

  /* Free any buffers that are done. */
  {
    u32 n = _vec_len (xm->tx_buffers_pending_free);
    if (n > 0)
      {
	vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
	_vec_len (xm->tx_buffers_pending_free) = 0;
	ASSERT (dq->tx.n_buffers_on_ring >= n);
	/* Tail-dropped buffers never made it onto the ring. */
	dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
      }
  }

  return f->n_vectors;
}
1339
1340static uword
1341ixge_rx_queue_no_wrap (ixge_main_t * xm,
1342 ixge_device_t * xd,
1343 ixge_dma_queue_t * dq,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001344 u32 start_descriptor_index, u32 n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +02001345{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001346 vlib_main_t *vm = xm->vlib_main;
1347 vlib_node_runtime_t *node = dq->rx.node;
1348 ixge_descriptor_t *d;
1349 static ixge_descriptor_t *d_trace_save;
1350 static u32 *d_trace_buffers;
Damjan Marionb4d89272016-05-12 22:14:45 +02001351 u32 n_descriptors_left = n_descriptors;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001352 u32 *to_rx =
1353 vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1354 u32 *to_add;
Damjan Marionb4d89272016-05-12 22:14:45 +02001355 u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1356 u32 bi_last = dq->rx.saved_last_buffer_index;
1357 u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1358 u32 is_sop = dq->rx.is_start_of_packet;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001359 u32 next_index, n_left_to_next, *to_next;
Damjan Marionb4d89272016-05-12 22:14:45 +02001360 u32 n_packets = 0;
1361 u32 n_bytes = 0;
1362 u32 n_trace = vlib_get_trace_count (vm, node);
Dave Barach11fb09e2020-08-06 12:10:09 -04001363 vlib_buffer_t *b_last, b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001364
1365 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1366 d = &dq->descriptors[start_descriptor_index];
1367
Dave Barach11fb09e2020-08-06 12:10:09 -04001368 b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001369 next_index = dq->rx.next_index;
1370
1371 if (n_trace > 0)
1372 {
1373 u32 n = clib_min (n_trace, n_descriptors);
1374 if (d_trace_save)
1375 {
1376 _vec_len (d_trace_save) = 0;
1377 _vec_len (d_trace_buffers) = 0;
1378 }
1379 vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1380 vec_add (d_trace_buffers, to_rx, n);
1381 }
1382
1383 {
1384 uword l = vec_len (xm->rx_buffers_to_add);
1385
1386 if (l < n_descriptors_left)
1387 {
1388 u32 n_to_alloc = 2 * dq->n_descriptors - l;
1389 u32 n_allocated;
1390
1391 vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1392
1393 _vec_len (xm->rx_buffers_to_add) = l;
Damjan Marioncef87f12017-10-05 15:32:41 +02001394 n_allocated =
1395 vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
Damjan Marionb4d89272016-05-12 22:14:45 +02001396 _vec_len (xm->rx_buffers_to_add) += n_allocated;
1397
Damjan Marion00a9dca2016-08-17 17:05:46 +02001398 /* Handle transient allocation failure */
1399 if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
Damjan Marionb4d89272016-05-12 22:14:45 +02001400 {
1401 if (n_allocated == 0)
1402 vlib_error_count (vm, ixge_input_node.index,
1403 IXGE_ERROR_rx_alloc_no_physmem, 1);
1404 else
1405 vlib_error_count (vm, ixge_input_node.index,
1406 IXGE_ERROR_rx_alloc_fail, 1);
1407
1408 n_descriptors_left = l + n_allocated;
1409 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001410 n_descriptors = n_descriptors_left;
Damjan Marionb4d89272016-05-12 22:14:45 +02001411 }
1412
1413 /* Add buffers from end of vector going backwards. */
1414 to_add = vec_end (xm->rx_buffers_to_add) - 1;
1415 }
1416
1417 while (n_descriptors_left > 0)
1418 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001419 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
Damjan Marionb4d89272016-05-12 22:14:45 +02001420
1421 while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1422 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001423 vlib_buffer_t *b0, *b1;
Damjan Marion8f499362018-10-22 13:07:02 +02001424 vlib_buffer_t *f0, *f1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001425 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1426 u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1427 u8 is_eop0, error0, next0;
1428 u8 is_eop1, error1, next1;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001429 ixge_descriptor_t d0, d1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001430
1431 vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1432 vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1433
Damjan Marion00a9dca2016-08-17 17:05:46 +02001434 CLIB_PREFETCH (d + 2, 32, STORE);
Damjan Marionb4d89272016-05-12 22:14:45 +02001435
Damjan Marion00a9dca2016-08-17 17:05:46 +02001436 d0.as_u32x4 = d[0].as_u32x4;
1437 d1.as_u32x4 = d[1].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001438
1439 s20 = d0.rx_from_hw.status[2];
1440 s21 = d1.rx_from_hw.status[2];
1441
1442 s00 = d0.rx_from_hw.status[0];
1443 s01 = d1.rx_from_hw.status[0];
1444
Damjan Marion00a9dca2016-08-17 17:05:46 +02001445 if (!
1446 ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001447 goto found_hw_owned_descriptor_x2;
1448
1449 bi0 = to_rx[0];
1450 bi1 = to_rx[1];
1451
1452 ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1453 fi0 = to_add[0];
1454 fi1 = to_add[-1];
1455
1456 to_rx[0] = fi0;
1457 to_rx[1] = fi1;
1458 to_rx += 2;
1459 to_add -= 2;
1460
Damjan Marioncef87f12017-10-05 15:32:41 +02001461#if 0
Steven899a84b2018-01-29 20:09:09 -08001462 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1463 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi1));
1464 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
1465 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi1));
Damjan Marioncef87f12017-10-05 15:32:41 +02001466#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001467
1468 b0 = vlib_get_buffer (vm, bi0);
1469 b1 = vlib_get_buffer (vm, bi1);
1470
Damjan Marionb4d89272016-05-12 22:14:45 +02001471 CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1472 CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1473
1474 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1475 is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1476
1477 ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1478 &next0, &error0, &flags0,
1479 &next1, &error1, &flags1);
1480
1481 next0 = is_sop ? next0 : next_index_sop;
1482 next1 = is_eop0 ? next1 : next0;
1483 next_index_sop = next1;
1484
1485 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1486 b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1487
1488 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1489 vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001490 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1491 vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001492
1493 b0->error = node->errors[error0];
1494 b1->error = node->errors[error1];
1495
1496 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1497 len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1498 n_bytes += len0 + len1;
1499 n_packets += is_eop0 + is_eop1;
1500
1501 /* Give new buffers to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001502 f0 = vlib_get_buffer (vm, fi0);
1503 f1 = vlib_get_buffer (vm, fi1);
1504 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
1505 d1.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001506 d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
1507 d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001508 d[0].as_u32x4 = d0.as_u32x4;
1509 d[1].as_u32x4 = d1.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001510
1511 d += 2;
1512 n_descriptors_left -= 2;
1513
1514 /* Point to either l2 or l3 header depending on next. */
1515 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001516 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001517 l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001518 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001519
1520 b0->current_length = len0 - l3_offset0;
1521 b1->current_length = len1 - l3_offset1;
1522 b0->current_data = l3_offset0;
1523 b1->current_data = l3_offset1;
1524
1525 b_last->next_buffer = is_sop ? ~0 : bi0;
1526 b0->next_buffer = is_eop0 ? ~0 : bi1;
1527 bi_last = bi1;
1528 b_last = b1;
1529
1530 if (CLIB_DEBUG > 0)
1531 {
1532 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1533 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1534
1535 if (is_eop0)
1536 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001537 u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1538 /* follow_buffer_next */ 1);
1539 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001540 }
1541 if (is_eop1)
1542 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001543 u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1544 /* follow_buffer_next */ 1);
1545 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001546 }
1547 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001548 if (0) /* "Dave" version */
1549 {
1550 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1551 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001552
Damjan Marion00a9dca2016-08-17 17:05:46 +02001553 if (is_eop0)
1554 {
1555 to_next[0] = bi_sop0;
1556 to_next++;
1557 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001558
Damjan Marion00a9dca2016-08-17 17:05:46 +02001559 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1560 to_next, n_left_to_next,
1561 bi_sop0, next0);
1562 }
1563 if (is_eop1)
1564 {
1565 to_next[0] = bi_sop1;
1566 to_next++;
1567 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001568
Damjan Marion00a9dca2016-08-17 17:05:46 +02001569 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1570 to_next, n_left_to_next,
1571 bi_sop1, next1);
1572 }
1573 is_sop = is_eop1;
1574 bi_sop = bi_sop1;
1575 }
1576 if (1) /* "Eliot" version */
1577 {
1578 /* Speculatively enqueue to cached next. */
1579 u8 saved_is_sop = is_sop;
1580 u32 bi_sop_save = bi_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +02001581
Damjan Marion00a9dca2016-08-17 17:05:46 +02001582 bi_sop = saved_is_sop ? bi0 : bi_sop;
1583 to_next[0] = bi_sop;
1584 to_next += is_eop0;
1585 n_left_to_next -= is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001586
Damjan Marion00a9dca2016-08-17 17:05:46 +02001587 bi_sop = is_eop0 ? bi1 : bi_sop;
1588 to_next[0] = bi_sop;
1589 to_next += is_eop1;
1590 n_left_to_next -= is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001591
Damjan Marion00a9dca2016-08-17 17:05:46 +02001592 is_sop = is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001593
Damjan Marion00a9dca2016-08-17 17:05:46 +02001594 if (PREDICT_FALSE
1595 (!(next0 == next_index && next1 == next_index)))
1596 {
1597 /* Undo speculation. */
1598 to_next -= is_eop0 + is_eop1;
1599 n_left_to_next += is_eop0 + is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001600
Damjan Marion00a9dca2016-08-17 17:05:46 +02001601 /* Re-do both descriptors being careful about where we enqueue. */
1602 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1603 if (is_eop0)
1604 {
1605 if (next0 != next_index)
1606 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1607 else
1608 {
1609 to_next[0] = bi_sop;
1610 to_next += 1;
1611 n_left_to_next -= 1;
1612 }
1613 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001614
Damjan Marion00a9dca2016-08-17 17:05:46 +02001615 bi_sop = is_eop0 ? bi1 : bi_sop;
1616 if (is_eop1)
1617 {
1618 if (next1 != next_index)
1619 vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1620 else
1621 {
1622 to_next[0] = bi_sop;
1623 to_next += 1;
1624 n_left_to_next -= 1;
1625 }
1626 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001627
Damjan Marion00a9dca2016-08-17 17:05:46 +02001628 /* Switch cached next index when next for both packets is the same. */
1629 if (is_eop0 && is_eop1 && next0 == next1)
1630 {
1631 vlib_put_next_frame (vm, node, next_index,
1632 n_left_to_next);
1633 next_index = next0;
1634 vlib_get_next_frame (vm, node, next_index,
1635 to_next, n_left_to_next);
1636 }
1637 }
1638 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001639 }
1640
Damjan Marion00a9dca2016-08-17 17:05:46 +02001641 /* Bail out of dual loop and proceed with single loop. */
Damjan Marionb4d89272016-05-12 22:14:45 +02001642 found_hw_owned_descriptor_x2:
1643
1644 while (n_descriptors_left > 0 && n_left_to_next > 0)
1645 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001646 vlib_buffer_t *b0;
Damjan Marion8f499362018-10-22 13:07:02 +02001647 vlib_buffer_t *f0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001648 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1649 u8 is_eop0, error0, next0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001650 ixge_descriptor_t d0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001651
Damjan Marion00a9dca2016-08-17 17:05:46 +02001652 d0.as_u32x4 = d[0].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001653
1654 s20 = d0.rx_from_hw.status[2];
1655 s00 = d0.rx_from_hw.status[0];
1656
Damjan Marion00a9dca2016-08-17 17:05:46 +02001657 if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001658 goto found_hw_owned_descriptor_x1;
1659
1660 bi0 = to_rx[0];
1661 ASSERT (to_add >= xm->rx_buffers_to_add);
1662 fi0 = to_add[0];
1663
1664 to_rx[0] = fi0;
1665 to_rx += 1;
1666 to_add -= 1;
1667
Damjan Marioncef87f12017-10-05 15:32:41 +02001668#if 0
Steven899a84b2018-01-29 20:09:09 -08001669 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1670 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
Damjan Marioncef87f12017-10-05 15:32:41 +02001671#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001672
1673 b0 = vlib_get_buffer (vm, bi0);
1674
Damjan Marionb4d89272016-05-12 22:14:45 +02001675 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1676 ixge_rx_next_and_error_from_status_x1
Damjan Marion00a9dca2016-08-17 17:05:46 +02001677 (xd, s00, s20, &next0, &error0, &flags0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001678
1679 next0 = is_sop ? next0 : next_index_sop;
1680 next_index_sop = next0;
1681
1682 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1683
1684 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001685 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001686
1687 b0->error = node->errors[error0];
1688
1689 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1690 n_bytes += len0;
1691 n_packets += is_eop0;
1692
1693 /* Give new buffer to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001694 f0 = vlib_get_buffer (vm, fi0);
1695 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001696 d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001697 d[0].as_u32x4 = d0.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001698
1699 d += 1;
1700 n_descriptors_left -= 1;
1701
1702 /* Point to either l2 or l3 header depending on next. */
1703 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001704 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001705 b0->current_length = len0 - l3_offset0;
1706 b0->current_data = l3_offset0;
1707
1708 b_last->next_buffer = is_sop ? ~0 : bi0;
1709 bi_last = bi0;
1710 b_last = b0;
1711
1712 bi_sop = is_sop ? bi0 : bi_sop;
1713
1714 if (CLIB_DEBUG > 0 && is_eop0)
1715 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001716 u8 *msg =
1717 vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1718 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001719 }
1720
Damjan Marion00a9dca2016-08-17 17:05:46 +02001721 if (0) /* "Dave" version */
1722 {
1723 if (is_eop0)
1724 {
1725 to_next[0] = bi_sop;
1726 to_next++;
1727 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001728
Damjan Marion00a9dca2016-08-17 17:05:46 +02001729 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1730 to_next, n_left_to_next,
1731 bi_sop, next0);
1732 }
1733 }
1734 if (1) /* "Eliot" version */
1735 {
1736 if (PREDICT_TRUE (next0 == next_index))
1737 {
1738 to_next[0] = bi_sop;
1739 to_next += is_eop0;
1740 n_left_to_next -= is_eop0;
1741 }
1742 else
1743 {
1744 if (next0 != next_index && is_eop0)
1745 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
Damjan Marionb4d89272016-05-12 22:14:45 +02001746
Damjan Marion00a9dca2016-08-17 17:05:46 +02001747 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1748 next_index = next0;
1749 vlib_get_next_frame (vm, node, next_index,
1750 to_next, n_left_to_next);
1751 }
1752 }
1753 is_sop = is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001754 }
1755 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1756 }
1757
Damjan Marion00a9dca2016-08-17 17:05:46 +02001758found_hw_owned_descriptor_x1:
Damjan Marionb4d89272016-05-12 22:14:45 +02001759 if (n_descriptors_left > 0)
1760 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1761
1762 _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1763
1764 {
1765 u32 n_done = n_descriptors - n_descriptors_left;
1766
1767 if (n_trace > 0 && n_done > 0)
1768 {
1769 u32 n = clib_min (n_trace, n_done);
1770 ixge_rx_trace (xm, xd, dq,
1771 d_trace_save,
1772 d_trace_buffers,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001773 &dq->descriptors[start_descriptor_index], n);
Damjan Marionb4d89272016-05-12 22:14:45 +02001774 vlib_set_trace_count (vm, node, n_trace - n);
1775 }
1776 if (d_trace_save)
1777 {
1778 _vec_len (d_trace_save) = 0;
1779 _vec_len (d_trace_buffers) = 0;
1780 }
1781
1782 /* Don't keep a reference to b_last if we don't have to.
1783 Otherwise we can over-write a next_buffer pointer after already haven
1784 enqueued a packet. */
1785 if (is_sop)
1786 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001787 b_last->next_buffer = ~0;
1788 bi_last = ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001789 }
1790
1791 dq->rx.n_descriptors_done_this_call = n_done;
1792 dq->rx.n_descriptors_done_total += n_done;
1793 dq->rx.is_start_of_packet = is_sop;
1794 dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1795 dq->rx.saved_last_buffer_index = bi_last;
1796 dq->rx.saved_start_of_packet_next_index = next_index_sop;
1797 dq->rx.next_index = next_index;
1798 dq->rx.n_bytes += n_bytes;
1799
1800 return n_packets;
1801 }
1802}
1803
/* Service one RX DMA queue.  Reads the hardware head pointer, processes
   every descriptor handed back since the last call (in two chunks when
   the ring wraps), then returns the consumed descriptors to hardware by
   advancing the tail pointer.  Returns the number of packets received. */
static uword
ixge_rx_queue (ixge_main_t * xm,
	       ixge_device_t * xd,
	       vlib_node_runtime_t * node, u32 queue_index)
{
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
  uword n_packets = 0;
  u32 hw_head_index, sw_head_index;

  /* One time initialization. */
  if (!dq->rx.node)
    {
      dq->rx.node = node;
      dq->rx.is_start_of_packet = 1;
      dq->rx.saved_start_of_packet_buffer_index = ~0;
      dq->rx.saved_last_buffer_index = ~0;
    }

  dq->rx.next_index = node->cached_next_index;

  /* Per-call accounting; accumulated by ixge_rx_queue_no_wrap. */
  dq->rx.n_descriptors_done_total = 0;
  dq->rx.n_descriptors_done_this_call = 0;
  dq->rx.n_bytes = 0;

  /* Fetch head from hardware and compare to where we think we are. */
  hw_head_index = dr->head_index;
  sw_head_index = dq->head_index;

  if (hw_head_index == sw_head_index)
    goto done;

  /* Hardware head is behind us in ring order: process from our head to
     the physical end of the ring first. */
  if (hw_head_index < sw_head_index)
    {
      u32 n_tried = dq->n_descriptors - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);

      /* Stopped early (e.g. hit a hardware-owned descriptor): don't
         attempt the wrapped-around second chunk. */
      if (dq->rx.n_descriptors_done_this_call != n_tried)
	goto done;
    }
  /* Process the (possibly remaining) contiguous chunk up to hw head. */
  if (hw_head_index >= sw_head_index)
    {
      u32 n_tried = hw_head_index - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);
    }

done:
  dq->head_index = sw_head_index;
  dq->tail_index =
    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);

  /* Give tail back to hardware.  The barrier orders our descriptor
     writes before the tail-register update that exposes them. */
  CLIB_MEMORY_BARRIER ();

  dr->tail_index = dq->tail_index;

  vlib_increment_combined_counter (vnet_main.
				   interface_main.combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX,
				   0 /* thread_index */ ,
				   xd->vlib_sw_if_index, n_packets,
				   dq->rx.n_bytes);

  return n_packets;
}
1876
/* Handle a miscellaneous (non-RX/TX-queue) interrupt cause bit i.
   Bit 20 is the link-status-change interrupt and gets real handling;
   every other cause (bits 16..31) is only recorded in the event log
   using the enum-string table below (indexed by i - 16). */
static void
ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;

  if (i != 20)
    {
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
	  16,.enum_strings =
	{
	  "flow director",
	  "rx miss",
	  "pci exception",
	  "mailbox",
	  "link status change",
	  "linksec key exchange",
	  "manageability event",
	  "reserved23",
	  "sdp0",
	  "sdp1",
	  "sdp2",
	  "sdp3",
	  "ecc", "descriptor handler error", "tcp timer", "other",},};
      struct
      {
	u8 instance;
	u8 index;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->index = i - 16;
    }
  else
    {
      /* Link status change: bit 30 of the link status register is the
         link-up indication. */
      u32 v = r->xge_mac.link_status;
      uword is_up = (v & (1 << 30)) != 0;

      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
      struct
      {
	u32 instance, link_status;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->link_status = v;
      xd->link_status_at_last_link_change = v;

      /* Tell the ixge process: event data packs link state in the top
         bit and the hw interface index in the rest. */
      vlib_process_signal_event (vm, ixge_process_node.index,
				 EVENT_SET_FLAGS,
				 ((is_up << 31) | xd->vlib_hw_if_index));
    }
}
1936
1937always_inline u32
1938clean_block (u32 * b, u32 * t, u32 n_left)
1939{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001940 u32 *t0 = t;
Damjan Marionb4d89272016-05-12 22:14:45 +02001941
1942 while (n_left >= 4)
1943 {
1944 u32 bi0, bi1, bi2, bi3;
1945
1946 t[0] = bi0 = b[0];
1947 b[0] = 0;
1948 t += bi0 != 0;
1949
1950 t[0] = bi1 = b[1];
1951 b[1] = 0;
1952 t += bi1 != 0;
1953
1954 t[0] = bi2 = b[2];
1955 b[2] = 0;
1956 t += bi2 != 0;
1957
1958 t[0] = bi3 = b[3];
1959 b[3] = 0;
1960 t += bi3 != 0;
1961
1962 b += 4;
1963 n_left -= 4;
1964 }
1965
1966 while (n_left > 0)
1967 {
1968 u32 bi0;
1969
1970 t[0] = bi0 = b[0];
1971 b[0] = 0;
1972 t += bi0 != 0;
1973 b += 1;
1974 n_left -= 1;
1975 }
1976
1977 return t - t0;
1978}
1979
/* Reclaim transmitted buffers on one TX DMA queue.  Uses the head
   write-back location updated by the device to find out how far
   hardware has progressed, gathers the completed buffer indices with
   clean_block, and frees them in one batch. */
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
  u32 n_clean, *b, *t, *t0;
  i32 n_hw_owned_descriptors;
  i32 first_to_clean, last_to_clean;
  u64 hwbp_race = 0;

  /* Handle case where head write back pointer update
   * arrives after the interrupt during high PCI bus loads.
   */
  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
    {
      hwbp_race++;
      /* Log only on the first spin of this race to avoid flooding. */
      if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
	{
	  ELOG_TYPE_DECLARE (e) =
	  {
	    .function = (char *) __FUNCTION__,.format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
	      = "i4i4i4i4",};
	  struct
	  {
	    u32 instance, head_index, tail_index, n_buffers_on_ring;
	  } *ed;
	  ed = ELOG_DATA (&vm->elog_main, e);
	  ed->instance = xd->device_index;
	  ed->head_index = dq->head_index;
	  ed->tail_index = dq->tail_index;
	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
	}
    }

  /* Adopt the head position the device wrote back; everything between
     the new head and our tail is still hardware-owned. */
  dq->head_index = dq->tx.head_index_write_back[0];
  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;

  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
    {
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
	  = "i4i4i4i4i4",};
      struct
      {
	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->head_index = dq->head_index;
      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
      ed->n_clean = n_clean;
      ed->retries = hwbp_race;
    }

  /*
   * This function used to wait until hardware owned zero descriptors.
   * At high PPS rates, that doesn't happen until the TX ring is
   * completely full of descriptors which need to be cleaned up.
   * That, in turn, causes TX ring-full drops and/or long RX service
   * interruptions.
   */
  if (n_clean == 0)
    return;

  /* Clean the n_clean descriptors prior to the reported hardware head */
  last_to_clean = dq->head_index - 1;
  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
    last_to_clean;

  first_to_clean = (last_to_clean) - (n_clean - 1);
  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
    first_to_clean;

  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
  t0 = t = xm->tx_buffers_pending_free;
  b = dq->descriptor_buffer_indices + first_to_clean;

  /* Wrap case: clean from first to end, then start to last */
  if (first_to_clean > last_to_clean)
    {
      t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
      first_to_clean = 0;
      b = dq->descriptor_buffer_indices;
    }

  /* Typical case: clean from first to last */
  if (first_to_clean <= last_to_clean)
    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);

  if (t > t0)
    {
      u32 n = t - t0;
      /* Batch-free the reclaimed buffers and reset the scratch vector. */
      vlib_buffer_free_no_next (vm, t0, n);
      ASSERT (dq->tx.n_buffers_on_ring >= n);
      dq->tx.n_buffers_on_ring -= n;
      _vec_len (xm->tx_buffers_pending_free) = 0;
    }
}
2085
2086/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002087always_inline uword
2088ixge_interrupt_is_rx_queue (uword i)
2089{
2090 return i < 8;
2091}
Damjan Marionb4d89272016-05-12 22:14:45 +02002092
Damjan Marion00a9dca2016-08-17 17:05:46 +02002093always_inline uword
2094ixge_interrupt_is_tx_queue (uword i)
2095{
2096 return i >= 8 && i < 16;
2097}
Damjan Marionb4d89272016-05-12 22:14:45 +02002098
Damjan Marion00a9dca2016-08-17 17:05:46 +02002099always_inline uword
2100ixge_tx_queue_to_interrupt (uword i)
2101{
2102 return 8 + i;
2103}
Damjan Marionb4d89272016-05-12 22:14:45 +02002104
Damjan Marion00a9dca2016-08-17 17:05:46 +02002105always_inline uword
2106ixge_rx_queue_to_interrupt (uword i)
2107{
2108 return 0 + i;
2109}
Damjan Marionb4d89272016-05-12 22:14:45 +02002110
/* Map interrupt cause bit i back to its RX queue index.  Inverse of
   ixge_rx_queue_to_interrupt; RX bits start at 0. */
always_inline uword
ixge_interrupt_rx_queue (uword i)
{
  ASSERT (ixge_interrupt_is_rx_queue (i));
  return i - 0;
}
2117
/* Map interrupt cause bit i back to its TX queue index.  Inverse of
   ixge_tx_queue_to_interrupt; TX bits start at 8. */
always_inline uword
ixge_interrupt_tx_queue (uword i)
{
  ASSERT (ixge_interrupt_is_tx_queue (i));
  return i - 8;
}
2124
/* Service one device: read its interrupt status, acknowledge it, and
   dispatch every set cause bit to the RX queue, TX queue, or misc
   interrupt handler.  Returns the number of RX packets processed. */
static uword
ixge_device_input (ixge_main_t * xm,
		   ixge_device_t * xd, vlib_node_runtime_t * node)
{
  ixge_regs_t *r = xd->regs;
  u32 i, s;
  uword n_rx_packets = 0;

  /* Read pending causes and acknowledge them by writing the same bits
     to the write-1-to-clear register. */
  s = r->interrupt.status_write_1_to_set;
  if (s)
    r->interrupt.status_write_1_to_clear = s;

  /* *INDENT-OFF* */
  foreach_set_bit (i, s, ({
    if (ixge_interrupt_is_rx_queue (i))
      n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));

    else if (ixge_interrupt_is_tx_queue (i))
      ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));

    else
      ixge_interrupt (xm, xd, i);
  }));
  /* *INDENT-ON* */

  return n_rx_packets;
}
2152
/* Input node function.  In interrupt mode only devices whose bit is
   set in node->runtime_data[0] are serviced; in polling mode every
   device is serviced on each dispatch.  Device interrupts are
   re-enabled whenever the node is in (or switching to) interrupt
   mode.  Returns the total number of RX packets processed. */
static uword
ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword n_rx_packets = 0;

  if (node->state == VLIB_NODE_STATE_INTERRUPT)
    {
      uword i;

      /* Loop over devices with interrupts. */
      /* *INDENT-OFF* */
      foreach_set_bit (i, node->runtime_data[0], ({
	xd = vec_elt_at_index (xm->devices, i);
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts since we're going to stay in interrupt mode. */
	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }));
      /* *INDENT-ON* */

      /* Clear mask of devices with pending interrupts. */
      node->runtime_data[0] = 0;
    }
  else
    {
      /* Poll all devices for input/interrupts. */
      vec_foreach (xd, xm->devices)
      {
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts when switching out of polling mode. */
	if (node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }
    }

  return n_rx_packets;
}
2195
/* Human-readable strings for the driver's error counters, expanded
   from the foreach_ixge_error table; order matches the error enum
   used by the ixge-input node registration. */
static char *ixge_error_strings[] = {
#define _(n,s) s,
  foreach_ixge_error
#undef _
};
2201
Damjan Marion00a9dca2016-08-17 17:05:46 +02002202/* *INDENT-OFF* */
/* Registration of the ixge-input node: packets are dispatched to one
   of the IXGE_RX_NEXT_* nodes chosen per descriptor by the RX path. */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
  .function = ixge_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "ixge-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ixge_rx_dma_trace,

  .n_errors = IXGE_N_ERROR,
  .error_strings = ixge_error_strings,

  .n_next_nodes = IXGE_RX_N_NEXT,
  .next_nodes = {
    [IXGE_RX_NEXT_DROP] = "error-drop",
    [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
    [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
  },
};
2226
Damjan Marion00a9dca2016-08-17 17:05:46 +02002227/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002228
Damjan Marion00a9dca2016-08-17 17:05:46 +02002229static u8 *
2230format_ixge_device_name (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002231{
Damjan Marion23227982018-10-22 13:38:57 +02002232 vlib_main_t *vm = vlib_get_main ();
Damjan Marionb4d89272016-05-12 22:14:45 +02002233 u32 i = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002234 ixge_main_t *xm = &ixge_main;
2235 ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
Damjan Marion23227982018-10-22 13:38:57 +02002236 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
Damjan Marioncef87f12017-10-05 15:32:41 +02002237 return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2238 addr->domain, addr->bus, addr->slot, addr->function);
Damjan Marionb4d89272016-05-12 22:14:45 +02002239}
2240
#define IXGE_COUNTER_IS_64_BIT (1 << 0)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)

/* Per-counter attribute flags, one byte per counter in
   foreach_ixge_counter order; _64 entries are 64-bit counters. */
static u8 ixge_counter_flags[] = {
#define _(a,f) 0,
#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
  foreach_ixge_counter
#undef _
#undef _64
};
2251
/* Accumulate the device's hardware statistics registers into
   xd->counters.  Counters flagged NOT_CLEAR_ON_READ are written back
   to zero after reading; 64-bit counters add their high half from the
   register that follows the low half. */
static void
ixge_update_counters (ixge_device_t * xd)
{
  /* Byte offset for counter registers. */
  static u32 reg_offsets[] = {
#define _(a,f) (a) / sizeof (u32),
#define _64(a,f) _(a,f)
    foreach_ixge_counter
#undef _
#undef _64
  };
  /* Registers are accessed as an array of 32-bit words from the BAR. */
  volatile u32 *r = (volatile u32 *) xd->regs;
  int i;

  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
    {
      u32 o = reg_offsets[i];
      xd->counters[i] += r[o];
      if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
	r[o] = 0;
      if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
    }
}
2276
Damjan Marion00a9dca2016-08-17 17:05:46 +02002277static u8 *
2278format_ixge_device_id (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002279{
2280 u32 device_id = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002281 char *t = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002282 switch (device_id)
2283 {
2284#define _(f,n) case n: t = #f; break;
2285 foreach_ixge_pci_device_id;
2286#undef _
2287 default:
2288 t = 0;
2289 break;
2290 }
2291 if (t == 0)
2292 s = format (s, "unknown 0x%x", device_id);
2293 else
2294 s = format (s, "%s", t);
2295 return s;
2296}
2297
Damjan Marion00a9dca2016-08-17 17:05:46 +02002298static u8 *
2299format_ixge_link_status (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002300{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002301 ixge_device_t *xd = va_arg (*args, ixge_device_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +02002302 u32 v = xd->link_status_at_last_link_change;
2303
2304 s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2305
2306 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002307 char *modes[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002308 "1g", "10g parallel", "10g serial", "autoneg",
2309 };
Damjan Marion00a9dca2016-08-17 17:05:46 +02002310 char *speeds[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002311 "unknown", "100m", "1g", "10g",
2312 };
2313 s = format (s, ", mode %s, speed %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002314 modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002315 }
2316
2317 return s;
2318}
2319
/* Format the "show hardware" output for one device: id, link state,
   PCIe link speed, PHY/SFP info, RX/TX ring occupancy, and all
   non-zero hardware counters since the last clear. */
static u8 *
format_ixge_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  vlib_main_t *vm = vlib_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  u32 indent = format_get_indent (s);

  /* Snapshot counters and current link status before printing. */
  ixge_update_counters (xd);
  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;

  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
	      format_ixge_device_id, xd->device_id,
	      format_white_space, indent + 2, format_ixge_link_status, xd);

  {

    vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
    /* NOTE(review): d is never freed here — looks like a small leak per
       "show" invocation; confirm whether vlib_pci_free_device_info
       should be called. */
    vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);

    if (d)
      s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
		  format_vlib_pci_link_speed, d);
  }

  /* PHY (if an MDIO address was found), else SFP EEPROM, else nothing. */
  s = format (s, "\n%U", format_white_space, indent + 2);
  if (phy->mdio_address != ~0)
    s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
    s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
  else
    s = format (s, "PHY not found");

  /* FIXME */
  {
    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
    u32 hw_head_index = dr->head_index;
    u32 sw_head_index = dq->head_index;
    u32 nitems;

    /* Distance between hardware and software heads = unprocessed. */
    nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
    s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
		format_white_space, indent + 2, nitems, dq->n_descriptors);

    s = format (s, "\n%U%d buffers in driver rx cache",
		format_white_space, indent + 2,
		vec_len (xm->rx_buffers_to_add));

    s = format (s, "\n%U%d buffers on tx queue 0 ring",
		format_white_space, indent + 2,
		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
  }
  {
    u32 i;
    u64 v;
    static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
      foreach_ixge_counter
#undef _
#undef _64
    };

    /* Print only counters that changed since the last clear. */
    for (i = 0; i < ARRAY_LEN (names); i++)
      {
	v = xd->counters[i] - xd->counters_last_clear[i];
	if (v != 0)
	  s = format (s, "\n%U%-40U%16Ld",
		      format_white_space, indent + 2,
		      format_c_identifier, names[i], v);
      }
  }

  return s;
}
2399
Damjan Marion00a9dca2016-08-17 17:05:46 +02002400static void
2401ixge_clear_hw_interface_counters (u32 instance)
Damjan Marionb4d89272016-05-12 22:14:45 +02002402{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002403 ixge_main_t *xm = &ixge_main;
2404 ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002405 ixge_update_counters (xd);
2406 memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2407}
2408
2409/*
2410 * Dynamically redirect all pkts from a specific interface
2411 * to the specified node
2412 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002413static void
2414ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2415 u32 node_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02002416{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002417 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002418 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002419 ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002420
2421 /* Shut off redirection */
2422 if (node_index == ~0)
2423 {
2424 xd->per_interface_next_index = node_index;
2425 return;
2426 }
2427
2428 xd->per_interface_next_index =
2429 vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2430}
2431
2432
Damjan Marion00a9dca2016-08-17 17:05:46 +02002433/* *INDENT-OFF* */
/* Device class registration: ties the driver's TX, formatting,
   counter-clear, admin up/down and RX-redirect callbacks to the
   "ixge" interface class. */
VNET_DEVICE_CLASS (ixge_device_class) = {
  .name = "ixge",
  .tx_function = ixge_interface_tx,
  .format_device_name = format_ixge_device_name,
  .format_device = format_ixge_device,
  .format_tx_trace = format_ixge_tx_dma_trace,
  .clear_counters = ixge_clear_hw_interface_counters,
  .admin_up_down_function = ixge_interface_admin_up_down,
  .rx_redirect_to_node = ixge_set_interface_next_node,
};
Damjan Marion00a9dca2016-08-17 17:05:46 +02002444/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002445
Damjan Marion00a9dca2016-08-17 17:05:46 +02002446#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
Damjan Marionb4d89272016-05-12 22:14:45 +02002447
2448static clib_error_t *
2449ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2450{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002451 ixge_main_t *xm = &ixge_main;
2452 vlib_main_t *vm = xm->vlib_main;
2453 ixge_dma_queue_t *dq;
2454 clib_error_t *error = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002455
2456 vec_validate (xd->dma_queues[rt], queue_index);
2457 dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2458
Damjan Marion00a9dca2016-08-17 17:05:46 +02002459 if (!xm->n_descriptors_per_cache_line)
2460 xm->n_descriptors_per_cache_line =
2461 CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002462
Damjan Marion00a9dca2016-08-17 17:05:46 +02002463 if (!xm->n_bytes_in_rx_buffer)
Damjan Marionb4d89272016-05-12 22:14:45 +02002464 xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2465 xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
Damjan Marionb4d89272016-05-12 22:14:45 +02002466
Damjan Marion00a9dca2016-08-17 17:05:46 +02002467 if (!xm->n_descriptors[rt])
Damjan Marionb4d89272016-05-12 22:14:45 +02002468 xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2469
2470 dq->queue_index = queue_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002471 dq->n_descriptors =
2472 round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
Damjan Marionb4d89272016-05-12 22:14:45 +02002473 dq->head_index = dq->tail_index = 0;
2474
Damjan Marion68b4da62018-09-30 18:26:20 +02002475 dq->descriptors = vlib_physmem_alloc_aligned (vm, dq->n_descriptors *
2476 sizeof (dq->descriptors[0]),
2477 128 /* per chip spec */ );
2478 if (!dq->descriptors)
2479 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002480
Dave Barachb7b92992018-10-17 10:38:51 -04002481 clib_memset (dq->descriptors, 0,
2482 dq->n_descriptors * sizeof (dq->descriptors[0]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002483 vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2484
2485 if (rt == VLIB_RX)
2486 {
2487 u32 n_alloc, i;
2488
Damjan Marioncef87f12017-10-05 15:32:41 +02002489 n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
2490 vec_len (dq->descriptor_buffer_indices));
Damjan Marionb4d89272016-05-12 22:14:45 +02002491 ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2492 for (i = 0; i < n_alloc; i++)
2493 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002494 dq->descriptors[i].rx_to_hw.tail_address =
Damjan Marion8f499362018-10-22 13:07:02 +02002495 vlib_buffer_get_pa
2496 (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002497 }
2498 }
2499 else
2500 {
2501 u32 i;
2502
Damjan Marion68b4da62018-09-30 18:26:20 +02002503 dq->tx.head_index_write_back =
2504 vlib_physmem_alloc (vm, CLIB_CACHE_LINE_BYTES);
2505 if (!dq->tx.head_index_write_back)
2506 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002507
2508 for (i = 0; i < dq->n_descriptors; i++)
2509 dq->descriptors[i].tx = xm->tx_descriptor_template;
2510
2511 vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2512 }
2513
2514 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002515 ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002516 u64 a;
2517
Damjan Marion68b4da62018-09-30 18:26:20 +02002518 a = vlib_physmem_get_pa (vm, dq->descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002519 dr->descriptor_address[0] = a & 0xFFFFFFFF;
2520 dr->descriptor_address[1] = a >> (u64) 32;
2521 dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2522 dq->head_index = dq->tail_index = 0;
2523
2524 if (rt == VLIB_RX)
2525 {
2526 ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2527 dr->rx_split_control =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002528 ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2529 | ( /* lo free descriptor threshold (units of 64 descriptors) */
2530 (1 << 22)) | ( /* descriptor type: advanced one buffer */
2531 (1 << 25)) | ( /* drop if no descriptors available */
2532 (1 << 28)));
Damjan Marionb4d89272016-05-12 22:14:45 +02002533
2534 /* Give hardware all but last 16 cache lines' worth of descriptors. */
2535 dq->tail_index = dq->n_descriptors -
Damjan Marion00a9dca2016-08-17 17:05:46 +02002536 16 * xm->n_descriptors_per_cache_line;
Damjan Marionb4d89272016-05-12 22:14:45 +02002537 }
2538 else
2539 {
2540 /* Make sure its initialized before hardware can get to it. */
2541 dq->tx.head_index_write_back[0] = dq->head_index;
2542
Damjan Marion68b4da62018-09-30 18:26:20 +02002543 a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
Damjan Marionb4d89272016-05-12 22:14:45 +02002544 dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2545 dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2546 }
2547
2548 /* DMA on 82599 does not work with [13] rx data write relaxed ordering
2549 and [12] undocumented set. */
2550 if (rt == VLIB_RX)
2551 dr->dca_control &= ~((1 << 13) | (1 << 12));
2552
2553 CLIB_MEMORY_BARRIER ();
2554
2555 if (rt == VLIB_TX)
2556 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002557 xd->regs->tx_dma_control |= (1 << 0);
2558 dr->control |= ((32 << 0) /* prefetch threshold */
2559 | (64 << 8) /* host threshold */
2560 | (0 << 16) /* writeback threshold */ );
Damjan Marionb4d89272016-05-12 22:14:45 +02002561 }
2562
2563 /* Enable this queue and wait for hardware to initialize
2564 before adding to tail. */
2565 if (rt == VLIB_TX)
2566 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002567 dr->control |= 1 << 25;
2568 while (!(dr->control & (1 << 25)))
2569 ;
Damjan Marionb4d89272016-05-12 22:14:45 +02002570 }
2571
2572 /* Set head/tail indices and enable DMA. */
2573 dr->head_index = dq->head_index;
2574 dr->tail_index = dq->tail_index;
2575 }
2576
2577 return error;
2578}
2579
Damjan Marion00a9dca2016-08-17 17:05:46 +02002580static u32
2581ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
Damjan Marionb4d89272016-05-12 22:14:45 +02002582{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002583 ixge_device_t *xd;
2584 ixge_regs_t *r;
Damjan Marionb4d89272016-05-12 22:14:45 +02002585 u32 old;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002586 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002587
2588 xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2589 r = xd->regs;
2590
2591 old = r->filter_control;
2592
John Lo4a302ee2020-05-12 22:34:39 -04002593 if (flags == ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002594 r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
John Lo4a302ee2020-05-12 22:34:39 -04002595 else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
Damjan Marionb4d89272016-05-12 22:14:45 +02002596 r->filter_control = old & ~(1 << 9);
John Lo4a302ee2020-05-12 22:34:39 -04002597 else
2598 return ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002599
2600 return old;
2601}
2602
/* One-time initialization of every discovered device: chip reset, PHY
   bring-up, ethernet interface registration, RX/TX DMA ring setup,
   interrupt mapping and MAC/filter configuration.  Called once from
   the ixge-process node before its main loop starts. */
static void
ixge_device_init (ixge_main_t * xm)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_device_t *xd;

  /* Reset chip(s). */
  vec_foreach (xd, xm->devices)
  {
    ixge_regs_t *r = xd->regs;
    const u32 reset_bit = (1 << 26) | (1 << 3);

    r->control |= reset_bit;

    /* No need to suspend. Timed to take ~1e-6 secs */
    while (r->control & reset_bit)
      ;

    /* Software loaded. */
    r->extended_control |= (1 << 28);

    ixge_phy_init (xd);

    /* Register ethernet interface. */
    {
      u8 addr8[6];
      u32 i, addr32[2];
      clib_error_t *error;

      /* Read the 48-bit MAC from the first receive-address register pair
         and unpack it byte by byte (little-endian within each u32). */
      addr32[0] = r->rx_ethernet_address0[0][0];
      addr32[1] = r->rx_ethernet_address0[0][1];
      for (i = 0; i < 6; i++)
	addr8[i] = addr32[i / 4] >> ((i % 4) * 8);

      error = ethernet_register_interface
	(vnm, ixge_device_class.index, xd->device_index,
	 /* ethernet address */ addr8,
	 &xd->vlib_hw_if_index, ixge_flag_change);
      if (error)
	clib_error_report (error);
    }

    {
      vnet_sw_interface_t *sw =
	vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
      xd->vlib_sw_if_index = sw->sw_if_index;
    }

    ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);

    /* TX ring is sized here (20 frames' worth); RX ring size was set
       before the RX init above. */
    xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;

    ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);

    /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
    r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
				      ixge_rx_queue_to_interrupt (0)) << 0);

    r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
				       ixge_tx_queue_to_interrupt (0)) << 8);

    /* No use in getting too many interrupts.
       Limit them to one every 3/4 ring size at line rate
       min sized packets.
       No need for this since kernel/vlib main loop provides adequate interrupt
       limiting scheme. */
    if (0)
      {
	f64 line_rate_max_pps =
	  10e9 / (8 * (64 + /* interframe padding */ 20));
	ixge_throttle_queue_interrupt (r, 0,
				       .75 * xm->n_descriptors[VLIB_RX] /
				       line_rate_max_pps);
      }

    /* Accept all multicast and broadcast packets. Should really add them
       to the dst_ethernet_address register array. */
    r->filter_control |= (1 << 10) | (1 << 8);

    /* Enable frames up to size in mac frame size register. */
    r->xge_mac.control |= 1 << 2;
    /* Jumbo frames: 9216 payload + 14 byte header; value lives in the
       upper 16 bits of the register. */
    r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;

    /* Enable all interrupts. */
    if (!IXGE_ALWAYS_POLL)
      r->interrupt.enable_write_1_to_set = ~0;
  }
}
2691
/* Main driver housekeeping process.  Performs one-time device init,
   then loops forever: debounces link-state changes signalled via
   EVENT_SET_FLAGS and polls hardware counters roughly every 30
   seconds (36-bit stat counters could otherwise overflow). */
static uword
ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword event_type, *event_data = 0;
  f64 timeout, link_debounce_deadline;

  ixge_device_init (xm);

  /* Clear all counters. */
  vec_foreach (xd, xm->devices)
  {
    /* Read (and thereby latch) current values, then zero the shadow copy. */
    ixge_update_counters (xd);
    clib_memset (xd->counters, 0, sizeof (xd->counters));
  }

  timeout = 30.0;
  /* "Infinite" deadline: no link debounce pending. */
  link_debounce_deadline = 1e70;

  while (1)
    {
      /* 36 bit stat counters could overflow in ~50 secs.
         We poll every 30 secs to be conservative. */
      vlib_process_wait_for_event_or_clock (vm, timeout);

      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case EVENT_SET_FLAGS:
	  /* Link-state interrupt seen: wait 1 ms for the link to settle
	     before reporting it (debounce). */
	  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
	  timeout = 1e-3;
	  break;

	case ~0:
	  /* No events found: timer expired. */
	  if (vlib_time_now (vm) > link_debounce_deadline)
	    {
	      /* Debounce interval elapsed: sample link status bit 30 on
	         every device and propagate it to vnet. */
	      vec_foreach (xd, xm->devices)
	      {
		ixge_regs_t *r = xd->regs;
		u32 v = r->xge_mac.link_status;
		uword is_up = (v & (1 << 30)) != 0;

		vnet_hw_interface_set_flags
		  (vnm, xd->vlib_hw_if_index,
		   is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
	      }
	      link_debounce_deadline = 1e70;
	      timeout = 30.0;
	    }
	  break;

	default:
	  ASSERT (0);
	}

      /* Reuse the event-data vector on the next iteration. */
      if (event_data)
	_vec_len (event_data) = 0;

      /* Query stats every 30 secs. */
      {
	f64 now = vlib_time_now (vm);
	if (now - xm->time_last_stats_update > 30)
	  {
	    xm->time_last_stats_update = now;
	    vec_foreach (xd, xm->devices) ixge_update_counters (xd);
	  }
      }
    }

  /* Not reached: process loops forever. */
  return 0;
}
2768
/* Housekeeping process node; registered from ixge_pci_init when the
   first device is found (see ixge_process for what it does). */
static vlib_node_registration_t ixge_process_node = {
  .function = ixge_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ixge-process",
};
2774
Damjan Marion00a9dca2016-08-17 17:05:46 +02002775clib_error_t *
2776ixge_init (vlib_main_t * vm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002777{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002778 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002779
2780 xm->vlib_main = vm;
Dave Barachb7b92992018-10-17 10:38:51 -04002781 clib_memset (&xm->tx_descriptor_template, 0,
2782 sizeof (xm->tx_descriptor_template));
2783 clib_memset (&xm->tx_descriptor_template_mask, 0,
2784 sizeof (xm->tx_descriptor_template_mask));
Damjan Marionb4d89272016-05-12 22:14:45 +02002785 xm->tx_descriptor_template.status0 =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002786 (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2787 IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2788 IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
Damjan Marionb4d89272016-05-12 22:14:45 +02002789 xm->tx_descriptor_template_mask.status0 = 0xffff;
2790 xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2791
2792 xm->tx_descriptor_template_mask.status0 &=
2793 ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2794 | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2795 xm->tx_descriptor_template_mask.status1 &=
2796 ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
Dave Barachf8d50682019-05-14 18:01:44 -04002797 return 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002798}
2799
/* *INDENT-OFF* */
/* Run ixge_init before the PCI bus scan so the descriptor templates
   exist when ixge_pci_init fires for matching devices. */
VLIB_INIT_FUNCTION (ixge_init) =
{
  .runs_before = VLIB_INITS("pci_bus_init"),
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002806
2807
2808static void
Damjan Marion23227982018-10-22 13:38:57 +02002809ixge_pci_intr_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h)
Damjan Marionb4d89272016-05-12 22:14:45 +02002810{
Damjan Marion23227982018-10-22 13:38:57 +02002811 uword private_data = vlib_pci_get_private_data (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002812
2813 vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
2814
2815 /* Let node know which device is interrupting. */
2816 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002817 vlib_node_runtime_t *rt =
2818 vlib_node_get_runtime (vm, ixge_input_node.index);
Damjan Marioncef87f12017-10-05 15:32:41 +02002819 rt->runtime_data[0] |= 1 << private_data;
Damjan Marionb4d89272016-05-12 22:14:45 +02002820 }
2821}
2822
/* Per-device PCI probe callback: map BAR 0, allocate and fill the
   device state, enable the input node, register the housekeeping
   process (first device only), then enable bus mastering and
   interrupts.  Returns a clib error on any PCI-layer failure. */
static clib_error_t *
ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
{
  ixge_main_t *xm = &ixge_main;
  clib_error_t *error = 0;
  void *r;
  ixge_device_t *xd;
  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
  /* NOTE(review): d appears to never be freed on any path below —
     confirm whether vlib_pci_free_device_info should be called. */
  vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);

  /* BAR 0 holds the memory-mapped device registers. */
  error = vlib_pci_map_region (vm, h, 0, &r);
  if (error)
    return error;

  vec_add2 (xm->devices, xd, 1);

  /* First device: bind the input node's function pointer. */
  if (vec_len (xm->devices) == 1)
    {
      ixge_input_node.function = ixge_input;
    }

  xd->pci_dev_handle = h;
  xd->device_id = d->device_id;
  xd->regs = r;
  xd->device_index = xd - xm->devices;
  xd->pci_function = addr->function;
  xd->per_interface_next_index = ~0;

  /* Stored index is read back in ixge_pci_intr_handler. */
  vlib_pci_set_private_data (vm, h, xd->device_index);

  /* Chip found so enable node. */
  {
    vlib_node_set_state (vm, ixge_input_node.index,
			 (IXGE_ALWAYS_POLL
			  ? VLIB_NODE_STATE_POLLING
			  : VLIB_NODE_STATE_INTERRUPT));

    //dev->private_data = xd->device_index;
  }

  /* First device also spawns the housekeeping process. */
  if (vec_len (xm->devices) == 1)
    {
      vlib_register_node (vm, &ixge_process_node);
      xm->process_node_index = ixge_process_node.index;
    }

  error = vlib_pci_bus_master_enable (vm, h);

  if (error)
    return error;

  return vlib_pci_intr_enable (vm, h);
}
2876
/* *INDENT-OFF* */
/* Register the driver for every supported Intel device ID;
   ixge_pci_init is invoked once per matching device on the bus. */
PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
  .init_function = ixge_pci_init,
  .interrupt_handler = ixge_pci_intr_handler,
  .supported_devices = {
#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
  foreach_ixge_pci_device_id
#undef _
  { 0 },
  },
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002889
Damjan Marion00a9dca2016-08-17 17:05:46 +02002890void
2891ixge_set_next_node (ixge_rx_next_t next, char *name)
Damjan Marionb4d89272016-05-12 22:14:45 +02002892{
2893 vlib_node_registration_t *r = &ixge_input_node;
2894
2895 switch (next)
2896 {
2897 case IXGE_RX_NEXT_IP4_INPUT:
2898 case IXGE_RX_NEXT_IP6_INPUT:
2899 case IXGE_RX_NEXT_ETHERNET_INPUT:
2900 r->next_nodes[next] = name;
2901 break;
2902
2903 default:
2904 clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2905 break;
2906 }
2907}
Damjan Marion00a9dca2016-08-17 17:05:46 +02002908
/* *INDENT-OFF* */
/* Plugin registration: disabled by default since this driver is for
   educational use only (see warning at top of file). */
VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .default_disabled = 1,
  .description = "Intel 82599 Family Native Driver (experimental)",
};
#endif /* __x86_64__ || __i386__ || __aarch64__ */

/* *INDENT-ON* */
Damjan Marion7bee80c2017-04-26 15:32:12 +02002918
Damjan Marion00a9dca2016-08-17 17:05:46 +02002919/*
2920 * fd.io coding-style-patch-verification: ON
2921 *
2922 * Local Variables:
2923 * eval: (c-set-style "gnu")
2924 * End:
2925 */