blob: 481db06594e6b5a08f6a2b06a7dc5c132ea5a595 [file] [log] [blame]
Damjan Marionb4d89272016-05-12 22:14:45 +02001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/*
17 * WARNING!
18 * This driver is not intended for production use and it is unsupported.
19 * It is provided for educational use only.
20 * Please use supported DPDK driver instead.
21 */
22
Gabriel Ganneb81831d2017-12-05 17:33:37 +010023#if __x86_64__ || __i386__ || __aarch64__
Damjan Marionb4d89272016-05-12 22:14:45 +020024#include <vppinfra/vector.h>
25
26#ifndef CLIB_HAVE_VEC128
27#warning HACK: ixge driver wont really work, missing u32x4
28typedef unsigned long long u32x4;
29#endif
30
31#include <vlib/vlib.h>
32#include <vlib/unix/unix.h>
33#include <vlib/pci/pci.h>
34#include <vnet/vnet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010035#include <ixge/ixge.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020036#include <vnet/ethernet/ethernet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010037#include <vnet/plugin/plugin.h>
38#include <vpp/app/version.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020039
/* Compile-time knobs (0 = disabled). */
#define IXGE_ALWAYS_POLL 0

#define EVENT_SET_FLAGS 0
#define IXGE_HWBP_RACE_ELOG 0

/* PCI vendor id used to match Intel devices. */
#define PCI_VENDOR_ID_INTEL 0x8086

/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
#define XGE_PHY_DEV_TYPE_PMA_PMD 1
#define XGE_PHY_DEV_TYPE_PHY_XS 4
#define XGE_PHY_ID1 0x2
#define XGE_PHY_ID2 0x3
#define XGE_PHY_CONTROL 0x0
#define XGE_PHY_CONTROL_RESET (1 << 15)

/* Global driver state; devices vector lives here. */
ixge_main_t ixge_main;
static vlib_node_registration_t ixge_input_node;
static vlib_node_registration_t ixge_process_node;
58
/* Acquire the device software semaphore.
   First spins until the hardware-granted bit (bit 0) of the
   software_semaphore register reads set, yielding the VLIB process
   between polls; then sets the software-owned bit (bit 1) and reads
   it back until the write sticks.  Must be paired with
   ixge_semaphore_release().  Only callable from process context
   (may suspend). */
static void
ixge_semaphore_get (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 i;

  i = 0;
  /* Poll once immediately; suspend 100us between subsequent polls. */
  while (!(r->software_semaphore & (1 << 0)))
    {
      if (i > 0)
	vlib_process_suspend (vm, 100e-6);
      i++;
    }
  /* Claim ownership (bit 1) and confirm the bit reads back set. */
  do
    {
      r->software_semaphore |= 1 << 1;
    }
  while (!(r->software_semaphore & (1 << 1)));
}
80
Damjan Marion00a9dca2016-08-17 17:05:46 +020081static void
82ixge_semaphore_release (ixge_device_t * xd)
Damjan Marionb4d89272016-05-12 22:14:45 +020083{
Damjan Marion00a9dca2016-08-17 17:05:46 +020084 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +020085 r->software_semaphore &= ~3;
86}
87
/* Acquire one or more software/firmware sync resources.
   sw_mask selects software-side resource bits; the corresponding
   firmware bits are the same mask shifted left by 5.  Loops until
   firmware does not hold any requested resource, taking the device
   semaphore around each inspect-and-claim, and suspending 10ms
   between attempts.  Release with
   ixge_software_firmware_sync_release().  Process context only. */
static void
ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 fw_mask = sw_mask << 5;
  u32 m, done = 0;

  while (!done)
    {
      ixge_semaphore_get (xd);
      m = r->software_firmware_sync;
      /* Free only if firmware holds none of the requested bits. */
      done = (m & fw_mask) == 0;
      if (done)
	r->software_firmware_sync = m | sw_mask;
      ixge_semaphore_release (xd);
      if (!done)
	vlib_process_suspend (vm, 10e-3);
    }
}
109
Damjan Marion00a9dca2016-08-17 17:05:46 +0200110static void
111ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
Damjan Marionb4d89272016-05-12 22:14:45 +0200112{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200113 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200114 ixge_semaphore_get (xd);
115 r->software_firmware_sync &= ~sw_mask;
116 ixge_semaphore_release (xd);
117}
118
/* Perform one clause-45 MDIO register access on the current PHY.
   dev_type is the MMD device type (5 bits), reg_index the 16-bit
   register address, v the value to write.  For reads (is_read != 0)
   the returned value is the register contents; for writes the input
   v is returned unchanged.  Takes the sw/fw sync resource for the
   active PHY around the whole transaction. */
u32
ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
			 u32 v, u32 is_read)
{
  ixge_regs_t *r = xd->regs;
  const u32 busy_bit = 1 << 30;
  u32 x;

  ASSERT (xd->phy_index < 2);
  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));

  ASSERT (reg_index < (1 << 16));
  ASSERT (dev_type < (1 << 5));
  /* For a write cycle the data register must be loaded first. */
  if (!is_read)
    r->xge_mac.phy_data = v;

  /* Address cycle. */
  x =
    reg_index | (dev_type << 16) | (xd->
				    phys[xd->phy_index].mdio_address << 21);
  r->xge_mac.phy_command = x | busy_bit;
  /* Busy wait timed to take 28e-6 secs. No suspend. */
  while (r->xge_mac.phy_command & busy_bit)
    ;

  /* Data cycle: opcode 2 = read, 1 = write (bits 26..27). */
  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
  while (r->xge_mac.phy_command & busy_bit)
    ;

  /* Read data comes back in the high 16 bits of phy_data. */
  if (is_read)
    v = r->xge_mac.phy_data >> 16;

  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));

  return v;
}
155
Damjan Marion00a9dca2016-08-17 17:05:46 +0200156static u32
157ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
Damjan Marionb4d89272016-05-12 22:14:45 +0200158{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200159 return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160 1);
161}
162
163static void
164ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165{
166 (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167 0);
168}
169
170static void
171ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172{
173 ixge_main_t *xm = &ixge_main;
174 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200175 u32 v;
176
177 v = 0;
178 v |= (sda != 0) << 3;
179 v |= (scl != 0) << 1;
180 xd->regs->i2c_control = v;
181}
182
Damjan Marion00a9dca2016-08-17 17:05:46 +0200183static void
184ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
Damjan Marionb4d89272016-05-12 22:14:45 +0200185{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200186 ixge_main_t *xm = &ixge_main;
187 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200188 u32 v;
189
190 v = xd->regs->i2c_control;
191 *sda = (v & (1 << 2)) != 0;
192 *scl = (v & (1 << 0)) != 0;
193}
194
/* Read one 16-bit word from the device EEPROM at the given word
   address.  Kicks off the read by writing the start bit plus the
   address (shifted past the start/done bits), then busy-waits on the
   done bit (bit 1); the data is returned in the high 16 bits.
   NOTE(review): unbounded busy-wait — hangs if hardware never sets
   the done bit. */
static u16
ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
  ixge_regs_t *r = xd->regs;
  u32 v;
  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
  /* Wait for done bit. */
  while (!((v = r->eeprom_read) & (1 << 1)))
    ;
  return v >> 16;
}
206
207static void
208ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
209{
210 u32 tx_disable_bit = 1 << 3;
211 if (enable)
212 xd->regs->sdp_control &= ~tx_disable_bit;
213 else
214 xd->regs->sdp_control |= tx_disable_bit;
215}
216
217static void
218ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
219{
220 u32 is_10g_bit = 1 << 5;
221 if (enable)
222 xd->regs->sdp_control |= is_10g_bit;
223 else
224 xd->regs->sdp_control &= ~is_10g_bit;
225}
226
/* Apply the PHY analog init sequence stored in the device EEPROM.
   The EEPROM word at 0x2b points at a list of (id, reg-values-addr)
   pairs terminated by 0xffff; the entry matching sfp_type points at
   a 0xffff-terminated list of values to write to
   core_analog_config.  Returns a clib error if no init sequence or
   no matching id is found, 0 on success.  Leaves the laser off and
   10g mode selected. */
static clib_error_t *
ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
{
  u16 a, id, reg_values_addr = 0;

  a = ixge_read_eeprom (xd, 0x2b);
  if (a == 0 || a == 0xffff)
    return clib_error_create ("no init sequence in eeprom");

  /* Walk the (id, address) pair list looking for sfp_type. */
  while (1)
    {
      id = ixge_read_eeprom (xd, ++a);
      if (id == 0xffff)
	break;
      reg_values_addr = ixge_read_eeprom (xd, ++a);
      if (id == sfp_type)
	break;
    }
  if (id != sfp_type)
    return clib_error_create ("failed to find id 0x%x", sfp_type);

  /* Resource bit 3: NOTE(review) presumably the analog-config
     resource — confirm against 82599 datasheet SWFW_SYNC bits. */
  ixge_software_firmware_sync (xd, 1 << 3);
  while (1)
    {
      u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
      if (v == 0xffff)
	break;
      xd->regs->core_analog_config = v;
    }
  ixge_software_firmware_sync_release (xd, 1 << 3);

  /* Make sure laser is off.  We'll turn on the laser when
     the interface is brought up. */
  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);

  return 0;
}
265
/* Bring the SFP link up or down.  On up: program the link mode /
   autoneg registers (first PMA/PMD 10g serial SFI, then 10g SFI
   serdes, restarting autoneg each time), wait for the link partner,
   then enable the laser.  On down: just disable the laser.
   NOTE(review): the link-partner poll is an unbounded busy-wait —
   hangs forever if no partner responds. */
static void
ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
{
  u32 v;

  if (is_up)
    {
      /* pma/pmd 10g serial SFI. */
      xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
      xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;

      /* Link mode field is bits 13..15; first pass uses mode 0. */
      v = xd->regs->xge_mac.auto_negotiation_control;
      v &= ~(7 << 13);
      v |= (0 << 13);
      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Wait for link partner ability bits to appear. */
      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
	;

      v = xd->regs->xge_mac.auto_negotiation_control;

      /* link mode 10g sfi serdes */
      v &= ~(7 << 13);
      v |= (3 << 13);

      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Dummy read; presumably flushes/latches link status. */
      xd->regs->xge_mac.link_status;
    }

  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);

  /* Give time for link partner to notice that we're up. */
  if (is_up && vlib_in_process_context (vlib_get_main ()))
    {
      vlib_process_suspend (vlib_get_main (), 300e-3);
    }
}
308
309always_inline ixge_dma_regs_t *
310get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
311{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200312 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200313 ASSERT (qi < 128);
314 if (rt == VLIB_RX)
315 return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316 else
317 return &r->tx_dma[qi];
318}
319
/* VNET admin up/down callback for the interface.
   On up: enable rx, tx dma, and rx queue 0's dma engine (bit 25),
   busy-waiting until the enable bit reads back set; on down: clear
   the rx/tx enables.  Finally propagates the state to the SFP
   (laser / autoneg).  Always returns 0 (no error). */
static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);

  if (is_up)
    {
      xd->regs->rx_enable |= 1;
      xd->regs->tx_dma_control |= 1;
      /* Enable rx queue 0 dma and wait for hardware to ack. */
      dr->control |= 1 << 25;
      while (!(dr->control & (1 << 25)))
	;
    }
  else
    {
      xd->regs->rx_enable &= ~1;
      xd->regs->tx_dma_control &= ~1;
    }

  ixge_sfp_device_up_down (xd, is_up);

  return /* no error */ 0;
}
347
/* Initialize an SFP-based "PHY": set up the bit-banged i2c bus,
   read the SFP module EEPROM (128 bytes at i2c address 0x50), and,
   if the EEPROM is valid, run the device's EEPROM-stored analog init
   sequence.  The phy's mdio_address is set to ~0 to mark that there
   is no MDIO-managed PHY. */
static void
ixge_sfp_phy_init (ixge_device_t * xd)
{
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  i2c_bus_t *ib = &xd->i2c_bus;

  /* i2c callbacks recover the device via private_data. */
  ib->private_data = xd->device_index;
  ib->put_bits = ixge_i2c_put_bits;
  ib->get_bits = ixge_i2c_get_bits;
  vlib_i2c_init (ib);

  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);

  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
    xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
  else
    {
      /* FIXME 5 => SR/LR eeprom ID. */
      clib_error_t *e =
	ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
      if (e)
	clib_error_report (e);
    }

  phy->mdio_address = ~0;
}
374
/* Initialize the PHY for the current device.
   SFP-based variants are delegated to ixge_sfp_phy_init().  For
   MDIO PHYs: probe the 32 possible MDIO addresses for a responding
   PHY, record its 32-bit id, emit an event log entry, then reset
   the PHY and wait for the reset to complete. */
static void
ixge_phy_init (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_phy_t *phy = xd->phys + xd->phy_index;

  switch (xd->device_id)
    {
    case IXGE_82599_sfp:
    case IXGE_82599_sfp_em:
    case IXGE_82599_sfp_fcoe:
      /* others? */
      return ixge_sfp_phy_init (xd);

    default:
      break;
    }

  /* Probe address of phy. */
  {
    u32 i, v;

    phy->mdio_address = ~0;
    for (i = 0; i < 32; i++)
      {
	phy->mdio_address = i;
	/* ID reads as all-ones or zero when nothing answers. */
	v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
	if (v != 0xffff && v != 0)
	  break;
      }

    /* No PHY found? */
    if (i >= 32)
      return;
  }

  phy->id =
    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));

  /* Log instance/id/address to the event log. */
  {
    ELOG_TYPE_DECLARE (e) =
    {
      .function = (char *) __FUNCTION__,.format =
	"ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
    struct
    {
      u32 instance, id, address;
    } *ed;
    ed = ELOG_DATA (&vm->elog_main, e);
    ed->instance = xd->device_index;
    ed->id = phy->id;
    ed->address = phy->mdio_address;
  }

  /* Reset phy. */
  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
		      XGE_PHY_CONTROL_RESET);

  /* Wait for self-clearing reset bit to clear. */
  do
    {
      vlib_process_suspend (vm, 1e-3);
    }
  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
	 XGE_PHY_CONTROL_RESET);
}
443
/* vppinfra format function: pretty-print a completed rx descriptor
   (ownership, length, l3 offset, layer-2/vlan info, ip4/ip6/tcp/udp
   recognition and checksum verdicts).  Takes one vararg:
   ixge_rx_from_hw_descriptor_t *. */
static u8 *
format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
{
  ixge_rx_from_hw_descriptor_t *d =
    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
  u32 s0 = d->status[0], s2 = d->status[2];
  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
  u32 indent = format_get_indent (s);

  s = format (s, "%s-owned",
	      (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
	      "hw");
  s =
    format (s, ", length this descriptor %d, l3 offset %d",
	    d->n_packet_bytes_this_descriptor,
	    IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
    s = format (s, ", end-of-packet");

  s = format (s, "\n%U", format_white_space, indent);

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
    s = format (s, "layer2 error");

  /* Pure layer-2 packets carry a 5-bit type and nothing more. */
  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
    {
      s = format (s, "layer 2 type %d", (s0 & 0x1f));
      return s;
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
    s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
		format_white_space, indent);

  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
    {
      s = format (s, "ip4%s",
		  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
		  "");
      if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
	s = format (s, " checksum %s",
		    (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
		    "bad" : "ok");
    }
  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
    s = format (s, "ip6%s",
		(s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
		"");
  is_tcp = is_udp = 0;
  if ((is_ip = (is_ip4 | is_ip6)))
    {
      is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
      is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
      if (is_tcp)
	s = format (s, ", tcp");
      if (is_udp)
	s = format (s, ", udp");
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
    s = format (s, ", tcp checksum %s",
		(s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
		"ok");
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
    s = format (s, ", udp checksum %s",
		(s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
		"ok");

  return s;
}
514
/* vppinfra format function: pretty-print a tx descriptor (buffer
   address/lengths plus decoded status0/status1 bit-fields).  Takes
   one vararg: ixge_tx_descriptor_t *. */
static u8 *
format_ixge_tx_descriptor (u8 * s, va_list * va)
{
  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
  u32 s0 = d->status0, s1 = d->status1;
  u32 indent = format_get_indent (s);
  u32 v;

  /* status1 bits 14+ hold the total packet byte count. */
  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
	      d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);

  s = format (s, "\n%U", format_white_space, indent);

  if ((v = (s0 >> 0) & 3))
    s = format (s, "reserved 0x%x, ", v);

  if ((v = (s0 >> 2) & 3))
    s = format (s, "mac 0x%x, ", v);

  /* Descriptor type field; 3 is the normal value, so only show others. */
  if ((v = (s0 >> 4) & 0xf) != 3)
    s = format (s, "type 0x%x, ", v);

  s = format (s, "%s%s%s%s%s%s%s%s",
	      (s0 & (1 << 8)) ? "eop, " : "",
	      (s0 & (1 << 9)) ? "insert-fcs, " : "",
	      (s0 & (1 << 10)) ? "reserved26, " : "",
	      (s0 & (1 << 11)) ? "report-status, " : "",
	      (s0 & (1 << 12)) ? "reserved28, " : "",
	      (s0 & (1 << 13)) ? "is-advanced, " : "",
	      (s0 & (1 << 14)) ? "vlan-enable, " : "",
	      (s0 & (1 << 15)) ? "tx-segmentation, " : "");

  if ((v = s1 & 0xf) != 0)
    s = format (s, "status 0x%x, ", v);

  if ((v = (s1 >> 4) & 0xf))
    s = format (s, "context 0x%x, ", v);

  if ((v = (s1 >> 8) & 0x3f))
    s = format (s, "options 0x%x, ", v);

  return s;
}
558
/* Packet-trace record captured by the rx path for "trace" CLI. */
typedef struct
{
  /* Descriptor as completed by hardware (before) and as re-armed
     for hardware (after). */
  ixge_descriptor_t before, after;

  /* VLIB buffer index the descriptor pointed at. */
  u32 buffer_index;

  /* Index into ixge_main.devices. */
  u16 device_index;

  u8 queue_index;

  /* Non-zero if this descriptor begins a packet (vs. a chained
     continuation segment). */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;
574
/* vppinfra format function for an ixge_rx_dma_trace_t: interface /
   queue, before/after descriptor state, the buffer metadata, and a
   dump of the first packet bytes (via the node's buffer formatter
   when this is a start-of-packet, hex otherwise). */
static u8 *
format_ixge_rx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s =
      format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
	      t->queue_index);
  }

  s = format (s, "\n%Ubefore: %U",
	      format_white_space, indent,
	      format_ixge_rx_from_hw_descriptor, &t->before);
  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
	      format_white_space, indent,
	      t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);

  s = format (s, "\n%Ubuffer 0x%x: %U", format_white_space, indent,
	      t->buffer_index, format_vnet_buffer_no_chain, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  /* Only start-of-packet bytes parse as a protocol header. */
  f = node->format_buffer;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}
614
/* Error counters: each _(symbol, "description") expands to an
   IXGE_ERROR_<symbol> enum value below and a counter string where
   the driver registers node errors. */
#define foreach_ixge_error \
  _ (none, "no error") \
  _ (tx_full_drops, "tx ring full drops") \
  _ (ip4_checksum_error, "ip4 checksum errors") \
  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

typedef enum
{
#define _(f,s) IXGE_ERROR_##f,
  foreach_ixge_error
#undef _
  IXGE_N_ERROR,
} ixge_error_t;
629
/* From one rx descriptor's status words (s00 = status[0],
   s02 = status[2]), compute the next node (ethernet/ip4/ip6 input,
   or drop on error, or the per-interface override), the error code,
   and the VNET l4-checksum buffer flags.
   NOTE(review): the IS_IP4_CHECKSUMMED / IS_IP6 masks are assigned
   into u8 locals — assumes those flag bits live in the low byte of
   the status word; verify against ixge.h. */
always_inline void
ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
				       u32 s00, u32 s02,
				       u8 * next0, u8 * error0, u32 * flags0)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u32 f0;

  e0 = IXGE_ERROR_none;
  /* Default: hand unrecognized packets to ethernet-input. */
  n0 = IXGE_RX_NEXT_ETHERNET_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
	? IXGE_ERROR_ip4_checksum_error : e0);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;

  /* Per-interface redirect (set via the api) overrides protocol
     dispatch. */
  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;

  /* L4 checksum flags: COMPUTED if hardware checked tcp/udp,
     CORRECT unless an error bit is set. */
  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
		| IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
	? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
		 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
	 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  *error0 = e0;
  *next0 = n0;
  *flags0 = f0;
}
668
669always_inline void
Damjan Marion00a9dca2016-08-17 17:05:46 +0200670ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
671 u32 s00, u32 s02,
Damjan Marionb4d89272016-05-12 22:14:45 +0200672 u32 s10, u32 s12,
673 u8 * next0, u8 * error0, u32 * flags0,
674 u8 * next1, u8 * error1, u32 * flags1)
675{
676 u8 is0_ip4, is0_ip6, n0, e0;
677 u8 is1_ip4, is1_ip6, n1, e1;
678 u32 f0, f1;
679
680 e0 = e1 = IXGE_ERROR_none;
681 n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
682
683 is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
684 is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
685
686 n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
687 n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
688
689 e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200690 ? IXGE_ERROR_ip4_checksum_error : e0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200691 e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200692 ? IXGE_ERROR_ip4_checksum_error : e1);
Damjan Marionb4d89272016-05-12 22:14:45 +0200693
694 is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
695 is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696
697 n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
698 n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
699
700 n0 = (xd->per_interface_next_index != ~0) ?
701 xd->per_interface_next_index : n0;
702 n1 = (xd->per_interface_next_index != ~0) ?
703 xd->per_interface_next_index : n1;
704
705 /* Check for error. */
706 n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
707 n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
708
709 *error0 = e0;
710 *error1 = e1;
711
712 *next0 = n0;
713 *next1 = n1;
714
715 f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
716 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200717 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200718 f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
719 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200720 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200721
722 f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
723 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200724 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200725 f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
726 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200727 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200728
729 *flags0 = f0;
730 *flags1 = f1;
731}
732
/* Record packet traces for a batch of completed rx descriptors.
   before_descriptors/before_buffers describe the descriptors as
   completed by hardware; after_descriptors the re-armed state.
   For each buffer: re-derive the next node (so the trace shows where
   the packet went), add a trace record, and snapshot descriptor,
   buffer metadata and initial packet bytes.  Start-of-packet state
   is threaded across calls via dq->rx. */
static void
ixge_rx_trace (ixge_main_t * xm,
	       ixge_device_t * xd,
	       ixge_dma_queue_t * dq,
	       ixge_descriptor_t * before_descriptors,
	       u32 * before_buffers,
	       ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_rx_from_hw_descriptor_t *bd;
  ixge_rx_to_hw_descriptor_t *ad;
  u32 *b, n_left, is_sop, next_index_sop;

  n_left = n_descriptors;
  b = before_buffers;
  bd = &before_descriptors->rx_from_hw;
  ad = &after_descriptors->rx_to_hw;
  /* Resume start-of-packet tracking saved by the rx node. */
  is_sop = dq->rx.is_start_of_packet;
  next_index_sop = dq->rx.saved_start_of_packet_next_index;

  /* Two descriptors per iteration. */
  while (n_left >= 2)
    {
      u32 bi0, bi1, flags0, flags1;
      vlib_buffer_t *b0, *b1;
      ixge_rx_dma_trace_t *t0, *t1;
      u8 next0, error0, next1, error1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      ixge_rx_next_and_error_from_status_x2 (xd,
					     bd[0].status[0], bd[0].status[2],
					     bd[1].status[0], bd[1].status[2],
					     &next0, &error0, &flags0,
					     &next1, &error1, &flags1);

      /* Chained continuation buffers inherit the SOP's next index. */
      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      next_index_sop = is_sop ? next1 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t1->before.rx_from_hw = bd[1];
      t0->after.rx_to_hw = ad[0];
      t1->after.rx_to_hw = ad[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      /* Copy buffer metadata (not pre_data), then snapshot the first
         packet bytes into pre_data for display. */
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
	      sizeof (t1->buffer.pre_data));

      b += 2;
      bd += 2;
      ad += 2;
    }

  /* Remaining single descriptor, if any. */
  while (n_left >= 1)
    {
      u32 bi0, flags0;
      vlib_buffer_t *b0;
      ixge_rx_dma_trace_t *t0;
      u8 next0, error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      ixge_rx_next_and_error_from_status_x1 (xd,
					     bd[0].status[0], bd[0].status[2],
					     &next0, &error0, &flags0);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t0->after.rx_to_hw = ad[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));

      b += 1;
      bd += 1;
      ad += 1;
    }
}
844
/* Packet-trace record captured by the tx path. */
typedef struct
{
  /* Descriptor as handed to hardware. */
  ixge_tx_descriptor_t descriptor;

  /* VLIB buffer index being transmitted. */
  u32 buffer_index;

  /* Index into ixge_main.devices. */
  u16 device_index;

  u8 queue_index;

  /* Non-zero if this descriptor begins a packet. */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;
860
/* vppinfra format function for an ixge_tx_dma_trace_t: interface /
   queue, the tx descriptor, buffer metadata, and the first packet
   bytes (parsed as an ethernet header when this is a
   start-of-packet, hex otherwise). */
static u8 *
format_ixge_tx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s =
      format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
	      t->queue_index);
  }

  s = format (s, "\n%Udescriptor: %U",
	      format_white_space, indent,
	      format_ixge_tx_descriptor, &t->descriptor);

  s = format (s, "\n%Ubuffer 0x%x: %U", format_white_space, indent,
	      t->buffer_index, format_vnet_buffer_no_chain, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  f = format_ethernet_header_with_length;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}
897
/* TX-path state threaded through successive ixge_tx_no_wrap() calls so a
   packet whose buffer chain spans the ring-wrap boundary is handled
   correctly. */
typedef struct
{
  /* Node runtime of the caller; used for tracing. */
  vlib_node_runtime_t *node;

  /* Non-zero when the next descriptor to fill begins a new packet. */
  u32 is_start_of_packet;

  /* Running byte count of the packet currently being laid out. */
  u32 n_bytes_in_packet;

  /* Descriptor that started the in-progress packet; its status1 gets the
     final packet length. */
  ixge_tx_descriptor_t *start_of_packet_descriptor;
} ixge_tx_state_t;
908
/* Record one ixge_tx_dma_trace_t per descriptor/buffer pair just handed to
   the hardware.  Processes descriptors two at a time, then a cleanup loop
   for the remainder.  The is_sop flag is threaded from buffer to buffer:
   a buffer without VLIB_BUFFER_NEXT_PRESENT ends its packet, so the
   following buffer starts a new one. */
static void
ixge_tx_trace (ixge_main_t * xm,
	       ixge_device_t * xd,
	       ixge_dma_queue_t * dq,
	       ixge_tx_state_t * tx_state,
	       ixge_tx_descriptor_t * descriptors,
	       u32 * buffers, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = tx_state->node;
  ixge_tx_descriptor_t *d;
  u32 *b, n_left, is_sop;

  n_left = n_descriptors;
  b = buffers;
  d = descriptors;
  /* Continue the SOP/EOP state from the caller's tx_state. */
  is_sop = tx_state->is_start_of_packet;

  while (n_left >= 2)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      ixge_tx_dma_trace_t *t0, *t1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      /* is_sop must be updated between t0 and t1: b0's EOP decides
         whether b1 starts a packet. */
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->descriptor = d[0];
      t1->descriptor = d[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      /* Copy the buffer header (everything before pre_data), then the
         leading packet bytes into pre_data.  The b0->pre_data in the t1
         copy below is benign: sizeof is identical for b0 and b1. */
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
	      sizeof (t1->buffer.pre_data));

      b += 2;
      d += 2;
    }

  /* Single-descriptor cleanup loop; mirrors the dual loop above. */
  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      ixge_tx_dma_trace_t *t0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->descriptor = d[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));

      b += 1;
      d += 1;
    }
}
994
995always_inline uword
996ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
997{
998 i32 d = i1 - i0;
999 ASSERT (i0 < q->n_descriptors);
1000 ASSERT (i1 < q->n_descriptors);
1001 return d < 0 ? q->n_descriptors + d : d;
1002}
1003
1004always_inline uword
1005ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1006{
1007 u32 d = i0 + i1;
1008 ASSERT (i0 < q->n_descriptors);
1009 ASSERT (i1 < q->n_descriptors);
1010 d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1011 return d;
1012}
1013
1014always_inline uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02001015ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1016 ixge_tx_descriptor_t * d)
Damjan Marionb4d89272016-05-12 22:14:45 +02001017{
1018 u32 cmp;
1019
1020 cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1021 ^ xm->tx_descriptor_template.status0);
1022 if (cmp)
1023 return 0;
1024 cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1025 ^ xm->tx_descriptor_template.status1);
1026 if (cmp)
1027 return 0;
1028
1029 return 1;
1030}
1031
/* Fill a contiguous (non-wrapping) span of TX descriptors starting at
   start_descriptor_index with the given buffer chain segments.

   For each slot: the buffer index previously parked in
   dq->descriptor_buffer_indices is moved onto xm->tx_buffers_pending_free
   (it is now done being transmitted) and replaced with the new buffer.
   Descriptors get the buffer's physical address, length, and an EOP bit;
   the packet's total length is written into the *start-of-packet*
   descriptor's status1 (via d_sop), which is re-written as each buffer of
   the chain is added.

   SOP/EOP state is carried in/out through tx_state so a packet may span
   two calls (before/after the ring wrap).  Returns n_descriptors. */
static uword
ixge_tx_no_wrap (ixge_main_t * xm,
		 ixge_device_t * xd,
		 ixge_dma_queue_t * dq,
		 u32 * buffers,
		 u32 start_descriptor_index,
		 u32 n_descriptors, ixge_tx_state_t * tx_state)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_tx_descriptor_t *d, *d_sop;
  u32 n_left = n_descriptors;
  /* Append completed buffer indices past the current end of the pending
     free vector; the vector length is fixed up at the bottom. */
  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
  u32 *to_tx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 is_sop = tx_state->is_start_of_packet;
  u32 len_sop = tx_state->n_bytes_in_packet;
  u16 template_status = xm->tx_descriptor_template.status0;
  u32 descriptor_prefetch_rotor = 0;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index].tx;
  /* Mid-packet from a previous call: keep writing length into that
     packet's SOP descriptor. */
  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, fi0, len0;
      u32 bi1, fi1, len1;
      u8 is_eop0, is_eop1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      /* Descriptors are smaller than a cache line; only prefetch every
         other iteration. */
      if ((descriptor_prefetch_rotor & 0x3) == 0)
	CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);

      descriptor_prefetch_rotor += 2;

      bi0 = buffers[0];
      bi1 = buffers[1];

      /* Swap out the previously-transmitted buffer index (fi*) for the
         new one; only advance to_free when the old slot was occupied. */
      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      to_free[0] = fi1 = to_tx[1];
      to_tx[1] = bi1;
      to_free += fi1 != 0;

      buffers += 2;
      n_left -= 2;
      to_tx += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      /* No chained next buffer => this buffer ends its packet. */
      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
      is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;
      len1 = b1->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[1].buffer_address = vlib_buffer_get_pa (vm, b1);

      d[0].n_bytes_this_buffer = len0;
      d[1].n_bytes_this_buffer = len1;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
      d[1].status0 =
	template_status | (is_eop1 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      /* Accumulate packet length and stamp it into the SOP descriptor;
         restart the count when a new packet begins. */
      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;

      len_sop = (is_sop ? 0 : len_sop) + len1;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop1 ? d : d_sop;

      is_sop = is_eop1;
    }

  /* Single-descriptor cleanup loop; same steps as above. */
  while (n_left > 0)
    {
      vlib_buffer_t *b0;
      u32 bi0, fi0, len0;
      u8 is_eop0;

      bi0 = buffers[0];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      buffers += 1;
      n_left -= 1;
      to_tx += 1;

      b0 = vlib_get_buffer (vm, bi0);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[0].n_bytes_this_buffer = len0;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;
    }

  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
    {
      to_tx =
	vec_elt_at_index (dq->descriptor_buffer_indices,
			  start_descriptor_index);
      ixge_tx_trace (xm, xd, dq, tx_state,
		     &dq->descriptors[start_descriptor_index].tx, to_tx,
		     n_descriptors);
    }

  /* Commit the buffers appended past vec_end above. */
  _vec_len (xm->tx_buffers_pending_free) =
    to_free - xm->tx_buffers_pending_free;

  /* When we are done d_sop can point to end of ring.  Wrap it if so. */
  {
    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;

    ASSERT (d_sop - d_start <= dq->n_descriptors);
    d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
  }

  /* Export SOP/EOP state for a possible follow-on call after ring wrap. */
  tx_state->is_start_of_packet = is_sop;
  tx_state->start_of_packet_descriptor = d_sop;
  tx_state->n_bytes_in_packet = len_sop;

  return n_descriptors;
}
1195
/* Device TX function: place the frame's buffers on TX queue 0's descriptor
   ring, trim whole packets from the tail if the ring lacks room, bump the
   hardware tail pointer, and free buffers whose transmission completed.
   Returns f->n_vectors (dropped buffers are accounted via error counters,
   not the return value). */
static uword
ixge_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
  ixge_dma_queue_t *dq;
  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
  u32 queue_index = 0;		/* fixme parameter */
  ixge_tx_state_t tx_state;

  tx_state.node = node;
  tx_state.is_start_of_packet = 1;
  tx_state.start_of_packet_descriptor = 0;
  tx_state.n_bytes_in_packet = 0;

  from = vlib_frame_vector_args (f);

  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);

  /* Refresh our view of how far the hardware has consumed the ring. */
  dq->head_index = dq->tx.head_index_write_back[0];

  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
  n_left_tx = dq->n_descriptors - 1;
  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);

  _vec_len (xm->tx_buffers_pending_free) = 0;

  n_descriptors_to_tx = f->n_vectors;
  n_tail_drop = 0;
  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
    {
      i32 i, n_ok, i_eop, i_sop;

      /* Ring is too full: scan backwards from the last buffer that fits
         to find a whole-packet boundary; everything after it is dropped
         so we never transmit a truncated chain. */
      i_sop = i_eop = ~0;
      for (i = n_left_tx - 1; i >= 0; i--)
	{
	  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
	  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      if (i_sop != ~0 && i_eop != ~0)
		break;
	      i_eop = i;
	      i_sop = i + 1;
	    }
	}
      /* NOTE(review): treating i == 0 as "nothing fits" even when an EOP
         was found looks overly conservative (possible off-by-one) —
         confirm intent before changing. */
      if (i == 0)
	n_ok = 0;
      else
	n_ok = i_eop + 1;

      {
	ELOG_TYPE_DECLARE (e) =
	{
	  .function = (char *) __FUNCTION__,.format =
	    "ixge %d, ring full to tx %d head %d tail %d",.format_args =
	    "i2i2i2i2",};
	struct
	{
	  u16 instance, to_tx, head, tail;
	} *ed;
	ed = ELOG_DATA (&vm->elog_main, e);
	ed->instance = xd->device_index;
	ed->to_tx = n_descriptors_to_tx;
	ed->head = dq->head_index;
	ed->tail = dq->tail_index;
      }

      if (n_ok < n_descriptors_to_tx)
	{
	  /* Queue the tail-dropped buffers for freeing and count them. */
	  n_tail_drop = n_descriptors_to_tx - n_ok;
	  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
	  vlib_error_count (vm, ixge_input_node.index,
			    IXGE_ERROR_tx_full_drops, n_tail_drop);
	}

      n_descriptors_to_tx = n_ok;
    }

  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;

  /* Process from tail to end of descriptor ring. */
  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
    {
      u32 n =
	clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
      n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
      from += n;
      n_descriptors_to_tx -= n;
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* Wrapped: continue filling from the start of the ring. */
  if (n_descriptors_to_tx > 0)
    {
      u32 n =
	ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
      from += n;
      ASSERT (n == n_descriptors_to_tx);
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* We should only get full packets. */
  ASSERT (tx_state.is_start_of_packet);

  /* Report status when last descriptor is done. */
  {
    u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
    d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
  }

  /* Give new descriptors to hardware. */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);

    /* Descriptor writes must be visible before the tail bump. */
    CLIB_MEMORY_BARRIER ();

    dr->tail_index = dq->tail_index;
  }

  /* Free any buffers that are done. */
  {
    u32 n = _vec_len (xm->tx_buffers_pending_free);
    if (n > 0)
      {
	vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
	_vec_len (xm->tx_buffers_pending_free) = 0;
	ASSERT (dq->tx.n_buffers_on_ring >= n);
	/* Tail-dropped buffers never went on the ring; don't subtract
	   them from the on-ring count. */
	dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
      }
  }

  return f->n_vectors;
}
1337
1338static uword
1339ixge_rx_queue_no_wrap (ixge_main_t * xm,
1340 ixge_device_t * xd,
1341 ixge_dma_queue_t * dq,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001342 u32 start_descriptor_index, u32 n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +02001343{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001344 vlib_main_t *vm = xm->vlib_main;
1345 vlib_node_runtime_t *node = dq->rx.node;
1346 ixge_descriptor_t *d;
1347 static ixge_descriptor_t *d_trace_save;
1348 static u32 *d_trace_buffers;
Damjan Marionb4d89272016-05-12 22:14:45 +02001349 u32 n_descriptors_left = n_descriptors;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001350 u32 *to_rx =
1351 vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1352 u32 *to_add;
Damjan Marionb4d89272016-05-12 22:14:45 +02001353 u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1354 u32 bi_last = dq->rx.saved_last_buffer_index;
1355 u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1356 u32 is_sop = dq->rx.is_start_of_packet;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001357 u32 next_index, n_left_to_next, *to_next;
Damjan Marionb4d89272016-05-12 22:14:45 +02001358 u32 n_packets = 0;
1359 u32 n_bytes = 0;
1360 u32 n_trace = vlib_get_trace_count (vm, node);
Dave Barach11fb09e2020-08-06 12:10:09 -04001361 vlib_buffer_t *b_last, b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001362
1363 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1364 d = &dq->descriptors[start_descriptor_index];
1365
Dave Barach11fb09e2020-08-06 12:10:09 -04001366 b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001367 next_index = dq->rx.next_index;
1368
1369 if (n_trace > 0)
1370 {
1371 u32 n = clib_min (n_trace, n_descriptors);
1372 if (d_trace_save)
1373 {
1374 _vec_len (d_trace_save) = 0;
1375 _vec_len (d_trace_buffers) = 0;
1376 }
1377 vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1378 vec_add (d_trace_buffers, to_rx, n);
1379 }
1380
1381 {
1382 uword l = vec_len (xm->rx_buffers_to_add);
1383
1384 if (l < n_descriptors_left)
1385 {
1386 u32 n_to_alloc = 2 * dq->n_descriptors - l;
1387 u32 n_allocated;
1388
1389 vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1390
1391 _vec_len (xm->rx_buffers_to_add) = l;
Damjan Marioncef87f12017-10-05 15:32:41 +02001392 n_allocated =
1393 vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
Damjan Marionb4d89272016-05-12 22:14:45 +02001394 _vec_len (xm->rx_buffers_to_add) += n_allocated;
1395
Damjan Marion00a9dca2016-08-17 17:05:46 +02001396 /* Handle transient allocation failure */
1397 if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
Damjan Marionb4d89272016-05-12 22:14:45 +02001398 {
1399 if (n_allocated == 0)
1400 vlib_error_count (vm, ixge_input_node.index,
1401 IXGE_ERROR_rx_alloc_no_physmem, 1);
1402 else
1403 vlib_error_count (vm, ixge_input_node.index,
1404 IXGE_ERROR_rx_alloc_fail, 1);
1405
1406 n_descriptors_left = l + n_allocated;
1407 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001408 n_descriptors = n_descriptors_left;
Damjan Marionb4d89272016-05-12 22:14:45 +02001409 }
1410
1411 /* Add buffers from end of vector going backwards. */
1412 to_add = vec_end (xm->rx_buffers_to_add) - 1;
1413 }
1414
1415 while (n_descriptors_left > 0)
1416 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001417 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
Damjan Marionb4d89272016-05-12 22:14:45 +02001418
1419 while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1420 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001421 vlib_buffer_t *b0, *b1;
Damjan Marion8f499362018-10-22 13:07:02 +02001422 vlib_buffer_t *f0, *f1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001423 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1424 u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1425 u8 is_eop0, error0, next0;
1426 u8 is_eop1, error1, next1;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001427 ixge_descriptor_t d0, d1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001428
1429 vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1430 vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1431
Damjan Marion00a9dca2016-08-17 17:05:46 +02001432 CLIB_PREFETCH (d + 2, 32, STORE);
Damjan Marionb4d89272016-05-12 22:14:45 +02001433
Damjan Marion00a9dca2016-08-17 17:05:46 +02001434 d0.as_u32x4 = d[0].as_u32x4;
1435 d1.as_u32x4 = d[1].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001436
1437 s20 = d0.rx_from_hw.status[2];
1438 s21 = d1.rx_from_hw.status[2];
1439
1440 s00 = d0.rx_from_hw.status[0];
1441 s01 = d1.rx_from_hw.status[0];
1442
Damjan Marion00a9dca2016-08-17 17:05:46 +02001443 if (!
1444 ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001445 goto found_hw_owned_descriptor_x2;
1446
1447 bi0 = to_rx[0];
1448 bi1 = to_rx[1];
1449
1450 ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1451 fi0 = to_add[0];
1452 fi1 = to_add[-1];
1453
1454 to_rx[0] = fi0;
1455 to_rx[1] = fi1;
1456 to_rx += 2;
1457 to_add -= 2;
1458
Damjan Marioncef87f12017-10-05 15:32:41 +02001459#if 0
Steven899a84b2018-01-29 20:09:09 -08001460 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1461 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi1));
1462 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
1463 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi1));
Damjan Marioncef87f12017-10-05 15:32:41 +02001464#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001465
1466 b0 = vlib_get_buffer (vm, bi0);
1467 b1 = vlib_get_buffer (vm, bi1);
1468
Damjan Marionb4d89272016-05-12 22:14:45 +02001469 CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1470 CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1471
1472 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1473 is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1474
1475 ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1476 &next0, &error0, &flags0,
1477 &next1, &error1, &flags1);
1478
1479 next0 = is_sop ? next0 : next_index_sop;
1480 next1 = is_eop0 ? next1 : next0;
1481 next_index_sop = next1;
1482
1483 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1484 b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1485
1486 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1487 vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001488 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1489 vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001490
1491 b0->error = node->errors[error0];
1492 b1->error = node->errors[error1];
1493
1494 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1495 len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1496 n_bytes += len0 + len1;
1497 n_packets += is_eop0 + is_eop1;
1498
1499 /* Give new buffers to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001500 f0 = vlib_get_buffer (vm, fi0);
1501 f1 = vlib_get_buffer (vm, fi1);
1502 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
1503 d1.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001504 d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
1505 d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001506 d[0].as_u32x4 = d0.as_u32x4;
1507 d[1].as_u32x4 = d1.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001508
1509 d += 2;
1510 n_descriptors_left -= 2;
1511
1512 /* Point to either l2 or l3 header depending on next. */
1513 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001514 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001515 l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001516 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001517
1518 b0->current_length = len0 - l3_offset0;
1519 b1->current_length = len1 - l3_offset1;
1520 b0->current_data = l3_offset0;
1521 b1->current_data = l3_offset1;
1522
1523 b_last->next_buffer = is_sop ? ~0 : bi0;
1524 b0->next_buffer = is_eop0 ? ~0 : bi1;
1525 bi_last = bi1;
1526 b_last = b1;
1527
1528 if (CLIB_DEBUG > 0)
1529 {
1530 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1531 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1532
1533 if (is_eop0)
1534 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001535 u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1536 /* follow_buffer_next */ 1);
1537 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001538 }
1539 if (is_eop1)
1540 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001541 u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1542 /* follow_buffer_next */ 1);
1543 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001544 }
1545 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001546 if (0) /* "Dave" version */
1547 {
1548 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1549 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001550
Damjan Marion00a9dca2016-08-17 17:05:46 +02001551 if (is_eop0)
1552 {
1553 to_next[0] = bi_sop0;
1554 to_next++;
1555 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001556
Damjan Marion00a9dca2016-08-17 17:05:46 +02001557 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1558 to_next, n_left_to_next,
1559 bi_sop0, next0);
1560 }
1561 if (is_eop1)
1562 {
1563 to_next[0] = bi_sop1;
1564 to_next++;
1565 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001566
Damjan Marion00a9dca2016-08-17 17:05:46 +02001567 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1568 to_next, n_left_to_next,
1569 bi_sop1, next1);
1570 }
1571 is_sop = is_eop1;
1572 bi_sop = bi_sop1;
1573 }
1574 if (1) /* "Eliot" version */
1575 {
1576 /* Speculatively enqueue to cached next. */
1577 u8 saved_is_sop = is_sop;
1578 u32 bi_sop_save = bi_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +02001579
Damjan Marion00a9dca2016-08-17 17:05:46 +02001580 bi_sop = saved_is_sop ? bi0 : bi_sop;
1581 to_next[0] = bi_sop;
1582 to_next += is_eop0;
1583 n_left_to_next -= is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001584
Damjan Marion00a9dca2016-08-17 17:05:46 +02001585 bi_sop = is_eop0 ? bi1 : bi_sop;
1586 to_next[0] = bi_sop;
1587 to_next += is_eop1;
1588 n_left_to_next -= is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001589
Damjan Marion00a9dca2016-08-17 17:05:46 +02001590 is_sop = is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001591
Damjan Marion00a9dca2016-08-17 17:05:46 +02001592 if (PREDICT_FALSE
1593 (!(next0 == next_index && next1 == next_index)))
1594 {
1595 /* Undo speculation. */
1596 to_next -= is_eop0 + is_eop1;
1597 n_left_to_next += is_eop0 + is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001598
Damjan Marion00a9dca2016-08-17 17:05:46 +02001599 /* Re-do both descriptors being careful about where we enqueue. */
1600 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1601 if (is_eop0)
1602 {
1603 if (next0 != next_index)
1604 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1605 else
1606 {
1607 to_next[0] = bi_sop;
1608 to_next += 1;
1609 n_left_to_next -= 1;
1610 }
1611 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001612
Damjan Marion00a9dca2016-08-17 17:05:46 +02001613 bi_sop = is_eop0 ? bi1 : bi_sop;
1614 if (is_eop1)
1615 {
1616 if (next1 != next_index)
1617 vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1618 else
1619 {
1620 to_next[0] = bi_sop;
1621 to_next += 1;
1622 n_left_to_next -= 1;
1623 }
1624 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001625
Damjan Marion00a9dca2016-08-17 17:05:46 +02001626 /* Switch cached next index when next for both packets is the same. */
1627 if (is_eop0 && is_eop1 && next0 == next1)
1628 {
1629 vlib_put_next_frame (vm, node, next_index,
1630 n_left_to_next);
1631 next_index = next0;
1632 vlib_get_next_frame (vm, node, next_index,
1633 to_next, n_left_to_next);
1634 }
1635 }
1636 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001637 }
1638
Damjan Marion00a9dca2016-08-17 17:05:46 +02001639 /* Bail out of dual loop and proceed with single loop. */
Damjan Marionb4d89272016-05-12 22:14:45 +02001640 found_hw_owned_descriptor_x2:
1641
1642 while (n_descriptors_left > 0 && n_left_to_next > 0)
1643 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001644 vlib_buffer_t *b0;
Damjan Marion8f499362018-10-22 13:07:02 +02001645 vlib_buffer_t *f0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001646 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1647 u8 is_eop0, error0, next0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001648 ixge_descriptor_t d0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001649
Damjan Marion00a9dca2016-08-17 17:05:46 +02001650 d0.as_u32x4 = d[0].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001651
1652 s20 = d0.rx_from_hw.status[2];
1653 s00 = d0.rx_from_hw.status[0];
1654
Damjan Marion00a9dca2016-08-17 17:05:46 +02001655 if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001656 goto found_hw_owned_descriptor_x1;
1657
1658 bi0 = to_rx[0];
1659 ASSERT (to_add >= xm->rx_buffers_to_add);
1660 fi0 = to_add[0];
1661
1662 to_rx[0] = fi0;
1663 to_rx += 1;
1664 to_add -= 1;
1665
Damjan Marioncef87f12017-10-05 15:32:41 +02001666#if 0
Steven899a84b2018-01-29 20:09:09 -08001667 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1668 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
Damjan Marioncef87f12017-10-05 15:32:41 +02001669#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001670
1671 b0 = vlib_get_buffer (vm, bi0);
1672
Damjan Marionb4d89272016-05-12 22:14:45 +02001673 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1674 ixge_rx_next_and_error_from_status_x1
Damjan Marion00a9dca2016-08-17 17:05:46 +02001675 (xd, s00, s20, &next0, &error0, &flags0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001676
1677 next0 = is_sop ? next0 : next_index_sop;
1678 next_index_sop = next0;
1679
1680 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1681
1682 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001683 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001684
1685 b0->error = node->errors[error0];
1686
1687 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1688 n_bytes += len0;
1689 n_packets += is_eop0;
1690
1691 /* Give new buffer to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001692 f0 = vlib_get_buffer (vm, fi0);
1693 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001694 d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001695 d[0].as_u32x4 = d0.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001696
1697 d += 1;
1698 n_descriptors_left -= 1;
1699
1700 /* Point to either l2 or l3 header depending on next. */
1701 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001702 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001703 b0->current_length = len0 - l3_offset0;
1704 b0->current_data = l3_offset0;
1705
1706 b_last->next_buffer = is_sop ? ~0 : bi0;
1707 bi_last = bi0;
1708 b_last = b0;
1709
1710 bi_sop = is_sop ? bi0 : bi_sop;
1711
1712 if (CLIB_DEBUG > 0 && is_eop0)
1713 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001714 u8 *msg =
1715 vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1716 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001717 }
1718
Damjan Marion00a9dca2016-08-17 17:05:46 +02001719 if (0) /* "Dave" version */
1720 {
1721 if (is_eop0)
1722 {
1723 to_next[0] = bi_sop;
1724 to_next++;
1725 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001726
Damjan Marion00a9dca2016-08-17 17:05:46 +02001727 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1728 to_next, n_left_to_next,
1729 bi_sop, next0);
1730 }
1731 }
1732 if (1) /* "Eliot" version */
1733 {
1734 if (PREDICT_TRUE (next0 == next_index))
1735 {
1736 to_next[0] = bi_sop;
1737 to_next += is_eop0;
1738 n_left_to_next -= is_eop0;
1739 }
1740 else
1741 {
1742 if (next0 != next_index && is_eop0)
1743 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
Damjan Marionb4d89272016-05-12 22:14:45 +02001744
Damjan Marion00a9dca2016-08-17 17:05:46 +02001745 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1746 next_index = next0;
1747 vlib_get_next_frame (vm, node, next_index,
1748 to_next, n_left_to_next);
1749 }
1750 }
1751 is_sop = is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001752 }
1753 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1754 }
1755
Damjan Marion00a9dca2016-08-17 17:05:46 +02001756found_hw_owned_descriptor_x1:
Damjan Marionb4d89272016-05-12 22:14:45 +02001757 if (n_descriptors_left > 0)
1758 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1759
1760 _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1761
1762 {
1763 u32 n_done = n_descriptors - n_descriptors_left;
1764
1765 if (n_trace > 0 && n_done > 0)
1766 {
1767 u32 n = clib_min (n_trace, n_done);
1768 ixge_rx_trace (xm, xd, dq,
1769 d_trace_save,
1770 d_trace_buffers,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001771 &dq->descriptors[start_descriptor_index], n);
Damjan Marionb4d89272016-05-12 22:14:45 +02001772 vlib_set_trace_count (vm, node, n_trace - n);
1773 }
1774 if (d_trace_save)
1775 {
1776 _vec_len (d_trace_save) = 0;
1777 _vec_len (d_trace_buffers) = 0;
1778 }
1779
1780 /* Don't keep a reference to b_last if we don't have to.
1781 Otherwise we can over-write a next_buffer pointer after already haven
1782 enqueued a packet. */
1783 if (is_sop)
1784 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001785 b_last->next_buffer = ~0;
1786 bi_last = ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001787 }
1788
1789 dq->rx.n_descriptors_done_this_call = n_done;
1790 dq->rx.n_descriptors_done_total += n_done;
1791 dq->rx.is_start_of_packet = is_sop;
1792 dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1793 dq->rx.saved_last_buffer_index = bi_last;
1794 dq->rx.saved_start_of_packet_next_index = next_index_sop;
1795 dq->rx.next_index = next_index;
1796 dq->rx.n_bytes += n_bytes;
1797
1798 return n_packets;
1799 }
1800}
1801
/* Service one RX DMA ring: run the no-wrap worker over the span of
   descriptors the NIC has completed since the last call (splitting at the
   ring wrap point), recycle descriptors back to hardware, and return the
   number of packets handed to the vlib graph. */
static uword
ixge_rx_queue (ixge_main_t * xm,
	       ixge_device_t * xd,
	       vlib_node_runtime_t * node, u32 queue_index)
{
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
  uword n_packets = 0;
  u32 hw_head_index, sw_head_index;

  /* One time initialization. */
  if (!dq->rx.node)
    {
      dq->rx.node = node;
      dq->rx.is_start_of_packet = 1;
      dq->rx.saved_start_of_packet_buffer_index = ~0;
      dq->rx.saved_last_buffer_index = ~0;
    }

  dq->rx.next_index = node->cached_next_index;

  /* Reset per-call progress accounting before processing descriptors. */
  dq->rx.n_descriptors_done_total = 0;
  dq->rx.n_descriptors_done_this_call = 0;
  dq->rx.n_bytes = 0;

  /* Fetch head from hardware and compare to where we think we are. */
  hw_head_index = dr->head_index;
  sw_head_index = dq->head_index;

  /* Hardware has completed nothing new. */
  if (hw_head_index == sw_head_index)
    goto done;

  /* Hardware head is behind us numerically => it wrapped around the ring.
     First process from the software head to the end of the ring. */
  if (hw_head_index < sw_head_index)
    {
      u32 n_tried = dq->n_descriptors - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);

      /* Worker stopped early (e.g. hit a hardware-owned descriptor);
         don't process the second span. */
      if (dq->rx.n_descriptors_done_this_call != n_tried)
	goto done;
    }
  /* Remaining (or entire, in the non-wrapped case) span up to the
     hardware head. */
  if (hw_head_index >= sw_head_index)
    {
      u32 n_tried = hw_head_index - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);
    }

done:
  dq->head_index = sw_head_index;
  /* Tail advances by the number of descriptors refilled and returned to
     hardware this call. */
  dq->tail_index =
    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);

  /* Give tail back to hardware. */
  CLIB_MEMORY_BARRIER ();

  dr->tail_index = dq->tail_index;

  /* Account RX packets/bytes on the software interface counters.
     NOTE(review): thread index is hard-coded to 0 — verify this driver is
     only ever polled from the main thread. */
  vlib_increment_combined_counter (vnet_main.
				   interface_main.combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX,
				   0 /* thread_index */ ,
				   xd->vlib_sw_if_index, n_packets,
				   dq->rx.n_bytes);

  return n_packets;
}
1874
/* Handle one non-queue ("other") interrupt cause bit i (16..31 of the
   interrupt status register).  Bit 20 is the link-status-change cause and
   gets special handling; everything else is just event-logged. */
static void
ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;

  if (i != 20)
    {
      /* Log the cause using an enum-string table indexed by (i - 16). */
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
	  16,.enum_strings =
	{
	  "flow director",
	    "rx miss",
	    "pci exception",
	    "mailbox",
	    "link status change",
	    "linksec key exchange",
	    "manageability event",
	    "reserved23",
	    "sdp0",
	    "sdp1",
	    "sdp2",
	    "sdp3",
	    "ecc", "descriptor handler error", "tcp timer", "other",},};
      struct
      {
	u8 instance;
	u8 index;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->index = i - 16;
    }
  else
    {
      /* Link status change: sample the MAC link status register;
         bit 30 is link-up. */
      u32 v = r->xge_mac.link_status;
      uword is_up = (v & (1 << 30)) != 0;

      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
      struct
      {
	u32 instance, link_status;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->link_status = v;
      xd->link_status_at_last_link_change = v;

      /* Notify the ixge process; event data packs the up/down flag into
         the top bit alongside the hw interface index. */
      vlib_process_signal_event (vm, ixge_process_node.index,
				 EVENT_SET_FLAGS,
				 ((is_up << 31) | xd->vlib_hw_if_index));
    }
}
1934
1935always_inline u32
1936clean_block (u32 * b, u32 * t, u32 n_left)
1937{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001938 u32 *t0 = t;
Damjan Marionb4d89272016-05-12 22:14:45 +02001939
1940 while (n_left >= 4)
1941 {
1942 u32 bi0, bi1, bi2, bi3;
1943
1944 t[0] = bi0 = b[0];
1945 b[0] = 0;
1946 t += bi0 != 0;
1947
1948 t[0] = bi1 = b[1];
1949 b[1] = 0;
1950 t += bi1 != 0;
1951
1952 t[0] = bi2 = b[2];
1953 b[2] = 0;
1954 t += bi2 != 0;
1955
1956 t[0] = bi3 = b[3];
1957 b[3] = 0;
1958 t += bi3 != 0;
1959
1960 b += 4;
1961 n_left -= 4;
1962 }
1963
1964 while (n_left > 0)
1965 {
1966 u32 bi0;
1967
1968 t[0] = bi0 = b[0];
1969 b[0] = 0;
1970 t += bi0 != 0;
1971 b += 1;
1972 n_left -= 1;
1973 }
1974
1975 return t - t0;
1976}
1977
/* Reclaim TX descriptors the hardware has finished with: read the DMA'd
   head write-back location, free the corresponding buffers, and update
   ring bookkeeping.  Called per TX-queue interrupt. */
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
  u32 n_clean, *b, *t, *t0;
  i32 n_hw_owned_descriptors;
  i32 first_to_clean, last_to_clean;
  u64 hwbp_race = 0;

  /* Handle case where head write back pointer update
   * arrives after the interrupt during high PCI bus loads.
   */
  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
    {
      hwbp_race++;
      /* Only log the first spin of the race loop. */
      if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
	{
	  ELOG_TYPE_DECLARE (e) =
	  {
	    .function = (char *) __FUNCTION__,.format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
	      = "i4i4i4i4",};
	  struct
	  {
	    u32 instance, head_index, tail_index, n_buffers_on_ring;
	  } *ed;
	  ed = ELOG_DATA (&vm->elog_main, e);
	  ed->instance = xd->device_index;
	  ed->head_index = dq->head_index;
	  ed->tail_index = dq->tail_index;
	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
	}
    }

  /* Adopt the hardware's view of the head; everything between head and
     tail is still owned by hardware, the rest can be cleaned. */
  dq->head_index = dq->tx.head_index_write_back[0];
  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;

  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
    {
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
	  = "i4i4i4i4i4",};
      struct
      {
	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->head_index = dq->head_index;
      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
      ed->n_clean = n_clean;
      ed->retries = hwbp_race;
    }

  /*
   * This function used to wait until hardware owned zero descriptors.
   * At high PPS rates, that doesn't happen until the TX ring is
   * completely full of descriptors which need to be cleaned up.
   * That, in turn, causes TX ring-full drops and/or long RX service
   * interruptions.
   */
  if (n_clean == 0)
    return;

  /* Clean the n_clean descriptors prior to the reported hardware head */
  last_to_clean = dq->head_index - 1;
  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
    last_to_clean;

  first_to_clean = (last_to_clean) - (n_clean - 1);
  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
    first_to_clean;

  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
  t0 = t = xm->tx_buffers_pending_free;
  b = dq->descriptor_buffer_indices + first_to_clean;

  /* Wrap case: clean from first to end, then start to last */
  if (first_to_clean > last_to_clean)
    {
      /* NOTE(review): this cleans (n_descriptors - 1) - first_to_clean
         entries, i.e. it stops one slot short of ring index
         n_descriptors - 1 — confirm whether the last ring slot is
         intentionally reserved. */
      t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
      first_to_clean = 0;
      b = dq->descriptor_buffer_indices;
    }

  /* Typical case: clean from first to last */
  if (first_to_clean <= last_to_clean)
    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);

  if (t > t0)
    {
      /* Free the reclaimed buffers in one batch and shrink the pending
         list back to empty for the next call. */
      u32 n = t - t0;
      vlib_buffer_free_no_next (vm, t0, n);
      ASSERT (dq->tx.n_buffers_on_ring >= n);
      dq->tx.n_buffers_on_ring -= n;
      _vec_len (xm->tx_buffers_pending_free) = 0;
    }
}
2083
2084/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002085always_inline uword
2086ixge_interrupt_is_rx_queue (uword i)
2087{
2088 return i < 8;
2089}
Damjan Marionb4d89272016-05-12 22:14:45 +02002090
Damjan Marion00a9dca2016-08-17 17:05:46 +02002091always_inline uword
2092ixge_interrupt_is_tx_queue (uword i)
2093{
2094 return i >= 8 && i < 16;
2095}
Damjan Marionb4d89272016-05-12 22:14:45 +02002096
Damjan Marion00a9dca2016-08-17 17:05:46 +02002097always_inline uword
2098ixge_tx_queue_to_interrupt (uword i)
2099{
2100 return 8 + i;
2101}
Damjan Marionb4d89272016-05-12 22:14:45 +02002102
Damjan Marion00a9dca2016-08-17 17:05:46 +02002103always_inline uword
2104ixge_rx_queue_to_interrupt (uword i)
2105{
2106 return 0 + i;
2107}
Damjan Marionb4d89272016-05-12 22:14:45 +02002108
Damjan Marion00a9dca2016-08-17 17:05:46 +02002109always_inline uword
2110ixge_interrupt_rx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002111{
2112 ASSERT (ixge_interrupt_is_rx_queue (i));
2113 return i - 0;
2114}
2115
Damjan Marion00a9dca2016-08-17 17:05:46 +02002116always_inline uword
2117ixge_interrupt_tx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002118{
2119 ASSERT (ixge_interrupt_is_tx_queue (i));
2120 return i - 8;
2121}
2122
/* Service all pending interrupt causes for one device: read-and-ack the
   interrupt status register, then dispatch each set bit to the RX queue,
   TX queue, or "other" interrupt handler.  Returns the number of RX
   packets processed. */
static uword
ixge_device_input (ixge_main_t * xm,
		   ixge_device_t * xd, vlib_node_runtime_t * node)
{
  ixge_regs_t *r = xd->regs;
  u32 i, s;
  uword n_rx_packets = 0;

  /* Snapshot pending causes, then acknowledge them (write-1-to-clear)
     so they can re-assert. */
  s = r->interrupt.status_write_1_to_set;
  if (s)
    r->interrupt.status_write_1_to_clear = s;

  /* *INDENT-OFF* */
  foreach_set_bit (i, s, ({
    if (ixge_interrupt_is_rx_queue (i))
      n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));

    else if (ixge_interrupt_is_tx_queue (i))
      ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));

    else
      ixge_interrupt (xm, xd, i);
  }));
  /* *INDENT-ON* */

  return n_rx_packets;
}
2150
/* Input node function.  In interrupt mode, services only the devices whose
   bits are set in runtime_data[0] (set by the IRQ handler); in polling
   mode, services every device.  Returns total RX packets processed. */
static uword
ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword n_rx_packets = 0;

  if (node->state == VLIB_NODE_STATE_INTERRUPT)
    {
      uword i;

      /* Loop over devices with interrupts. */
      /* *INDENT-OFF* */
      foreach_set_bit (i, node->runtime_data[0], ({
	xd = vec_elt_at_index (xm->devices, i);
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts since we're going to stay in interrupt mode. */
	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }));
      /* *INDENT-ON* */

      /* Clear mask of devices with pending interrupts. */
      node->runtime_data[0] = 0;
    }
  else
    {
      /* Poll all devices for input/interrupts. */
      vec_foreach (xd, xm->devices)
	{
	  n_rx_packets += ixge_device_input (xm, xd, node);

	  /* Re-enable interrupts when switching out of polling mode. */
	  if (node->flags &
	      VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
	    xd->regs->interrupt.enable_write_1_to_set = ~0;
	}
    }

  return n_rx_packets;
}
2193
/* Human-readable strings for the driver's error counters, expanded from
   the foreach_ixge_error x-macro (order must match the error enum). */
static char *ixge_error_strings[] = {
#define _(n,s) s,
  foreach_ixge_error
#undef _
};
2199
/* Registration of the ixge device input node.  Disabled until an actual
   8259x device is probed; RX packets leave via the next_nodes below. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
  .function = ixge_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "ixge-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ixge_rx_dma_trace,

  .n_errors = IXGE_N_ERROR,
  .error_strings = ixge_error_strings,

  .n_next_nodes = IXGE_RX_N_NEXT,
  .next_nodes = {
    [IXGE_RX_NEXT_DROP] = "error-drop",
    [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
    [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
  },
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002226
Damjan Marion00a9dca2016-08-17 17:05:46 +02002227static u8 *
2228format_ixge_device_name (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002229{
Damjan Marion23227982018-10-22 13:38:57 +02002230 vlib_main_t *vm = vlib_get_main ();
Damjan Marionb4d89272016-05-12 22:14:45 +02002231 u32 i = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002232 ixge_main_t *xm = &ixge_main;
2233 ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
Damjan Marion23227982018-10-22 13:38:57 +02002234 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
Damjan Marioncef87f12017-10-05 15:32:41 +02002235 return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2236 addr->domain, addr->bus, addr->slot, addr->function);
Damjan Marionb4d89272016-05-12 22:14:45 +02002237}
2238
/* Per-counter attribute flags. */
#define IXGE_COUNTER_IS_64_BIT (1 << 0)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)

/* One flags byte per hardware counter, expanded from foreach_ixge_counter
   (order must match the counter table/offsets). */
static u8 ixge_counter_flags[] = {
#define _(a,f) 0,
#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
  foreach_ixge_counter
#undef _
#undef _64
};
2249
/* Accumulate the device's hardware statistics registers into the software
   counter totals in xd->counters.  Most registers are clear-on-read;
   those flagged otherwise are cleared explicitly, and 64-bit counters
   pick up their high word from the following register. */
static void
ixge_update_counters (ixge_device_t * xd)
{
  /* Byte offset for counter registers. */
  static u32 reg_offsets[] = {
#define _(a,f) (a) / sizeof (u32),
#define _64(a,f) _(a,f)
    foreach_ixge_counter
#undef _
#undef _64
  };
  volatile u32 *r = (volatile u32 *) xd->regs;
  int i;

  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
    {
      u32 o = reg_offsets[i];
      /* Volatile read of the low 32 bits (clears most counters). */
      xd->counters[i] += r[o];
      if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
	r[o] = 0;
      if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
    }
}
2274
Damjan Marion00a9dca2016-08-17 17:05:46 +02002275static u8 *
2276format_ixge_device_id (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002277{
2278 u32 device_id = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002279 char *t = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002280 switch (device_id)
2281 {
2282#define _(f,n) case n: t = #f; break;
2283 foreach_ixge_pci_device_id;
2284#undef _
2285 default:
2286 t = 0;
2287 break;
2288 }
2289 if (t == 0)
2290 s = format (s, "unknown 0x%x", device_id);
2291 else
2292 s = format (s, "%s", t);
2293 return s;
2294}
2295
Damjan Marion00a9dca2016-08-17 17:05:46 +02002296static u8 *
2297format_ixge_link_status (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002298{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002299 ixge_device_t *xd = va_arg (*args, ixge_device_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +02002300 u32 v = xd->link_status_at_last_link_change;
2301
2302 s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2303
2304 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002305 char *modes[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002306 "1g", "10g parallel", "10g serial", "autoneg",
2307 };
Damjan Marion00a9dca2016-08-17 17:05:46 +02002308 char *speeds[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002309 "unknown", "100m", "1g", "10g",
2310 };
2311 s = format (s, ", mode %s, speed %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002312 modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002313 }
2314
2315 return s;
2316}
2317
/* Format a full status report for one device: id, link state, PCIe link,
   PHY/SFP identification, RX/TX ring occupancy, and all non-zero
   hardware counters since the last clear. */
static u8 *
format_ixge_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  vlib_main_t *vm = vlib_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  u32 indent = format_get_indent (s);

  /* Refresh counters and the cached link status before printing. */
  ixge_update_counters (xd);
  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;

  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
	      format_ixge_device_id, xd->device_id,
	      format_white_space, indent + 2, format_ixge_link_status, xd);

  {
    /* PCIe link speed, if the PCI layer can describe the device. */
    vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
    vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);

    if (d)
      s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
		  format_vlib_pci_link_speed, d);
  }

  /* PHY via MDIO if present, otherwise SFP module EEPROM, otherwise none. */
  s = format (s, "\n%U", format_white_space, indent + 2);
  if (phy->mdio_address != ~0)
    s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
    s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
  else
    s = format (s, "PHY not found");

  /* FIXME */
  {
    /* RX queue 0 ring occupancy: descriptors completed by hardware but
       not yet processed by software. */
    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
    u32 hw_head_index = dr->head_index;
    u32 sw_head_index = dq->head_index;
    u32 nitems;

    nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
    s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
		format_white_space, indent + 2, nitems, dq->n_descriptors);

    s = format (s, "\n%U%d buffers in driver rx cache",
		format_white_space, indent + 2,
		vec_len (xm->rx_buffers_to_add));

    s = format (s, "\n%U%d buffers on tx queue 0 ring",
		format_white_space, indent + 2,
		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
  }
  {
    /* Dump every counter that changed since the last clear. */
    u32 i;
    u64 v;
    static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
      foreach_ixge_counter
#undef _
#undef _64
    };

    for (i = 0; i < ARRAY_LEN (names); i++)
      {
	v = xd->counters[i] - xd->counters_last_clear[i];
	if (v != 0)
	  s = format (s, "\n%U%-40U%16Ld",
		      format_white_space, indent + 2,
		      format_c_identifier, names[i], v);
      }
  }

  return s;
}
2397
Damjan Marion00a9dca2016-08-17 17:05:46 +02002398static void
2399ixge_clear_hw_interface_counters (u32 instance)
Damjan Marionb4d89272016-05-12 22:14:45 +02002400{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002401 ixge_main_t *xm = &ixge_main;
2402 ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002403 ixge_update_counters (xd);
2404 memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2405}
2406
2407/*
2408 * Dynamically redirect all pkts from a specific interface
2409 * to the specified node
2410 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002411static void
2412ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2413 u32 node_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02002414{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002415 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002416 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002417 ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002418
2419 /* Shut off redirection */
2420 if (node_index == ~0)
2421 {
2422 xd->per_interface_next_index = node_index;
2423 return;
2424 }
2425
2426 xd->per_interface_next_index =
2427 vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2428}
2429
2430
/* Device class registration: wires the ixge driver's TX, formatting,
   counter-clear, admin up/down and RX-redirect hooks into vnet. */
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (ixge_device_class) = {
  .name = "ixge",
  .tx_function = ixge_interface_tx,
  .format_device_name = format_ixge_device_name,
  .format_device = format_ixge_device,
  .format_tx_trace = format_ixge_tx_dma_trace,
  .clear_counters = ixge_clear_hw_interface_counters,
  .admin_up_down_function = ixge_interface_admin_up_down,
  .rx_redirect_to_node = ixge_set_interface_next_node,
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002443
Damjan Marion00a9dca2016-08-17 17:05:46 +02002444#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
Damjan Marionb4d89272016-05-12 22:14:45 +02002445
2446static clib_error_t *
2447ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2448{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002449 ixge_main_t *xm = &ixge_main;
2450 vlib_main_t *vm = xm->vlib_main;
2451 ixge_dma_queue_t *dq;
2452 clib_error_t *error = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002453
2454 vec_validate (xd->dma_queues[rt], queue_index);
2455 dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2456
Damjan Marion00a9dca2016-08-17 17:05:46 +02002457 if (!xm->n_descriptors_per_cache_line)
2458 xm->n_descriptors_per_cache_line =
2459 CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002460
Damjan Marion00a9dca2016-08-17 17:05:46 +02002461 if (!xm->n_bytes_in_rx_buffer)
Damjan Marionb4d89272016-05-12 22:14:45 +02002462 xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2463 xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
Damjan Marionb4d89272016-05-12 22:14:45 +02002464
Damjan Marion00a9dca2016-08-17 17:05:46 +02002465 if (!xm->n_descriptors[rt])
Damjan Marionb4d89272016-05-12 22:14:45 +02002466 xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2467
2468 dq->queue_index = queue_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002469 dq->n_descriptors =
2470 round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
Damjan Marionb4d89272016-05-12 22:14:45 +02002471 dq->head_index = dq->tail_index = 0;
2472
Damjan Marion68b4da62018-09-30 18:26:20 +02002473 dq->descriptors = vlib_physmem_alloc_aligned (vm, dq->n_descriptors *
2474 sizeof (dq->descriptors[0]),
2475 128 /* per chip spec */ );
2476 if (!dq->descriptors)
2477 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002478
Dave Barachb7b92992018-10-17 10:38:51 -04002479 clib_memset (dq->descriptors, 0,
2480 dq->n_descriptors * sizeof (dq->descriptors[0]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002481 vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2482
2483 if (rt == VLIB_RX)
2484 {
2485 u32 n_alloc, i;
2486
Damjan Marioncef87f12017-10-05 15:32:41 +02002487 n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
2488 vec_len (dq->descriptor_buffer_indices));
Damjan Marionb4d89272016-05-12 22:14:45 +02002489 ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2490 for (i = 0; i < n_alloc; i++)
2491 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002492 dq->descriptors[i].rx_to_hw.tail_address =
Damjan Marion8f499362018-10-22 13:07:02 +02002493 vlib_buffer_get_pa
2494 (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002495 }
2496 }
2497 else
2498 {
2499 u32 i;
2500
Damjan Marion68b4da62018-09-30 18:26:20 +02002501 dq->tx.head_index_write_back =
2502 vlib_physmem_alloc (vm, CLIB_CACHE_LINE_BYTES);
2503 if (!dq->tx.head_index_write_back)
2504 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002505
2506 for (i = 0; i < dq->n_descriptors; i++)
2507 dq->descriptors[i].tx = xm->tx_descriptor_template;
2508
2509 vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2510 }
2511
2512 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002513 ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002514 u64 a;
2515
Damjan Marion68b4da62018-09-30 18:26:20 +02002516 a = vlib_physmem_get_pa (vm, dq->descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002517 dr->descriptor_address[0] = a & 0xFFFFFFFF;
2518 dr->descriptor_address[1] = a >> (u64) 32;
2519 dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2520 dq->head_index = dq->tail_index = 0;
2521
2522 if (rt == VLIB_RX)
2523 {
2524 ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2525 dr->rx_split_control =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002526 ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2527 | ( /* lo free descriptor threshold (units of 64 descriptors) */
2528 (1 << 22)) | ( /* descriptor type: advanced one buffer */
2529 (1 << 25)) | ( /* drop if no descriptors available */
2530 (1 << 28)));
Damjan Marionb4d89272016-05-12 22:14:45 +02002531
2532 /* Give hardware all but last 16 cache lines' worth of descriptors. */
2533 dq->tail_index = dq->n_descriptors -
Damjan Marion00a9dca2016-08-17 17:05:46 +02002534 16 * xm->n_descriptors_per_cache_line;
Damjan Marionb4d89272016-05-12 22:14:45 +02002535 }
2536 else
2537 {
2538 /* Make sure its initialized before hardware can get to it. */
2539 dq->tx.head_index_write_back[0] = dq->head_index;
2540
Damjan Marion68b4da62018-09-30 18:26:20 +02002541 a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
Damjan Marionb4d89272016-05-12 22:14:45 +02002542 dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2543 dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2544 }
2545
2546 /* DMA on 82599 does not work with [13] rx data write relaxed ordering
2547 and [12] undocumented set. */
2548 if (rt == VLIB_RX)
2549 dr->dca_control &= ~((1 << 13) | (1 << 12));
2550
2551 CLIB_MEMORY_BARRIER ();
2552
2553 if (rt == VLIB_TX)
2554 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002555 xd->regs->tx_dma_control |= (1 << 0);
2556 dr->control |= ((32 << 0) /* prefetch threshold */
2557 | (64 << 8) /* host threshold */
2558 | (0 << 16) /* writeback threshold */ );
Damjan Marionb4d89272016-05-12 22:14:45 +02002559 }
2560
2561 /* Enable this queue and wait for hardware to initialize
2562 before adding to tail. */
2563 if (rt == VLIB_TX)
2564 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002565 dr->control |= 1 << 25;
2566 while (!(dr->control & (1 << 25)))
2567 ;
Damjan Marionb4d89272016-05-12 22:14:45 +02002568 }
2569
2570 /* Set head/tail indices and enable DMA. */
2571 dr->head_index = dq->head_index;
2572 dr->tail_index = dq->tail_index;
2573 }
2574
2575 return error;
2576}
2577
Damjan Marion00a9dca2016-08-17 17:05:46 +02002578static u32
2579ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
Damjan Marionb4d89272016-05-12 22:14:45 +02002580{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002581 ixge_device_t *xd;
2582 ixge_regs_t *r;
Damjan Marionb4d89272016-05-12 22:14:45 +02002583 u32 old;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002584 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002585
2586 xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2587 r = xd->regs;
2588
2589 old = r->filter_control;
2590
John Lo4a302ee2020-05-12 22:34:39 -04002591 if (flags == ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002592 r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
John Lo4a302ee2020-05-12 22:34:39 -04002593 else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
Damjan Marionb4d89272016-05-12 22:14:45 +02002594 r->filter_control = old & ~(1 << 9);
John Lo4a302ee2020-05-12 22:34:39 -04002595 else
2596 return ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002597
2598 return old;
2599}
2600
Damjan Marion00a9dca2016-08-17 17:05:46 +02002601static void
2602ixge_device_init (ixge_main_t * xm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002603{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002604 vnet_main_t *vnm = vnet_get_main ();
2605 ixge_device_t *xd;
Damjan Marionb4d89272016-05-12 22:14:45 +02002606
2607 /* Reset chip(s). */
2608 vec_foreach (xd, xm->devices)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002609 {
2610 ixge_regs_t *r = xd->regs;
2611 const u32 reset_bit = (1 << 26) | (1 << 3);
2612
2613 r->control |= reset_bit;
2614
2615 /* No need to suspend. Timed to take ~1e-6 secs */
2616 while (r->control & reset_bit)
2617 ;
2618
2619 /* Software loaded. */
2620 r->extended_control |= (1 << 28);
2621
2622 ixge_phy_init (xd);
2623
2624 /* Register ethernet interface. */
Damjan Marionb4d89272016-05-12 22:14:45 +02002625 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002626 u8 addr8[6];
2627 u32 i, addr32[2];
2628 clib_error_t *error;
Damjan Marionb4d89272016-05-12 22:14:45 +02002629
Damjan Marion00a9dca2016-08-17 17:05:46 +02002630 addr32[0] = r->rx_ethernet_address0[0][0];
2631 addr32[1] = r->rx_ethernet_address0[0][1];
2632 for (i = 0; i < 6; i++)
2633 addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
Damjan Marionb4d89272016-05-12 22:14:45 +02002634
Damjan Marion00a9dca2016-08-17 17:05:46 +02002635 error = ethernet_register_interface
2636 (vnm, ixge_device_class.index, xd->device_index,
2637 /* ethernet address */ addr8,
2638 &xd->vlib_hw_if_index, ixge_flag_change);
2639 if (error)
2640 clib_error_report (error);
Damjan Marionb4d89272016-05-12 22:14:45 +02002641 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002642
2643 {
2644 vnet_sw_interface_t *sw =
2645 vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
2646 xd->vlib_sw_if_index = sw->sw_if_index;
2647 }
2648
2649 ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2650
2651 xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
2652
2653 ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2654
2655 /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2656 r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2657 ixge_rx_queue_to_interrupt (0)) << 0);
2658
2659 r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2660 ixge_tx_queue_to_interrupt (0)) << 8);
2661
2662 /* No use in getting too many interrupts.
2663 Limit them to one every 3/4 ring size at line rate
2664 min sized packets.
2665 No need for this since kernel/vlib main loop provides adequate interrupt
2666 limiting scheme. */
2667 if (0)
2668 {
2669 f64 line_rate_max_pps =
2670 10e9 / (8 * (64 + /* interframe padding */ 20));
2671 ixge_throttle_queue_interrupt (r, 0,
2672 .75 * xm->n_descriptors[VLIB_RX] /
2673 line_rate_max_pps);
2674 }
2675
2676 /* Accept all multicast and broadcast packets. Should really add them
2677 to the dst_ethernet_address register array. */
2678 r->filter_control |= (1 << 10) | (1 << 8);
2679
2680 /* Enable frames up to size in mac frame size register. */
2681 r->xge_mac.control |= 1 << 2;
2682 r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2683
2684 /* Enable all interrupts. */
2685 if (!IXGE_ALWAYS_POLL)
2686 r->interrupt.enable_write_1_to_set = ~0;
2687 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002688}
2689
2690static uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02002691ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
Damjan Marionb4d89272016-05-12 22:14:45 +02002692{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002693 vnet_main_t *vnm = vnet_get_main ();
2694 ixge_main_t *xm = &ixge_main;
2695 ixge_device_t *xd;
2696 uword event_type, *event_data = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002697 f64 timeout, link_debounce_deadline;
2698
2699 ixge_device_init (xm);
2700
2701 /* Clear all counters. */
2702 vec_foreach (xd, xm->devices)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002703 {
2704 ixge_update_counters (xd);
Dave Barachb7b92992018-10-17 10:38:51 -04002705 clib_memset (xd->counters, 0, sizeof (xd->counters));
Damjan Marion00a9dca2016-08-17 17:05:46 +02002706 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002707
2708 timeout = 30.0;
2709 link_debounce_deadline = 1e70;
2710
2711 while (1)
2712 {
2713 /* 36 bit stat counters could overflow in ~50 secs.
Damjan Marion00a9dca2016-08-17 17:05:46 +02002714 We poll every 30 secs to be conservative. */
Damjan Marionb4d89272016-05-12 22:14:45 +02002715 vlib_process_wait_for_event_or_clock (vm, timeout);
2716
2717 event_type = vlib_process_get_events (vm, &event_data);
2718
Damjan Marion00a9dca2016-08-17 17:05:46 +02002719 switch (event_type)
2720 {
2721 case EVENT_SET_FLAGS:
2722 /* 1 ms */
2723 link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2724 timeout = 1e-3;
2725 break;
Damjan Marionb4d89272016-05-12 22:14:45 +02002726
Damjan Marion00a9dca2016-08-17 17:05:46 +02002727 case ~0:
2728 /* No events found: timer expired. */
2729 if (vlib_time_now (vm) > link_debounce_deadline)
2730 {
2731 vec_foreach (xd, xm->devices)
2732 {
2733 ixge_regs_t *r = xd->regs;
2734 u32 v = r->xge_mac.link_status;
2735 uword is_up = (v & (1 << 30)) != 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002736
Damjan Marion00a9dca2016-08-17 17:05:46 +02002737 vnet_hw_interface_set_flags
2738 (vnm, xd->vlib_hw_if_index,
2739 is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2740 }
2741 link_debounce_deadline = 1e70;
2742 timeout = 30.0;
2743 }
2744 break;
Damjan Marionb4d89272016-05-12 22:14:45 +02002745
Damjan Marion00a9dca2016-08-17 17:05:46 +02002746 default:
2747 ASSERT (0);
2748 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002749
2750 if (event_data)
2751 _vec_len (event_data) = 0;
2752
2753 /* Query stats every 30 secs. */
2754 {
2755 f64 now = vlib_time_now (vm);
2756 if (now - xm->time_last_stats_update > 30)
2757 {
2758 xm->time_last_stats_update = now;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002759 vec_foreach (xd, xm->devices) ixge_update_counters (xd);
Damjan Marionb4d89272016-05-12 22:14:45 +02002760 }
2761 }
2762 }
2763
2764 return 0;
2765}
2766
/* Background process node: runs ixge_process, which initializes all
   devices and then handles link debounce and periodic counter polling. */
static vlib_node_registration_t ixge_process_node = {
  .function = ixge_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ixge-process",
};
2772
Damjan Marion00a9dca2016-08-17 17:05:46 +02002773clib_error_t *
2774ixge_init (vlib_main_t * vm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002775{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002776 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002777
2778 xm->vlib_main = vm;
Dave Barachb7b92992018-10-17 10:38:51 -04002779 clib_memset (&xm->tx_descriptor_template, 0,
2780 sizeof (xm->tx_descriptor_template));
2781 clib_memset (&xm->tx_descriptor_template_mask, 0,
2782 sizeof (xm->tx_descriptor_template_mask));
Damjan Marionb4d89272016-05-12 22:14:45 +02002783 xm->tx_descriptor_template.status0 =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002784 (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2785 IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2786 IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
Damjan Marionb4d89272016-05-12 22:14:45 +02002787 xm->tx_descriptor_template_mask.status0 = 0xffff;
2788 xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2789
2790 xm->tx_descriptor_template_mask.status0 &=
2791 ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2792 | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2793 xm->tx_descriptor_template_mask.status1 &=
2794 ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
Dave Barachf8d50682019-05-14 18:01:44 -04002795 return 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002796}
2797
/* *INDENT-OFF* */
/* Run ixge_init before the PCI bus scan so the descriptor templates
   are ready when ixge_pci_init fires for each matched device. */
VLIB_INIT_FUNCTION (ixge_init) =
{
  .runs_before = VLIB_INITS("pci_bus_init"),
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002804
2805
2806static void
Damjan Marion23227982018-10-22 13:38:57 +02002807ixge_pci_intr_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h)
Damjan Marionb4d89272016-05-12 22:14:45 +02002808{
Damjan Marion23227982018-10-22 13:38:57 +02002809 uword private_data = vlib_pci_get_private_data (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002810
2811 vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
2812
2813 /* Let node know which device is interrupting. */
2814 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002815 vlib_node_runtime_t *rt =
2816 vlib_node_get_runtime (vm, ixge_input_node.index);
Damjan Marioncef87f12017-10-05 15:32:41 +02002817 rt->runtime_data[0] |= 1 << private_data;
Damjan Marionb4d89272016-05-12 22:14:45 +02002818 }
2819}
2820
2821static clib_error_t *
Damjan Marioncef87f12017-10-05 15:32:41 +02002822ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
Damjan Marionb4d89272016-05-12 22:14:45 +02002823{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002824 ixge_main_t *xm = &ixge_main;
Damjan Marioncef87f12017-10-05 15:32:41 +02002825 clib_error_t *error = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002826 void *r;
2827 ixge_device_t *xd;
Damjan Marion23227982018-10-22 13:38:57 +02002828 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
2829 vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
Damjan Marionb4d89272016-05-12 22:14:45 +02002830
Damjan Marion23227982018-10-22 13:38:57 +02002831 error = vlib_pci_map_region (vm, h, 0, &r);
Damjan Marionb4d89272016-05-12 22:14:45 +02002832 if (error)
2833 return error;
2834
2835 vec_add2 (xm->devices, xd, 1);
2836
2837 if (vec_len (xm->devices) == 1)
2838 {
Damjan Marion652d2e12019-02-02 00:15:27 +01002839 ixge_input_node.function = ixge_input;
Damjan Marionb4d89272016-05-12 22:14:45 +02002840 }
2841
Damjan Marioncef87f12017-10-05 15:32:41 +02002842 xd->pci_dev_handle = h;
2843 xd->device_id = d->device_id;
Damjan Marionb4d89272016-05-12 22:14:45 +02002844 xd->regs = r;
2845 xd->device_index = xd - xm->devices;
Damjan Marioncef87f12017-10-05 15:32:41 +02002846 xd->pci_function = addr->function;
Damjan Marionb4d89272016-05-12 22:14:45 +02002847 xd->per_interface_next_index = ~0;
2848
Damjan Marion23227982018-10-22 13:38:57 +02002849 vlib_pci_set_private_data (vm, h, xd->device_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002850
2851 /* Chip found so enable node. */
2852 {
2853 vlib_node_set_state (vm, ixge_input_node.index,
2854 (IXGE_ALWAYS_POLL
2855 ? VLIB_NODE_STATE_POLLING
2856 : VLIB_NODE_STATE_INTERRUPT));
2857
Damjan Marioncef87f12017-10-05 15:32:41 +02002858 //dev->private_data = xd->device_index;
Damjan Marionb4d89272016-05-12 22:14:45 +02002859 }
2860
2861 if (vec_len (xm->devices) == 1)
2862 {
2863 vlib_register_node (vm, &ixge_process_node);
2864 xm->process_node_index = ixge_process_node.index;
2865 }
2866
Damjan Marion23227982018-10-22 13:38:57 +02002867 error = vlib_pci_bus_master_enable (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002868
2869 if (error)
2870 return error;
2871
Damjan Marion23227982018-10-22 13:38:57 +02002872 return vlib_pci_intr_enable (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002873}
2874
/* *INDENT-OFF* */
/* Match every supported Intel 82599-family PCI device ID; vlib calls
   ixge_pci_init for each match and wires up the interrupt handler. */
PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
  .init_function = ixge_pci_init,
  .interrupt_handler = ixge_pci_intr_handler,
  .supported_devices = {
#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
  foreach_ixge_pci_device_id
#undef _
  { 0 },
  },
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002887
Damjan Marion00a9dca2016-08-17 17:05:46 +02002888void
2889ixge_set_next_node (ixge_rx_next_t next, char *name)
Damjan Marionb4d89272016-05-12 22:14:45 +02002890{
2891 vlib_node_registration_t *r = &ixge_input_node;
2892
2893 switch (next)
2894 {
2895 case IXGE_RX_NEXT_IP4_INPUT:
2896 case IXGE_RX_NEXT_IP6_INPUT:
2897 case IXGE_RX_NEXT_ETHERNET_INPUT:
2898 r->next_nodes[next] = name;
2899 break;
2900
2901 default:
2902 clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2903 break;
2904 }
2905}
Damjan Marion00a9dca2016-08-17 17:05:46 +02002906
/* *INDENT-OFF* */
/* Plugin registration.  Disabled by default: this driver is for
   educational use only (see the warning at the top of the file). */
VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .default_disabled = 1,
  .description = "Intel 82599 Family Native Driver (experimental)",
};
#endif

/* *INDENT-ON* */
Damjan Marion7bee80c2017-04-26 15:32:12 +02002916
Damjan Marion00a9dca2016-08-17 17:05:46 +02002917/*
2918 * fd.io coding-style-patch-verification: ON
2919 *
2920 * Local Variables:
2921 * eval: (c-set-style "gnu")
2922 * End:
2923 */