/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * WARNING!
 * This driver is not intended for production use and it is unsupported.
 * It is provided for educational use only.
 * Please use the supported DPDK driver instead.
 */

#if __x86_64__ || __i386__ || __aarch64__
#include <vppinfra/vector.h>

#ifndef CLIB_HAVE_VEC128
#warning HACK: ixge driver wont really work, missing u32x4
typedef unsigned long long u32x4;
#endif

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/vnet.h>
#include <ixge/ixge.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#define IXGE_ALWAYS_POLL 0

#define EVENT_SET_FLAGS 0
#define IXGE_HWBP_RACE_ELOG 0

#define PCI_VENDOR_ID_INTEL 0x8086

/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
#define XGE_PHY_DEV_TYPE_PMA_PMD 1
#define XGE_PHY_DEV_TYPE_PHY_XS 4
#define XGE_PHY_ID1 0x2
#define XGE_PHY_ID2 0x3
#define XGE_PHY_CONTROL 0x0
#define XGE_PHY_CONTROL_RESET (1 << 15)

ixge_main_t ixge_main;
static vlib_node_registration_t ixge_input_node;
static vlib_node_registration_t ixge_process_node;

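/*
 * Device semaphore: bit 0 of the software_semaphore register is
 * acquired by polling, then bit 1 is set to claim it.  On top of
 * this, the software_firmware_sync register arbitrates shared
 * resources (PHY, EEPROM) between driver and firmware; the firmware
 * bits sit 5 positions above the corresponding software bits.
 */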
static void
ixge_semaphore_get (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 i;

  i = 0;
  while (!(r->software_semaphore & (1 << 0)))
    {
      if (i > 0)
        vlib_process_suspend (vm, 100e-6);
      i++;
    }
  do
    {
      r->software_semaphore |= 1 << 1;
    }
  while (!(r->software_semaphore & (1 << 1)));
}

static void
ixge_semaphore_release (ixge_device_t * xd)
{
  ixge_regs_t *r = xd->regs;
  r->software_semaphore &= ~3;
}

static void
ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 fw_mask = sw_mask << 5;
  u32 m, done = 0;

  while (!done)
    {
      ixge_semaphore_get (xd);
      m = r->software_firmware_sync;
      done = (m & fw_mask) == 0;
      if (done)
        r->software_firmware_sync = m | sw_mask;
      ixge_semaphore_release (xd);
      if (!done)
        vlib_process_suspend (vm, 10e-3);
    }
}

static void
ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
{
  ixge_regs_t *r = xd->regs;
  ixge_semaphore_get (xd);
  r->software_firmware_sync &= ~sw_mask;
  ixge_semaphore_release (xd);
}

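/*
 * IEEE 802.3 clause 45 MDIO access: an address cycle latches
 * reg_index/dev_type/mdio_address into phy_command, then a second
 * cycle issues the read (2) or write (1) op code.  Each cycle
 * busy-waits on bit 30 (the busy bit) with no process suspend.
 */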
u32
ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
                         u32 v, u32 is_read)
{
  ixge_regs_t *r = xd->regs;
  const u32 busy_bit = 1 << 30;
  u32 x;

  ASSERT (xd->phy_index < 2);
  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));

  ASSERT (reg_index < (1 << 16));
  ASSERT (dev_type < (1 << 5));
  if (!is_read)
    r->xge_mac.phy_data = v;

  /* Address cycle. */
  x = reg_index | (dev_type << 16)
    | (xd->phys[xd->phy_index].mdio_address << 21);
  r->xge_mac.phy_command = x | busy_bit;
  /* Busy wait timed to take 28e-6 secs.  No suspend. */
  while (r->xge_mac.phy_command & busy_bit)
    ;

  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
  while (r->xge_mac.phy_command & busy_bit)
    ;

  if (is_read)
    v = r->xge_mac.phy_data >> 16;

  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));

  return v;
}

static u32
ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
{
  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0,
                                  /* is_read */ 1);
}

static void
ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
{
  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v,
                                  /* is_read */ 0);
}

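/*
 * Bit-banged I2C to the SFP module EEPROM: SDA maps to bits 3 (out)
 * and 2 (in), SCL to bits 1 (out) and 0 (in) of i2c_control.
 */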
static void
ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
  u32 v;

  v = 0;
  v |= (sda != 0) << 3;
  v |= (scl != 0) << 1;
  xd->regs->i2c_control = v;
}

static void
ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
  u32 v;

  v = xd->regs->i2c_control;
  *sda = (v & (1 << 2)) != 0;
  *scl = (v & (1 << 0)) != 0;
}

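/*
 * Read one 16-bit EEPROM word: write the word address with the start
 * bit set, busy-wait for the done bit, then take the data from the
 * upper 16 bits of the register.
 */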
static u16
ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
  ixge_regs_t *r = xd->regs;
  u32 v;
  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
  /* Wait for done bit. */
  while (!((v = r->eeprom_read) & (1 << 1)))
    ;
  return v >> 16;
}

static void
ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
{
  u32 tx_disable_bit = 1 << 3;
  if (enable)
    xd->regs->sdp_control &= ~tx_disable_bit;
  else
    xd->regs->sdp_control |= tx_disable_bit;
}

static void
ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
{
  u32 is_10g_bit = 1 << 5;
  if (enable)
    xd->regs->sdp_control |= is_10g_bit;
  else
    xd->regs->sdp_control &= ~is_10g_bit;
}

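/*
 * The EEPROM word at 0x2b points to a table of (module id,
 * register-value list address) pairs; find the entry matching
 * sfp_type and replay its values into core_analog_config.  0xffff
 * terminates both the table and each value list.
 */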
static clib_error_t *
ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
{
  u16 a, id, reg_values_addr = 0;

  a = ixge_read_eeprom (xd, 0x2b);
  if (a == 0 || a == 0xffff)
    return clib_error_create ("no init sequence in eeprom");

  while (1)
    {
      id = ixge_read_eeprom (xd, ++a);
      if (id == 0xffff)
        break;
      reg_values_addr = ixge_read_eeprom (xd, ++a);
      if (id == sfp_type)
        break;
    }
  if (id != sfp_type)
    return clib_error_create ("failed to find id 0x%x", sfp_type);

  ixge_software_firmware_sync (xd, 1 << 3);
  while (1)
    {
      u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
      if (v == 0xffff)
        break;
      xd->regs->core_analog_config = v;
    }
  ixge_software_firmware_sync_release (xd, 1 << 3);

  /* Make sure laser is off.  We'll turn on the laser when
     the interface is brought up. */
  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);

  return 0;
}

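/*
 * Bring the SFI link up or down: select 10G serial PMA/PMD, restart
 * auto-negotiation, wait for link partner ability, switch to 10G SFI
 * serdes link mode and restart auto-negotiation again, then gate the
 * laser to match the admin state.
 */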
static void
ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
{
  u32 v;

  if (is_up)
    {
      /* pma/pmd 10g serial SFI. */
      xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
      xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;

      v = xd->regs->xge_mac.auto_negotiation_control;
      v &= ~(7 << 13);
      v |= (0 << 13);
      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
        ;

      v = xd->regs->xge_mac.auto_negotiation_control;

      /* link mode 10g sfi serdes */
      v &= ~(7 << 13);
      v |= (3 << 13);

      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      xd->regs->xge_mac.link_status;
    }

  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);

  /* Give time for link partner to notice that we're up. */
  if (is_up && vlib_in_process_context (vlib_get_main ()))
    {
      vlib_process_suspend (vlib_get_main (), 300e-3);
    }
}

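/*
 * The device has 128 rx queues (split across two register banks of
 * 64) and 128 tx queues; return the DMA register block for queue qi.
 */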
always_inline ixge_dma_regs_t *
get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
{
  ixge_regs_t *r = xd->regs;
  ASSERT (qi < 128);
  if (rt == VLIB_RX)
    return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
  else
    return &r->tx_dma[qi];
}

static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);

  if (is_up)
    {
      xd->regs->rx_enable |= 1;
      xd->regs->tx_dma_control |= 1;
      dr->control |= 1 << 25;
      while (!(dr->control & (1 << 25)))
        ;
    }
  else
    {
      xd->regs->rx_enable &= ~1;
      xd->regs->tx_dma_control &= ~1;
    }

  ixge_sfp_device_up_down (xd, is_up);

  return /* no error */ 0;
}

static void
ixge_sfp_phy_init (ixge_device_t * xd)
{
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  i2c_bus_t *ib = &xd->i2c_bus;

  ib->private_data = xd->device_index;
  ib->put_bits = ixge_i2c_put_bits;
  ib->get_bits = ixge_i2c_get_bits;
  vlib_i2c_init (ib);

  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);

  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
    xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
  else
    {
      /* FIXME 5 => SR/LR eeprom ID. */
      clib_error_t *e =
        ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
      if (e)
        clib_error_report (e);
    }

  phy->mdio_address = ~0;
}

static void
ixge_phy_init (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_phy_t *phy = xd->phys + xd->phy_index;

  switch (xd->device_id)
    {
    case IXGE_82599_sfp:
    case IXGE_82599_sfp_em:
    case IXGE_82599_sfp_fcoe:
      /* others? */
      return ixge_sfp_phy_init (xd);

    default:
      break;
    }

  /* Probe address of phy. */
  {
    u32 i, v;

    phy->mdio_address = ~0;
    for (i = 0; i < 32; i++)
      {
        phy->mdio_address = i;
        v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
        if (v != 0xffff && v != 0)
          break;
      }

    /* No PHY found? */
    if (i >= 32)
      return;
  }

  phy->id =
    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));

  {
    ELOG_TYPE_DECLARE (e) = {
      .function = (char *) __FUNCTION__,
      .format = "ixge %d, phy id 0x%d mdio address %d",
      .format_args = "i4i4i4",
    };
    struct
    {
      u32 instance, id, address;
    } *ed;
    ed = ELOG_DATA (&vm->elog_main, e);
    ed->instance = xd->device_index;
    ed->id = phy->id;
    ed->address = phy->mdio_address;
  }

  /* Reset phy. */
  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
                      XGE_PHY_CONTROL_RESET);

  /* Wait for self-clearing reset bit to clear. */
  do
    {
      vlib_process_suspend (vm, 1e-3);
    }
  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
         XGE_PHY_CONTROL_RESET);
}

static u8 *
format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
{
  ixge_rx_from_hw_descriptor_t *d =
    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
  u32 s0 = d->status[0], s2 = d->status[2];
  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
  u32 indent = format_get_indent (s);

  s = format (s, "%s-owned",
              (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
              "hw");
  s = format (s, ", length this descriptor %d, l3 offset %d",
              d->n_packet_bytes_this_descriptor,
              IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
    s = format (s, ", end-of-packet");

  s = format (s, "\n%U", format_white_space, indent);

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
    s = format (s, "layer2 error");

  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
    {
      s = format (s, "layer 2 type %d", (s0 & 0x1f));
      return s;
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
    s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
                format_white_space, indent);

  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
    {
      s = format (s, "ip4%s",
                  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
                  "");
      if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
        s = format (s, " checksum %s",
                    (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
                    "bad" : "ok");
    }
  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
    s = format (s, "ip6%s",
                (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
                "");
  is_tcp = is_udp = 0;
  if ((is_ip = (is_ip4 | is_ip6)))
    {
      is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
      is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
      if (is_tcp)
        s = format (s, ", tcp");
      if (is_udp)
        s = format (s, ", udp");
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
    s = format (s, ", tcp checksum %s",
                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
                "ok");
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
    s = format (s, ", udp checksum %s",
                (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
                "ok");

  return s;
}

static u8 *
format_ixge_tx_descriptor (u8 * s, va_list * va)
{
  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
  u32 s0 = d->status0, s1 = d->status1;
  u32 indent = format_get_indent (s);
  u32 v;

  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
              d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);

  s = format (s, "\n%U", format_white_space, indent);

  if ((v = (s0 >> 0) & 3))
    s = format (s, "reserved 0x%x, ", v);

  if ((v = (s0 >> 2) & 3))
    s = format (s, "mac 0x%x, ", v);

  if ((v = (s0 >> 4) & 0xf) != 3)
    s = format (s, "type 0x%x, ", v);

  s = format (s, "%s%s%s%s%s%s%s%s",
              (s0 & (1 << 8)) ? "eop, " : "",
              (s0 & (1 << 9)) ? "insert-fcs, " : "",
              (s0 & (1 << 10)) ? "reserved26, " : "",
              (s0 & (1 << 11)) ? "report-status, " : "",
              (s0 & (1 << 12)) ? "reserved28, " : "",
              (s0 & (1 << 13)) ? "is-advanced, " : "",
              (s0 & (1 << 14)) ? "vlan-enable, " : "",
              (s0 & (1 << 15)) ? "tx-segmentation, " : "");

  if ((v = s1 & 0xf) != 0)
    s = format (s, "status 0x%x, ", v);

  if ((v = (s1 >> 4) & 0xf))
    s = format (s, "context 0x%x, ", v);

  if ((v = (s1 >> 8) & 0x3f))
    s = format (s, "options 0x%x, ", v);

  return s;
}

typedef struct
{
  ixge_descriptor_t before, after;

  u32 buffer_index;

  u16 device_index;

  u8 queue_index;

  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;

static u8 *
format_ixge_rx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s = format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
                t->queue_index);
  }

  s = format (s, "\n%Ubefore: %U",
              format_white_space, indent,
              format_ixge_rx_from_hw_descriptor, &t->before);
  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
              format_white_space, indent,
              t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);

  s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index, format_vnet_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  f = node->format_buffer;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}

#define foreach_ixge_error					\
  _ (none, "no error")						\
  _ (tx_full_drops, "tx ring full drops")			\
  _ (ip4_checksum_error, "ip4 checksum errors")			\
  _ (rx_alloc_fail, "rx buf alloc from free list failed")	\
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

typedef enum
{
#define _(f,s) IXGE_ERROR_##f,
  foreach_ixge_error
#undef _
    IXGE_N_ERROR,
} ixge_error_t;

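/*
 * Map rx descriptor status to (next node, error, buffer flags):
 * ip4/ip6 packets are steered to the ip input nodes (or to the
 * per-interface next index when set), ip4 checksum failures are
 * dropped, and l4 checksum results are exported as buffer flags.
 */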
always_inline void
ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
                                       u32 s00, u32 s02,
                                       u8 * next0, u8 * error0, u32 * flags0)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u32 f0;

  e0 = IXGE_ERROR_none;
  n0 = IXGE_RX_NEXT_ETHERNET_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e0);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;

  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;

  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  *error0 = e0;
  *next0 = n0;
  *flags0 = f0;
}

always_inline void
ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
                                       u32 s00, u32 s02,
                                       u32 s10, u32 s12,
                                       u8 * next0, u8 * error0, u32 * flags0,
                                       u8 * next1, u8 * error1, u32 * flags1)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u8 is1_ip4, is1_ip6, n1, e1;
  u32 f0, f1;

  e0 = e1 = IXGE_ERROR_none;
  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;

  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e0);
  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
        ? IXGE_ERROR_ip4_checksum_error : e1);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;

  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;

  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;
  n1 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n1;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;

  *error0 = e0;
  *error1 = e1;

  *next0 = n0;
  *next1 = n1;

  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
  f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
  f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  *flags0 = f0;
  *flags1 = f1;
}

static void
ixge_rx_trace (ixge_main_t * xm,
               ixge_device_t * xd,
               ixge_dma_queue_t * dq,
               ixge_descriptor_t * before_descriptors,
               u32 * before_buffers,
               ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_rx_from_hw_descriptor_t *bd;
  ixge_rx_to_hw_descriptor_t *ad;
  u32 *b, n_left, is_sop, next_index_sop;

  n_left = n_descriptors;
  b = before_buffers;
  bd = &before_descriptors->rx_from_hw;
  ad = &after_descriptors->rx_to_hw;
  is_sop = dq->rx.is_start_of_packet;
  next_index_sop = dq->rx.saved_start_of_packet_next_index;

  while (n_left >= 2)
    {
      u32 bi0, bi1, flags0, flags1;
      vlib_buffer_t *b0, *b1;
      ixge_rx_dma_trace_t *t0, *t1;
      u8 next0, error0, next1, error1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      ixge_rx_next_and_error_from_status_x2 (xd,
                                             bd[0].status[0], bd[0].status[2],
                                             bd[1].status[0], bd[1].status[2],
                                             &next0, &error0, &flags0,
                                             &next1, &error1, &flags1);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      next_index_sop = is_sop ? next1 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t1->before.rx_from_hw = bd[1];
      t0->after.rx_to_hw = ad[0];
      t1->after.rx_to_hw = ad[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
              sizeof (t1->buffer.pre_data));

      b += 2;
      bd += 2;
      ad += 2;
    }

  while (n_left >= 1)
    {
      u32 bi0, flags0;
      vlib_buffer_t *b0;
      ixge_rx_dma_trace_t *t0;
      u8 next0, error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      ixge_rx_next_and_error_from_status_x1 (xd,
                                             bd[0].status[0], bd[0].status[2],
                                             &next0, &error0, &flags0);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t0->after.rx_to_hw = ad[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));

      b += 1;
      bd += 1;
      ad += 1;
    }
}

typedef struct
{
  ixge_tx_descriptor_t descriptor;

  u32 buffer_index;

  u16 device_index;

  u8 queue_index;

  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;

static u8 *
format_ixge_tx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s = format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
                t->queue_index);
  }

  s = format (s, "\n%Udescriptor: %U",
              format_white_space, indent,
              format_ixge_tx_descriptor, &t->descriptor);

  s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index, format_vnet_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  f = format_ethernet_header_with_length;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}

typedef struct
{
  vlib_node_runtime_t *node;

  u32 is_start_of_packet;

  u32 n_bytes_in_packet;

  ixge_tx_descriptor_t *start_of_packet_descriptor;
} ixge_tx_state_t;

static void
ixge_tx_trace (ixge_main_t * xm,
               ixge_device_t * xd,
               ixge_dma_queue_t * dq,
               ixge_tx_state_t * tx_state,
               ixge_tx_descriptor_t * descriptors,
               u32 * buffers, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = tx_state->node;
  ixge_tx_descriptor_t *d;
  u32 *b, n_left, is_sop;

  n_left = n_descriptors;
  b = buffers;
  d = descriptors;
  is_sop = tx_state->is_start_of_packet;

  while (n_left >= 2)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      ixge_tx_dma_trace_t *t0, *t1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->descriptor = d[0];
      t1->descriptor = d[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
              sizeof (t1->buffer.pre_data));

      b += 2;
      d += 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      ixge_tx_dma_trace_t *t0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->descriptor = d[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
              sizeof (t0->buffer.pre_data));

      b += 1;
      d += 1;
    }
}

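/*
 * Descriptor ring index arithmetic, modulo the ring size:
 * ixge_ring_sub returns the distance from i0 forward to i1,
 * ixge_ring_add advances i0 by i1 with wrap-around.
 */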
always_inline uword
ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
{
  i32 d = i1 - i0;
  ASSERT (i0 < q->n_descriptors);
  ASSERT (i1 < q->n_descriptors);
  return d < 0 ? q->n_descriptors + d : d;
}

always_inline uword
ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
{
  u32 d = i0 + i1;
  ASSERT (i0 < q->n_descriptors);
  ASSERT (i1 < q->n_descriptors);
  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
  return d;
}

always_inline uword
ixge_tx_descriptor_matches_template (ixge_main_t * xm,
                                     ixge_tx_descriptor_t * d)
{
  u32 cmp;

  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
         ^ xm->tx_descriptor_template.status0);
  if (cmp)
    return 0;
  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
         ^ xm->tx_descriptor_template.status1);
  if (cmp)
    return 0;

  return 1;
}

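/*
 * Fill a contiguous run of tx descriptors (caller guarantees no ring
 * wrap).  Buffer indices previously parked on the ring are collected
 * into tx_buffers_pending_free for later freeing, and each packet's
 * start-of-packet descriptor is back-patched with the total packet
 * byte count once its end-of-packet buffer is seen.
 */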
static uword
ixge_tx_no_wrap (ixge_main_t * xm,
                 ixge_device_t * xd,
                 ixge_dma_queue_t * dq,
                 u32 * buffers,
                 u32 start_descriptor_index,
                 u32 n_descriptors, ixge_tx_state_t * tx_state)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_tx_descriptor_t *d, *d_sop;
  u32 n_left = n_descriptors;
  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
  u32 *to_tx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 is_sop = tx_state->is_start_of_packet;
  u32 len_sop = tx_state->n_bytes_in_packet;
  u16 template_status = xm->tx_descriptor_template.status0;
  u32 descriptor_prefetch_rotor = 0;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index].tx;
  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, fi0, len0;
      u32 bi1, fi1, len1;
      u8 is_eop0, is_eop1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      if ((descriptor_prefetch_rotor & 0x3) == 0)
        CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);

      descriptor_prefetch_rotor += 2;

      bi0 = buffers[0];
      bi1 = buffers[1];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      to_free[0] = fi1 = to_tx[1];
      to_tx[1] = bi1;
      to_free += fi1 != 0;

      buffers += 2;
      n_left -= 2;
      to_tx += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
      is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;
      len1 = b1->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[1].buffer_address = vlib_buffer_get_pa (vm, b1);

      d[0].n_bytes_this_buffer = len0;
      d[1].n_bytes_this_buffer = len1;

      d[0].status0 =
        template_status | (is_eop0 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
      d[1].status0 =
        template_status | (is_eop1 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;

      len_sop = (is_sop ? 0 : len_sop) + len1;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop1 ? d : d_sop;

      is_sop = is_eop1;
    }

  while (n_left > 0)
    {
      vlib_buffer_t *b0;
      u32 bi0, fi0, len0;
      u8 is_eop0;

      bi0 = buffers[0];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      buffers += 1;
      n_left -= 1;
      to_tx += 1;

      b0 = vlib_get_buffer (vm, bi0);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));

      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
      d[0].n_bytes_this_buffer = len0;

      d[0].status0 =
        template_status | (is_eop0 <<
                           IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
        IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;
    }

  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
    {
      to_tx =
        vec_elt_at_index (dq->descriptor_buffer_indices,
                          start_descriptor_index);
      ixge_tx_trace (xm, xd, dq, tx_state,
                     &dq->descriptors[start_descriptor_index].tx, to_tx,
                     n_descriptors);
    }

  _vec_len (xm->tx_buffers_pending_free) =
    to_free - xm->tx_buffers_pending_free;

  /* When we are done d_sop can point to end of ring.  Wrap it if so. */
  {
    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;

    ASSERT (d_sop - d_start <= dq->n_descriptors);
    d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
  }

  tx_state->is_start_of_packet = is_sop;
  tx_state->start_of_packet_descriptor = d_sop;
  tx_state->n_bytes_in_packet = len_sop;

  return n_descriptors;
}

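/*
 * Interface tx function: read back the hardware head index to learn
 * how much ring space is free, drop whole packets that cannot fit,
 * fill descriptors in at most two no-wrap runs, then publish the new
 * tail index to the device behind a memory barrier.
 */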
static uword
ixge_interface_tx (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
  ixge_dma_queue_t *dq;
  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
  u32 queue_index = 0;		/* fixme parameter */
  ixge_tx_state_t tx_state;

  tx_state.node = node;
  tx_state.is_start_of_packet = 1;
  tx_state.start_of_packet_descriptor = 0;
  tx_state.n_bytes_in_packet = 0;

  from = vlib_frame_vector_args (f);

  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);

  dq->head_index = dq->tx.head_index_write_back[0];

  /* Since head == tail means ring is empty we can send up to
     dq->n_descriptors - 1. */
  n_left_tx = dq->n_descriptors - 1;
  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);

  _vec_len (xm->tx_buffers_pending_free) = 0;

  n_descriptors_to_tx = f->n_vectors;
  n_tail_drop = 0;
  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
    {
      i32 i, n_ok, i_eop, i_sop;

      i_sop = i_eop = ~0;
      for (i = n_left_tx - 1; i >= 0; i--)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
          if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              if (i_sop != ~0 && i_eop != ~0)
                break;
              i_eop = i;
              i_sop = i + 1;
            }
        }
      if (i == 0)
        n_ok = 0;
      else
        n_ok = i_eop + 1;

      {
        ELOG_TYPE_DECLARE (e) = {
          .function = (char *) __FUNCTION__,
          .format = "ixge %d, ring full to tx %d head %d tail %d",
          .format_args = "i2i2i2i2",
        };
        struct
        {
          u16 instance, to_tx, head, tail;
        } *ed;
        ed = ELOG_DATA (&vm->elog_main, e);
        ed->instance = xd->device_index;
        ed->to_tx = n_descriptors_to_tx;
        ed->head = dq->head_index;
        ed->tail = dq->tail_index;
      }

      if (n_ok < n_descriptors_to_tx)
        {
          n_tail_drop = n_descriptors_to_tx - n_ok;
          vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
          vlib_error_count (vm, ixge_input_node.index,
                            IXGE_ERROR_tx_full_drops, n_tail_drop);
        }

      n_descriptors_to_tx = n_ok;
    }

  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;

  /* Process from tail to end of descriptor ring. */
  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
    {
      u32 n =
        clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
      n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
      from += n;
      n_descriptors_to_tx -= n;
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
        dq->tail_index = 0;
    }

  if (n_descriptors_to_tx > 0)
    {
      u32 n =
        ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
      from += n;
      ASSERT (n == n_descriptors_to_tx);
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
        dq->tail_index = 0;
    }

  /* We should only get full packets. */
  ASSERT (tx_state.is_start_of_packet);

  /* Report status when last descriptor is done. */
  {
    u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
    d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
  }

  /* Give new descriptors to hardware. */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);

    CLIB_MEMORY_BARRIER ();

    dr->tail_index = dq->tail_index;
  }

  /* Free any buffers that are done. */
  {
    u32 n = _vec_len (xm->tx_buffers_pending_free);
    if (n > 0)
      {
        vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
        _vec_len (xm->tx_buffers_pending_free) = 0;
        ASSERT (dq->tx.n_buffers_on_ring >= n);
        dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
      }
  }

  return f->n_vectors;
}

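/*
 * Process a contiguous run of rx descriptors (no ring wrap).  Each
 * software-owned descriptor's buffer is replaced with a freshly
 * allocated one, chained into its packet, and enqueued to the next
 * node; the descriptor is rewritten with the new buffer address and
 * handed back to hardware.
 */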
1340static uword
1341ixge_rx_queue_no_wrap (ixge_main_t * xm,
1342 ixge_device_t * xd,
1343 ixge_dma_queue_t * dq,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001344 u32 start_descriptor_index, u32 n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +02001345{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001346 vlib_main_t *vm = xm->vlib_main;
1347 vlib_node_runtime_t *node = dq->rx.node;
1348 ixge_descriptor_t *d;
1349 static ixge_descriptor_t *d_trace_save;
1350 static u32 *d_trace_buffers;
Damjan Marionb4d89272016-05-12 22:14:45 +02001351 u32 n_descriptors_left = n_descriptors;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001352 u32 *to_rx =
1353 vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1354 u32 *to_add;
Damjan Marionb4d89272016-05-12 22:14:45 +02001355 u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1356 u32 bi_last = dq->rx.saved_last_buffer_index;
1357 u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1358 u32 is_sop = dq->rx.is_start_of_packet;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001359 u32 next_index, n_left_to_next, *to_next;
Damjan Marionb4d89272016-05-12 22:14:45 +02001360 u32 n_packets = 0;
1361 u32 n_bytes = 0;
1362 u32 n_trace = vlib_get_trace_count (vm, node);
Dave Barach11fb09e2020-08-06 12:10:09 -04001363 vlib_buffer_t *b_last, b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001364
1365 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1366 d = &dq->descriptors[start_descriptor_index];
1367
Dave Barach11fb09e2020-08-06 12:10:09 -04001368 b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
Damjan Marionb4d89272016-05-12 22:14:45 +02001369 next_index = dq->rx.next_index;
1370
1371 if (n_trace > 0)
1372 {
1373 u32 n = clib_min (n_trace, n_descriptors);
1374 if (d_trace_save)
1375 {
1376 _vec_len (d_trace_save) = 0;
1377 _vec_len (d_trace_buffers) = 0;
1378 }
1379 vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1380 vec_add (d_trace_buffers, to_rx, n);
1381 }
1382
1383 {
1384 uword l = vec_len (xm->rx_buffers_to_add);
1385
1386 if (l < n_descriptors_left)
1387 {
1388 u32 n_to_alloc = 2 * dq->n_descriptors - l;
1389 u32 n_allocated;
1390
1391 vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1392
1393 _vec_len (xm->rx_buffers_to_add) = l;
Damjan Marioncef87f12017-10-05 15:32:41 +02001394 n_allocated =
1395 vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
Damjan Marionb4d89272016-05-12 22:14:45 +02001396 _vec_len (xm->rx_buffers_to_add) += n_allocated;
1397
Damjan Marion00a9dca2016-08-17 17:05:46 +02001398 /* Handle transient allocation failure */
1399 if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
Damjan Marionb4d89272016-05-12 22:14:45 +02001400 {
1401 if (n_allocated == 0)
1402 vlib_error_count (vm, ixge_input_node.index,
1403 IXGE_ERROR_rx_alloc_no_physmem, 1);
1404 else
1405 vlib_error_count (vm, ixge_input_node.index,
1406 IXGE_ERROR_rx_alloc_fail, 1);
1407
1408 n_descriptors_left = l + n_allocated;
1409 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001410 n_descriptors = n_descriptors_left;
Damjan Marionb4d89272016-05-12 22:14:45 +02001411 }
1412
1413 /* Add buffers from end of vector going backwards. */
1414 to_add = vec_end (xm->rx_buffers_to_add) - 1;
1415 }
1416
1417 while (n_descriptors_left > 0)
1418 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001419 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
Damjan Marionb4d89272016-05-12 22:14:45 +02001420
1421 while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1422 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001423 vlib_buffer_t *b0, *b1;
Damjan Marion8f499362018-10-22 13:07:02 +02001424 vlib_buffer_t *f0, *f1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001425 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1426 u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1427 u8 is_eop0, error0, next0;
1428 u8 is_eop1, error1, next1;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001429 ixge_descriptor_t d0, d1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001430
1431 vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1432 vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1433
Damjan Marion00a9dca2016-08-17 17:05:46 +02001434 CLIB_PREFETCH (d + 2, 32, STORE);
Damjan Marionb4d89272016-05-12 22:14:45 +02001435
Damjan Marion00a9dca2016-08-17 17:05:46 +02001436 d0.as_u32x4 = d[0].as_u32x4;
1437 d1.as_u32x4 = d[1].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001438
1439 s20 = d0.rx_from_hw.status[2];
1440 s21 = d1.rx_from_hw.status[2];
1441
1442 s00 = d0.rx_from_hw.status[0];
1443 s01 = d1.rx_from_hw.status[0];
1444
Damjan Marion00a9dca2016-08-17 17:05:46 +02001445 if (!
1446 ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001447 goto found_hw_owned_descriptor_x2;
1448
1449 bi0 = to_rx[0];
1450 bi1 = to_rx[1];
1451
1452 ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1453 fi0 = to_add[0];
1454 fi1 = to_add[-1];
1455
1456 to_rx[0] = fi0;
1457 to_rx[1] = fi1;
1458 to_rx += 2;
1459 to_add -= 2;
1460
Damjan Marioncef87f12017-10-05 15:32:41 +02001461#if 0
Steven899a84b2018-01-29 20:09:09 -08001462 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1463 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi1));
1464 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
1465 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi1));
Damjan Marioncef87f12017-10-05 15:32:41 +02001466#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001467
1468 b0 = vlib_get_buffer (vm, bi0);
1469 b1 = vlib_get_buffer (vm, bi1);
1470
Damjan Marion00a9dca2016-08-17 17:05:46 +02001471 /*
1472 * Turn this on if you run into
1473 * "bad monkey" contexts, and you want to know exactly
1474 * which nodes they've visited... See main.c...
1475 */
1476 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1477 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001478
1479 CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1480 CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1481
1482 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1483 is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1484
1485 ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1486 &next0, &error0, &flags0,
1487 &next1, &error1, &flags1);
1488
1489 next0 = is_sop ? next0 : next_index_sop;
1490 next1 = is_eop0 ? next1 : next0;
1491 next_index_sop = next1;
1492
1493 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1494 b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1495
1496 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1497 vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001498 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1499 vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001500
1501 b0->error = node->errors[error0];
1502 b1->error = node->errors[error1];
1503
1504 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1505 len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1506 n_bytes += len0 + len1;
1507 n_packets += is_eop0 + is_eop1;
1508
1509 /* Give new buffers to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001510 f0 = vlib_get_buffer (vm, fi0);
1511 f1 = vlib_get_buffer (vm, fi1);
1512 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
1513 d1.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001514 d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
1515 d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001516 d[0].as_u32x4 = d0.as_u32x4;
1517 d[1].as_u32x4 = d1.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001518
1519 d += 2;
1520 n_descriptors_left -= 2;
1521
1522 /* Point to either l2 or l3 header depending on next. */
1523 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001524 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001525 l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001526 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001527
1528 b0->current_length = len0 - l3_offset0;
1529 b1->current_length = len1 - l3_offset1;
1530 b0->current_data = l3_offset0;
1531 b1->current_data = l3_offset1;
1532
1533 b_last->next_buffer = is_sop ? ~0 : bi0;
1534 b0->next_buffer = is_eop0 ? ~0 : bi1;
1535 bi_last = bi1;
1536 b_last = b1;
1537
1538 if (CLIB_DEBUG > 0)
1539 {
1540 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1541 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1542
1543 if (is_eop0)
1544 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001545 u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1546 /* follow_buffer_next */ 1);
1547 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001548 }
1549 if (is_eop1)
1550 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001551 u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1552 /* follow_buffer_next */ 1);
1553 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001554 }
1555 }
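	  /*
	   * Two enqueue strategies are kept side by side for reference:
	   * the disabled "Dave" version below uses
	   * vlib_validate_buffer_enqueue_x1 once per end-of-packet, while
	   * the enabled "Eliot" version enqueues speculatively and only
	   * patches up when a packet's next index misses the cached one.
	   */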
Damjan Marion00a9dca2016-08-17 17:05:46 +02001556 if (0) /* "Dave" version */
1557 {
1558 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1559 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001560
Damjan Marion00a9dca2016-08-17 17:05:46 +02001561 if (is_eop0)
1562 {
1563 to_next[0] = bi_sop0;
1564 to_next++;
1565 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001566
Damjan Marion00a9dca2016-08-17 17:05:46 +02001567 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1568 to_next, n_left_to_next,
1569 bi_sop0, next0);
1570 }
1571 if (is_eop1)
1572 {
1573 to_next[0] = bi_sop1;
1574 to_next++;
1575 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001576
Damjan Marion00a9dca2016-08-17 17:05:46 +02001577 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1578 to_next, n_left_to_next,
1579 bi_sop1, next1);
1580 }
1581 is_sop = is_eop1;
1582 bi_sop = bi_sop1;
1583 }
1584 if (1) /* "Eliot" version */
1585 {
1586 /* Speculatively enqueue to cached next. */
1587 u8 saved_is_sop = is_sop;
1588 u32 bi_sop_save = bi_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +02001589
Damjan Marion00a9dca2016-08-17 17:05:46 +02001590 bi_sop = saved_is_sop ? bi0 : bi_sop;
1591 to_next[0] = bi_sop;
1592 to_next += is_eop0;
1593 n_left_to_next -= is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001594
Damjan Marion00a9dca2016-08-17 17:05:46 +02001595 bi_sop = is_eop0 ? bi1 : bi_sop;
1596 to_next[0] = bi_sop;
1597 to_next += is_eop1;
1598 n_left_to_next -= is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001599
Damjan Marion00a9dca2016-08-17 17:05:46 +02001600 is_sop = is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001601
Damjan Marion00a9dca2016-08-17 17:05:46 +02001602 if (PREDICT_FALSE
1603 (!(next0 == next_index && next1 == next_index)))
1604 {
1605 /* Undo speculation. */
1606 to_next -= is_eop0 + is_eop1;
1607 n_left_to_next += is_eop0 + is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001608
Damjan Marion00a9dca2016-08-17 17:05:46 +02001609 /* Re-do both descriptors being careful about where we enqueue. */
1610 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1611 if (is_eop0)
1612 {
1613 if (next0 != next_index)
1614 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1615 else
1616 {
1617 to_next[0] = bi_sop;
1618 to_next += 1;
1619 n_left_to_next -= 1;
1620 }
1621 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001622
Damjan Marion00a9dca2016-08-17 17:05:46 +02001623 bi_sop = is_eop0 ? bi1 : bi_sop;
1624 if (is_eop1)
1625 {
1626 if (next1 != next_index)
1627 vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1628 else
1629 {
1630 to_next[0] = bi_sop;
1631 to_next += 1;
1632 n_left_to_next -= 1;
1633 }
1634 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001635
Damjan Marion00a9dca2016-08-17 17:05:46 +02001636 /* Switch cached next index when next for both packets is the same. */
1637 if (is_eop0 && is_eop1 && next0 == next1)
1638 {
1639 vlib_put_next_frame (vm, node, next_index,
1640 n_left_to_next);
1641 next_index = next0;
1642 vlib_get_next_frame (vm, node, next_index,
1643 to_next, n_left_to_next);
1644 }
1645 }
1646 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001647 }
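      /*
       * Note on the speculative pattern above: buffers are written to
       * the current next-node frame unconditionally and the cursor
       * advances by is_eop, so mid-packet descriptors cost nothing;
       * only a next-index mismatch pays for the undo and the
       * per-buffer vlib_set_next_frame_buffer() path.
       */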
1648
Damjan Marion00a9dca2016-08-17 17:05:46 +02001649 /* Bail out of dual loop and proceed with single loop. */
Damjan Marionb4d89272016-05-12 22:14:45 +02001650 found_hw_owned_descriptor_x2:
1651
1652 while (n_descriptors_left > 0 && n_left_to_next > 0)
1653 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001654 vlib_buffer_t *b0;
Damjan Marion8f499362018-10-22 13:07:02 +02001655 vlib_buffer_t *f0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001656 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1657 u8 is_eop0, error0, next0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001658 ixge_descriptor_t d0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001659
Damjan Marion00a9dca2016-08-17 17:05:46 +02001660 d0.as_u32x4 = d[0].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001661
1662 s20 = d0.rx_from_hw.status[2];
1663 s00 = d0.rx_from_hw.status[0];
1664
Damjan Marion00a9dca2016-08-17 17:05:46 +02001665 if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001666 goto found_hw_owned_descriptor_x1;
1667
1668 bi0 = to_rx[0];
1669 ASSERT (to_add >= xm->rx_buffers_to_add);
1670 fi0 = to_add[0];
1671
1672 to_rx[0] = fi0;
1673 to_rx += 1;
1674 to_add -= 1;
1675
Damjan Marioncef87f12017-10-05 15:32:41 +02001676#if 0
Steven899a84b2018-01-29 20:09:09 -08001677 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
1678 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
Damjan Marioncef87f12017-10-05 15:32:41 +02001679#endif
Damjan Marionb4d89272016-05-12 22:14:45 +02001680
1681 b0 = vlib_get_buffer (vm, bi0);
1682
Damjan Marion00a9dca2016-08-17 17:05:46 +02001683 /*
1684 * Turn this on if you run into
1685 * "bad monkey" contexts, and you want to know exactly
1686 * which nodes they've visited...
1687 */
1688 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001689
1690 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1691 ixge_rx_next_and_error_from_status_x1
Damjan Marion00a9dca2016-08-17 17:05:46 +02001692 (xd, s00, s20, &next0, &error0, &flags0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001693
1694 next0 = is_sop ? next0 : next_index_sop;
1695 next_index_sop = next0;
1696
1697 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1698
1699 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001700 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001701
1702 b0->error = node->errors[error0];
1703
1704 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1705 n_bytes += len0;
1706 n_packets += is_eop0;
1707
1708 /* Give new buffer to hardware. */
Damjan Marion8f499362018-10-22 13:07:02 +02001709 f0 = vlib_get_buffer (vm, fi0);
1710 d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001711 d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001712 d[0].as_u32x4 = d0.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001713
1714 d += 1;
1715 n_descriptors_left -= 1;
1716
1717 /* Point to either l2 or l3 header depending on next. */
1718 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001719 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001720 b0->current_length = len0 - l3_offset0;
1721 b0->current_data = l3_offset0;
1722
1723 b_last->next_buffer = is_sop ? ~0 : bi0;
1724 bi_last = bi0;
1725 b_last = b0;
1726
1727 bi_sop = is_sop ? bi0 : bi_sop;
1728
1729 if (CLIB_DEBUG > 0 && is_eop0)
1730 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001731 u8 *msg =
1732 vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1733 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001734 }
1735
Damjan Marion00a9dca2016-08-17 17:05:46 +02001736 if (0) /* "Dave" version */
1737 {
1738 if (is_eop0)
1739 {
1740 to_next[0] = bi_sop;
1741 to_next++;
1742 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001743
Damjan Marion00a9dca2016-08-17 17:05:46 +02001744 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1745 to_next, n_left_to_next,
1746 bi_sop, next0);
1747 }
1748 }
1749 if (1) /* "Eliot" version */
1750 {
1751 if (PREDICT_TRUE (next0 == next_index))
1752 {
1753 to_next[0] = bi_sop;
1754 to_next += is_eop0;
1755 n_left_to_next -= is_eop0;
1756 }
1757 else
1758 {
1759 if (next0 != next_index && is_eop0)
1760 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
Damjan Marionb4d89272016-05-12 22:14:45 +02001761
Damjan Marion00a9dca2016-08-17 17:05:46 +02001762 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1763 next_index = next0;
1764 vlib_get_next_frame (vm, node, next_index,
1765 to_next, n_left_to_next);
1766 }
1767 }
1768 is_sop = is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001769 }
1770 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1771 }
1772
Damjan Marion00a9dca2016-08-17 17:05:46 +02001773found_hw_owned_descriptor_x1:
Damjan Marionb4d89272016-05-12 22:14:45 +02001774 if (n_descriptors_left > 0)
1775 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1776
1777 _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1778
1779 {
1780 u32 n_done = n_descriptors - n_descriptors_left;
1781
1782 if (n_trace > 0 && n_done > 0)
1783 {
1784 u32 n = clib_min (n_trace, n_done);
1785 ixge_rx_trace (xm, xd, dq,
1786 d_trace_save,
1787 d_trace_buffers,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001788 &dq->descriptors[start_descriptor_index], n);
Damjan Marionb4d89272016-05-12 22:14:45 +02001789 vlib_set_trace_count (vm, node, n_trace - n);
1790 }
1791 if (d_trace_save)
1792 {
1793 _vec_len (d_trace_save) = 0;
1794 _vec_len (d_trace_buffers) = 0;
1795 }
1796
1797 /* Don't keep a reference to b_last if we don't have to.
     Otherwise we can overwrite a next_buffer pointer after having already
1799 enqueued a packet. */
1800 if (is_sop)
1801 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001802 b_last->next_buffer = ~0;
1803 bi_last = ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001804 }
1805
1806 dq->rx.n_descriptors_done_this_call = n_done;
1807 dq->rx.n_descriptors_done_total += n_done;
1808 dq->rx.is_start_of_packet = is_sop;
1809 dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1810 dq->rx.saved_last_buffer_index = bi_last;
1811 dq->rx.saved_start_of_packet_next_index = next_index_sop;
1812 dq->rx.next_index = next_index;
1813 dq->rx.n_bytes += n_bytes;
1814
1815 return n_packets;
1816 }
1817}
1818
1819static uword
1820ixge_rx_queue (ixge_main_t * xm,
1821 ixge_device_t * xd,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001822 vlib_node_runtime_t * node, u32 queue_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02001823{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001824 ixge_dma_queue_t *dq =
1825 vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
1826 ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02001827 uword n_packets = 0;
1828 u32 hw_head_index, sw_head_index;
1829
1830 /* One time initialization. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02001831 if (!dq->rx.node)
Damjan Marionb4d89272016-05-12 22:14:45 +02001832 {
1833 dq->rx.node = node;
1834 dq->rx.is_start_of_packet = 1;
1835 dq->rx.saved_start_of_packet_buffer_index = ~0;
1836 dq->rx.saved_last_buffer_index = ~0;
1837 }
1838
1839 dq->rx.next_index = node->cached_next_index;
1840
1841 dq->rx.n_descriptors_done_total = 0;
1842 dq->rx.n_descriptors_done_this_call = 0;
1843 dq->rx.n_bytes = 0;
1844
1845 /* Fetch head from hardware and compare to where we think we are. */
1846 hw_head_index = dr->head_index;
1847 sw_head_index = dq->head_index;
1848
1849 if (hw_head_index == sw_head_index)
1850 goto done;
1851
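  /* hw_head < sw_head means the descriptor ring has wrapped: process
     from sw_head to the end of the ring first, then from slot 0 up to
     hw_head. */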
1852 if (hw_head_index < sw_head_index)
1853 {
1854 u32 n_tried = dq->n_descriptors - sw_head_index;
1855 n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001856 sw_head_index =
1857 ixge_ring_add (dq, sw_head_index,
1858 dq->rx.n_descriptors_done_this_call);
Damjan Marionb4d89272016-05-12 22:14:45 +02001859
1860 if (dq->rx.n_descriptors_done_this_call != n_tried)
1861 goto done;
1862 }
1863 if (hw_head_index >= sw_head_index)
1864 {
1865 u32 n_tried = hw_head_index - sw_head_index;
1866 n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001867 sw_head_index =
1868 ixge_ring_add (dq, sw_head_index,
1869 dq->rx.n_descriptors_done_this_call);
Damjan Marionb4d89272016-05-12 22:14:45 +02001870 }
1871
Damjan Marion00a9dca2016-08-17 17:05:46 +02001872done:
Damjan Marionb4d89272016-05-12 22:14:45 +02001873 dq->head_index = sw_head_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001874 dq->tail_index =
1875 ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
Damjan Marionb4d89272016-05-12 22:14:45 +02001876
1877 /* Give tail back to hardware. */
1878 CLIB_MEMORY_BARRIER ();
1879
1880 dr->tail_index = dq->tail_index;
1881
Damjan Marion00a9dca2016-08-17 17:05:46 +02001882 vlib_increment_combined_counter (vnet_main.
1883 interface_main.combined_sw_if_counters +
1884 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001885 0 /* thread_index */ ,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001886 xd->vlib_sw_if_index, n_packets,
Damjan Marionb4d89272016-05-12 22:14:45 +02001887 dq->rx.n_bytes);
1888
1889 return n_packets;
1890}
1891
Damjan Marion00a9dca2016-08-17 17:05:46 +02001892static void
1893ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
Damjan Marionb4d89272016-05-12 22:14:45 +02001894{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001895 vlib_main_t *vm = xm->vlib_main;
1896 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +02001897
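  /* Interrupt bit 20 is link status change: ed->index below is i - 16,
     and "link status change" is entry 4 of the enum strings, i.e.
     16 + 4 = 20. */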
1898 if (i != 20)
1899 {
      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format = "ixge %d, %s",
	.format_args = "i1t1",
	.n_enum_strings = 16,
	.enum_strings = {
	  "flow director",
	  "rx miss",
	  "pci exception",
	  "mailbox",
	  "link status change",
	  "linksec key exchange",
	  "manageability event",
	  "reserved23",
	  "sdp0",
	  "sdp1",
	  "sdp2",
	  "sdp3",
	  "ecc",
	  "descriptor handler error",
	  "tcp timer",
	  "other",
	},
      };
1919 struct
1920 {
1921 u8 instance;
1922 u8 index;
1923 } *ed;
Damjan Marionb4d89272016-05-12 22:14:45 +02001924 ed = ELOG_DATA (&vm->elog_main, e);
1925 ed->instance = xd->device_index;
1926 ed->index = i - 16;
1927 }
1928 else
1929 {
1930 u32 v = r->xge_mac.link_status;
1931 uword is_up = (v & (1 << 30)) != 0;
1932
      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format = "ixge %d, link status change 0x%x",
	.format_args = "i4i4",
      };
1937 struct
1938 {
1939 u32 instance, link_status;
1940 } *ed;
Damjan Marionb4d89272016-05-12 22:14:45 +02001941 ed = ELOG_DATA (&vm->elog_main, e);
1942 ed->instance = xd->device_index;
1943 ed->link_status = v;
1944 xd->link_status_at_last_link_change = v;
1945
1946 vlib_process_signal_event (vm, ixge_process_node.index,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001947 EVENT_SET_FLAGS,
1948 ((is_up << 31) | xd->vlib_hw_if_index));
Damjan Marionb4d89272016-05-12 22:14:45 +02001949 }
1950}
1951
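/* Gather the non-zero buffer indices from b[0..n_left-1] into t,
   zeroing each ring slot as it is read, and return how many indices
   were written.  The 4-wide unrolled loop is branch free: t advances
   by (bi != 0) instead of testing each slot. */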
1952always_inline u32
1953clean_block (u32 * b, u32 * t, u32 n_left)
1954{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001955 u32 *t0 = t;
Damjan Marionb4d89272016-05-12 22:14:45 +02001956
1957 while (n_left >= 4)
1958 {
1959 u32 bi0, bi1, bi2, bi3;
1960
1961 t[0] = bi0 = b[0];
1962 b[0] = 0;
1963 t += bi0 != 0;
1964
1965 t[0] = bi1 = b[1];
1966 b[1] = 0;
1967 t += bi1 != 0;
1968
1969 t[0] = bi2 = b[2];
1970 b[2] = 0;
1971 t += bi2 != 0;
1972
1973 t[0] = bi3 = b[3];
1974 b[3] = 0;
1975 t += bi3 != 0;
1976
1977 b += 4;
1978 n_left -= 4;
1979 }
1980
1981 while (n_left > 0)
1982 {
1983 u32 bi0;
1984
1985 t[0] = bi0 = b[0];
1986 b[0] = 0;
1987 t += bi0 != 0;
1988 b += 1;
1989 n_left -= 1;
1990 }
1991
1992 return t - t0;
1993}
1994
1995static void
1996ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
1997{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001998 vlib_main_t *vm = xm->vlib_main;
1999 ixge_dma_queue_t *dq =
2000 vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
2001 u32 n_clean, *b, *t, *t0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002002 i32 n_hw_owned_descriptors;
2003 i32 first_to_clean, last_to_clean;
2004 u64 hwbp_race = 0;
2005
  /* Handle the case where the head write-back pointer update
   * arrives after the interrupt under high PCI bus load.
   */
2008 */
2009 while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2010 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
2011 {
2012 hwbp_race++;
2013 if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
2014 {
	  ELOG_TYPE_DECLARE (e) = {
	    .function = (char *) __FUNCTION__,
	    .format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
	    .format_args = "i4i4i4i4",
	  };
2020 struct
2021 {
2022 u32 instance, head_index, tail_index, n_buffers_on_ring;
2023 } *ed;
Damjan Marionb4d89272016-05-12 22:14:45 +02002024 ed = ELOG_DATA (&vm->elog_main, e);
2025 ed->instance = xd->device_index;
2026 ed->head_index = dq->head_index;
2027 ed->tail_index = dq->tail_index;
2028 ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2029 }
2030 }
2031
2032 dq->head_index = dq->tx.head_index_write_back[0];
2033 n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002034 ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002035 n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2036
2037 if (IXGE_HWBP_RACE_ELOG && hwbp_race)
2038 {
      ELOG_TYPE_DECLARE (e) = {
	.function = (char *) __FUNCTION__,
	.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
	.format_args = "i4i4i4i4i4",
      };
2044 struct
2045 {
2046 u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2047 } *ed;
2048 ed = ELOG_DATA (&vm->elog_main, e);
2049 ed->instance = xd->device_index;
2050 ed->head_index = dq->head_index;
2051 ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2052 ed->n_clean = n_clean;
2053 ed->retries = hwbp_race;
Damjan Marionb4d89272016-05-12 22:14:45 +02002054 }
2055
2056 /*
2057 * This function used to wait until hardware owned zero descriptors.
2058 * At high PPS rates, that doesn't happen until the TX ring is
2059 * completely full of descriptors which need to be cleaned up.
2060 * That, in turn, causes TX ring-full drops and/or long RX service
2061 * interruptions.
2062 */
2063 if (n_clean == 0)
2064 return;
2065
2066 /* Clean the n_clean descriptors prior to the reported hardware head */
2067 last_to_clean = dq->head_index - 1;
2068 last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
Damjan Marion00a9dca2016-08-17 17:05:46 +02002069 last_to_clean;
Damjan Marionb4d89272016-05-12 22:14:45 +02002070
2071 first_to_clean = (last_to_clean) - (n_clean - 1);
2072 first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
Damjan Marion00a9dca2016-08-17 17:05:46 +02002073 first_to_clean;
Damjan Marionb4d89272016-05-12 22:14:45 +02002074
2075 vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2076 t0 = t = xm->tx_buffers_pending_free;
2077 b = dq->descriptor_buffer_indices + first_to_clean;
2078
2079 /* Wrap case: clean from first to end, then start to last */
2080 if (first_to_clean > last_to_clean)
2081 {
      /* Clean through the final ring slot inclusive; counting
         (dq->n_descriptors - 1) - first_to_clean here would stop one
         slot short and strand that slot's buffer index. */
      t += clean_block (b, t, dq->n_descriptors - first_to_clean);
2083 first_to_clean = 0;
2084 b = dq->descriptor_buffer_indices;
2085 }
2086
2087 /* Typical case: clean from first to last */
2088 if (first_to_clean <= last_to_clean)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002089 t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
Damjan Marionb4d89272016-05-12 22:14:45 +02002090
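  /* Worked example (hypothetical numbers): with n_descriptors = 512,
     head_index = 2 and n_clean = 8, last_to_clean is 1 and
     first_to_clean is 506, so the wrap branch gathers slots 506..511
     and the typical branch then gathers slots 0..1. */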
2091 if (t > t0)
2092 {
2093 u32 n = t - t0;
2094 vlib_buffer_free_no_next (vm, t0, n);
2095 ASSERT (dq->tx.n_buffers_on_ring >= n);
2096 dq->tx.n_buffers_on_ring -= n;
2097 _vec_len (xm->tx_buffers_pending_free) = 0;
2098 }
2099}
2100
2101/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
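/* e.g. RX queue 3 maps to interrupt bit 3; TX queue 3 maps to bit 11. */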
Damjan Marion00a9dca2016-08-17 17:05:46 +02002102always_inline uword
2103ixge_interrupt_is_rx_queue (uword i)
2104{
2105 return i < 8;
2106}
Damjan Marionb4d89272016-05-12 22:14:45 +02002107
Damjan Marion00a9dca2016-08-17 17:05:46 +02002108always_inline uword
2109ixge_interrupt_is_tx_queue (uword i)
2110{
2111 return i >= 8 && i < 16;
2112}
Damjan Marionb4d89272016-05-12 22:14:45 +02002113
Damjan Marion00a9dca2016-08-17 17:05:46 +02002114always_inline uword
2115ixge_tx_queue_to_interrupt (uword i)
2116{
2117 return 8 + i;
2118}
Damjan Marionb4d89272016-05-12 22:14:45 +02002119
Damjan Marion00a9dca2016-08-17 17:05:46 +02002120always_inline uword
2121ixge_rx_queue_to_interrupt (uword i)
2122{
2123 return 0 + i;
2124}
Damjan Marionb4d89272016-05-12 22:14:45 +02002125
Damjan Marion00a9dca2016-08-17 17:05:46 +02002126always_inline uword
2127ixge_interrupt_rx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002128{
2129 ASSERT (ixge_interrupt_is_rx_queue (i));
2130 return i - 0;
2131}
2132
Damjan Marion00a9dca2016-08-17 17:05:46 +02002133always_inline uword
2134ixge_interrupt_tx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002135{
2136 ASSERT (ixge_interrupt_is_tx_queue (i));
2137 return i - 8;
2138}
2139
2140static uword
2141ixge_device_input (ixge_main_t * xm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002142 ixge_device_t * xd, vlib_node_runtime_t * node)
Damjan Marionb4d89272016-05-12 22:14:45 +02002143{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002144 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +02002145 u32 i, s;
2146 uword n_rx_packets = 0;
2147
2148 s = r->interrupt.status_write_1_to_set;
2149 if (s)
2150 r->interrupt.status_write_1_to_clear = s;
2151
Damjan Marion00a9dca2016-08-17 17:05:46 +02002152 /* *INDENT-OFF* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002153 foreach_set_bit (i, s, ({
2154 if (ixge_interrupt_is_rx_queue (i))
2155 n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
2156
2157 else if (ixge_interrupt_is_tx_queue (i))
2158 ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
2159
2160 else
2161 ixge_interrupt (xm, xd, i);
2162 }));
Damjan Marion00a9dca2016-08-17 17:05:46 +02002163 /* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002164
2165 return n_rx_packets;
2166}
2167
2168static uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02002169ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
Damjan Marionb4d89272016-05-12 22:14:45 +02002170{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002171 ixge_main_t *xm = &ixge_main;
2172 ixge_device_t *xd;
Damjan Marionb4d89272016-05-12 22:14:45 +02002173 uword n_rx_packets = 0;
2174
2175 if (node->state == VLIB_NODE_STATE_INTERRUPT)
2176 {
2177 uword i;
2178
2179 /* Loop over devices with interrupts. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002180 /* *INDENT-OFF* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002181 foreach_set_bit (i, node->runtime_data[0], ({
2182 xd = vec_elt_at_index (xm->devices, i);
2183 n_rx_packets += ixge_device_input (xm, xd, node);
2184
2185 /* Re-enable interrupts since we're going to stay in interrupt mode. */
2186 if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2187 xd->regs->interrupt.enable_write_1_to_set = ~0;
2188 }));
Damjan Marion00a9dca2016-08-17 17:05:46 +02002189 /* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002190
2191 /* Clear mask of devices with pending interrupts. */
2192 node->runtime_data[0] = 0;
2193 }
2194 else
2195 {
2196 /* Poll all devices for input/interrupts. */
2197 vec_foreach (xd, xm->devices)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002198 {
2199 n_rx_packets += ixge_device_input (xm, xd, node);
Damjan Marionb4d89272016-05-12 22:14:45 +02002200
Damjan Marion00a9dca2016-08-17 17:05:46 +02002201 /* Re-enable interrupts when switching out of polling mode. */
2202 if (node->flags &
2203 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
2204 xd->regs->interrupt.enable_write_1_to_set = ~0;
2205 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002206 }
2207
2208 return n_rx_packets;
2209}
2210
Damjan Marion00a9dca2016-08-17 17:05:46 +02002211static char *ixge_error_strings[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002212#define _(n,s) s,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002213 foreach_ixge_error
Damjan Marionb4d89272016-05-12 22:14:45 +02002214#undef _
2215};
2216
Damjan Marion00a9dca2016-08-17 17:05:46 +02002217/* *INDENT-OFF* */
Damjan Marion98897e22016-06-17 16:42:02 +02002218VLIB_REGISTER_NODE (ixge_input_node, static) = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002219 .function = ixge_input,
2220 .type = VLIB_NODE_TYPE_INPUT,
2221 .name = "ixge-input",
Damjan Marion7ca5aaa2019-09-24 18:10:49 +02002222 .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
Damjan Marionb4d89272016-05-12 22:14:45 +02002223
2224 /* Will be enabled if/when hardware is detected. */
2225 .state = VLIB_NODE_STATE_DISABLED,
2226
2227 .format_buffer = format_ethernet_header_with_length,
2228 .format_trace = format_ixge_rx_dma_trace,
2229
2230 .n_errors = IXGE_N_ERROR,
2231 .error_strings = ixge_error_strings,
2232
2233 .n_next_nodes = IXGE_RX_N_NEXT,
2234 .next_nodes = {
2235 [IXGE_RX_NEXT_DROP] = "error-drop",
2236 [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
2237 [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
2238 [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
2239 },
2240};
2241
Damjan Marion00a9dca2016-08-17 17:05:46 +02002242/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002243
Damjan Marion00a9dca2016-08-17 17:05:46 +02002244static u8 *
2245format_ixge_device_name (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002246{
Damjan Marion23227982018-10-22 13:38:57 +02002247 vlib_main_t *vm = vlib_get_main ();
Damjan Marionb4d89272016-05-12 22:14:45 +02002248 u32 i = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002249 ixge_main_t *xm = &ixge_main;
2250 ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
Damjan Marion23227982018-10-22 13:38:57 +02002251 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
Damjan Marioncef87f12017-10-05 15:32:41 +02002252 return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2253 addr->domain, addr->bus, addr->slot, addr->function);
Damjan Marionb4d89272016-05-12 22:14:45 +02002254}
2255
2256#define IXGE_COUNTER_IS_64_BIT (1 << 0)
2257#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2258
2259static u8 ixge_counter_flags[] = {
2260#define _(a,f) 0,
2261#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2262 foreach_ixge_counter
2263#undef _
2264#undef _64
2265};
2266
Damjan Marion00a9dca2016-08-17 17:05:46 +02002267static void
2268ixge_update_counters (ixge_device_t * xd)
Damjan Marionb4d89272016-05-12 22:14:45 +02002269{
2270 /* Byte offset for counter registers. */
2271 static u32 reg_offsets[] = {
2272#define _(a,f) (a) / sizeof (u32),
2273#define _64(a,f) _(a,f)
2274 foreach_ixge_counter
2275#undef _
2276#undef _64
2277 };
Damjan Marion00a9dca2016-08-17 17:05:46 +02002278 volatile u32 *r = (volatile u32 *) xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +02002279 int i;
2280
2281 for (i = 0; i < ARRAY_LEN (xd->counters); i++)
2282 {
2283 u32 o = reg_offsets[i];
2284 xd->counters[i] += r[o];
2285 if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
2286 r[o] = 0;
2287 if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002288 xd->counters[i] += (u64) r[o + 1] << (u64) 32;
Damjan Marionb4d89272016-05-12 22:14:45 +02002289 }
2290}
2291
Damjan Marion00a9dca2016-08-17 17:05:46 +02002292static u8 *
2293format_ixge_device_id (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002294{
2295 u32 device_id = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002296 char *t = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002297 switch (device_id)
2298 {
2299#define _(f,n) case n: t = #f; break;
2300 foreach_ixge_pci_device_id;
2301#undef _
2302 default:
2303 t = 0;
2304 break;
2305 }
2306 if (t == 0)
2307 s = format (s, "unknown 0x%x", device_id);
2308 else
2309 s = format (s, "%s", t);
2310 return s;
2311}
2312
Damjan Marion00a9dca2016-08-17 17:05:46 +02002313static u8 *
2314format_ixge_link_status (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002315{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002316 ixge_device_t *xd = va_arg (*args, ixge_device_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +02002317 u32 v = xd->link_status_at_last_link_change;
2318
2319 s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2320
2321 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002322 char *modes[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002323 "1g", "10g parallel", "10g serial", "autoneg",
2324 };
Damjan Marion00a9dca2016-08-17 17:05:46 +02002325 char *speeds[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002326 "unknown", "100m", "1g", "10g",
2327 };
2328 s = format (s, ", mode %s, speed %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002329 modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002330 }
2331
2332 return s;
2333}
2334
Damjan Marion00a9dca2016-08-17 17:05:46 +02002335static u8 *
2336format_ixge_device (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002337{
2338 u32 dev_instance = va_arg (*args, u32);
2339 CLIB_UNUSED (int verbose) = va_arg (*args, int);
Damjan Marion23227982018-10-22 13:38:57 +02002340 vlib_main_t *vm = vlib_get_main ();
Damjan Marion00a9dca2016-08-17 17:05:46 +02002341 ixge_main_t *xm = &ixge_main;
2342 ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
2343 ixge_phy_t *phy = xd->phys + xd->phy_index;
Christophe Fontained3c008d2017-10-02 18:10:54 +02002344 u32 indent = format_get_indent (s);
Damjan Marionb4d89272016-05-12 22:14:45 +02002345
2346 ixge_update_counters (xd);
2347 xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
2348
2349 s = format (s, "Intel 8259X: id %U\n%Ulink %U",
2350 format_ixge_device_id, xd->device_id,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002351 format_white_space, indent + 2, format_ixge_link_status, xd);
Damjan Marionb4d89272016-05-12 22:14:45 +02002352
2353 {
2354
Damjan Marion23227982018-10-22 13:38:57 +02002355 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
2356 vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
Damjan Marioncef87f12017-10-05 15:32:41 +02002357
2358 if (d)
2359 s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
2360 format_vlib_pci_link_speed, d);
Damjan Marionb4d89272016-05-12 22:14:45 +02002361 }
2362
2363 s = format (s, "\n%U", format_white_space, indent + 2);
2364 if (phy->mdio_address != ~0)
2365 s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
Damjan Marionc45e1902018-09-24 15:17:36 +02002366 else if (xd->sfp_eeprom.id == SFP_ID_SFP)
Damjan Marionb4d89272016-05-12 22:14:45 +02002367 s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
2368 else
2369 s = format (s, "PHY not found");
2370
2371 /* FIXME */
2372 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002373 ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
2374 ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
Damjan Marionb4d89272016-05-12 22:14:45 +02002375 u32 hw_head_index = dr->head_index;
2376 u32 sw_head_index = dq->head_index;
2377 u32 nitems;
2378
2379 nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
2380 s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002381 format_white_space, indent + 2, nitems, dq->n_descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002382
2383 s = format (s, "\n%U%d buffers in driver rx cache",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002384 format_white_space, indent + 2,
2385 vec_len (xm->rx_buffers_to_add));
Damjan Marionb4d89272016-05-12 22:14:45 +02002386
2387 s = format (s, "\n%U%d buffers on tx queue 0 ring",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002388 format_white_space, indent + 2,
2389 xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
Damjan Marionb4d89272016-05-12 22:14:45 +02002390 }
2391 {
2392 u32 i;
2393 u64 v;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002394 static char *names[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002395#define _(a,f) #f,
2396#define _64(a,f) _(a,f)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002397 foreach_ixge_counter
Damjan Marionb4d89272016-05-12 22:14:45 +02002398#undef _
2399#undef _64
2400 };
2401
2402 for (i = 0; i < ARRAY_LEN (names); i++)
2403 {
2404 v = xd->counters[i] - xd->counters_last_clear[i];
2405 if (v != 0)
2406 s = format (s, "\n%U%-40U%16Ld",
2407 format_white_space, indent + 2,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002408 format_c_identifier, names[i], v);
Damjan Marionb4d89272016-05-12 22:14:45 +02002409 }
2410 }
2411
2412 return s;
2413}
2414
Damjan Marion00a9dca2016-08-17 17:05:46 +02002415static void
2416ixge_clear_hw_interface_counters (u32 instance)
Damjan Marionb4d89272016-05-12 22:14:45 +02002417{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002418 ixge_main_t *xm = &ixge_main;
2419 ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002420 ixge_update_counters (xd);
2421 memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2422}
2423
2424/*
2425 * Dynamically redirect all pkts from a specific interface
2426 * to the specified node
2427 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002428static void
2429ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2430 u32 node_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02002431{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002432 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002433 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002434 ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002435
2436 /* Shut off redirection */
2437 if (node_index == ~0)
2438 {
2439 xd->per_interface_next_index = node_index;
2440 return;
2441 }
2442
2443 xd->per_interface_next_index =
2444 vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2445}
2446
2447
Damjan Marion00a9dca2016-08-17 17:05:46 +02002448/* *INDENT-OFF* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002449VNET_DEVICE_CLASS (ixge_device_class) = {
2450 .name = "ixge",
2451 .tx_function = ixge_interface_tx,
2452 .format_device_name = format_ixge_device_name,
2453 .format_device = format_ixge_device,
2454 .format_tx_trace = format_ixge_tx_dma_trace,
2455 .clear_counters = ixge_clear_hw_interface_counters,
2456 .admin_up_down_function = ixge_interface_admin_up_down,
2457 .rx_redirect_to_node = ixge_set_interface_next_node,
2458};
Damjan Marion00a9dca2016-08-17 17:05:46 +02002459/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002460
Damjan Marion00a9dca2016-08-17 17:05:46 +02002461#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
Damjan Marionb4d89272016-05-12 22:14:45 +02002462
2463static clib_error_t *
2464ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2465{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002466 ixge_main_t *xm = &ixge_main;
2467 vlib_main_t *vm = xm->vlib_main;
2468 ixge_dma_queue_t *dq;
2469 clib_error_t *error = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002470
2471 vec_validate (xd->dma_queues[rt], queue_index);
2472 dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2473
Damjan Marion00a9dca2016-08-17 17:05:46 +02002474 if (!xm->n_descriptors_per_cache_line)
2475 xm->n_descriptors_per_cache_line =
2476 CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002477
Damjan Marion00a9dca2016-08-17 17:05:46 +02002478 if (!xm->n_bytes_in_rx_buffer)
Damjan Marionb4d89272016-05-12 22:14:45 +02002479 xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2480 xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
Damjan Marionb4d89272016-05-12 22:14:45 +02002481
Damjan Marion00a9dca2016-08-17 17:05:46 +02002482 if (!xm->n_descriptors[rt])
Damjan Marionb4d89272016-05-12 22:14:45 +02002483 xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2484
2485 dq->queue_index = queue_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002486 dq->n_descriptors =
2487 round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
Damjan Marionb4d89272016-05-12 22:14:45 +02002488 dq->head_index = dq->tail_index = 0;
2489
Damjan Marion68b4da62018-09-30 18:26:20 +02002490 dq->descriptors = vlib_physmem_alloc_aligned (vm, dq->n_descriptors *
2491 sizeof (dq->descriptors[0]),
2492 128 /* per chip spec */ );
2493 if (!dq->descriptors)
2494 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002495
Dave Barachb7b92992018-10-17 10:38:51 -04002496 clib_memset (dq->descriptors, 0,
2497 dq->n_descriptors * sizeof (dq->descriptors[0]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002498 vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2499
2500 if (rt == VLIB_RX)
2501 {
2502 u32 n_alloc, i;
2503
Damjan Marioncef87f12017-10-05 15:32:41 +02002504 n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
2505 vec_len (dq->descriptor_buffer_indices));
Damjan Marionb4d89272016-05-12 22:14:45 +02002506 ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2507 for (i = 0; i < n_alloc; i++)
2508 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002509 dq->descriptors[i].rx_to_hw.tail_address =
Damjan Marion8f499362018-10-22 13:07:02 +02002510 vlib_buffer_get_pa
2511 (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002512 }
2513 }
2514 else
2515 {
2516 u32 i;
2517
Damjan Marion68b4da62018-09-30 18:26:20 +02002518 dq->tx.head_index_write_back =
2519 vlib_physmem_alloc (vm, CLIB_CACHE_LINE_BYTES);
2520 if (!dq->tx.head_index_write_back)
2521 return vlib_physmem_last_error (vm);
Damjan Marionb4d89272016-05-12 22:14:45 +02002522
2523 for (i = 0; i < dq->n_descriptors; i++)
2524 dq->descriptors[i].tx = xm->tx_descriptor_template;
2525
2526 vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2527 }
2528
2529 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002530 ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002531 u64 a;
2532
Damjan Marion68b4da62018-09-30 18:26:20 +02002533 a = vlib_physmem_get_pa (vm, dq->descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002534 dr->descriptor_address[0] = a & 0xFFFFFFFF;
2535 dr->descriptor_address[1] = a >> (u64) 32;
2536 dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2537 dq->head_index = dq->tail_index = 0;
2538
2539 if (rt == VLIB_RX)
2540 {
2541 ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2542 dr->rx_split_control =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002543 ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2544 | ( /* lo free descriptor threshold (units of 64 descriptors) */
2545 (1 << 22)) | ( /* descriptor type: advanced one buffer */
2546 (1 << 25)) | ( /* drop if no descriptors available */
2547 (1 << 28)));
Damjan Marionb4d89272016-05-12 22:14:45 +02002548
2549 /* Give hardware all but last 16 cache lines' worth of descriptors. */
2550 dq->tail_index = dq->n_descriptors -
Damjan Marion00a9dca2016-08-17 17:05:46 +02002551 16 * xm->n_descriptors_per_cache_line;
Damjan Marionb4d89272016-05-12 22:14:45 +02002552 }
2553 else
2554 {
	  /* Make sure it's initialized before hardware can get to it. */
2556 dq->tx.head_index_write_back[0] = dq->head_index;
2557
Damjan Marion68b4da62018-09-30 18:26:20 +02002558 a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
Damjan Marionb4d89272016-05-12 22:14:45 +02002559 dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2560 dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2561 }
2562
    /* DMA on the 82599 does not work with bit [13] (rx data write relaxed
       ordering) or the undocumented bit [12] set. */
2565 if (rt == VLIB_RX)
2566 dr->dca_control &= ~((1 << 13) | (1 << 12));
2567
2568 CLIB_MEMORY_BARRIER ();
2569
2570 if (rt == VLIB_TX)
2571 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002572 xd->regs->tx_dma_control |= (1 << 0);
2573 dr->control |= ((32 << 0) /* prefetch threshold */
2574 | (64 << 8) /* host threshold */
2575 | (0 << 16) /* writeback threshold */ );
Damjan Marionb4d89272016-05-12 22:14:45 +02002576 }
2577
2578 /* Enable this queue and wait for hardware to initialize
2579 before adding to tail. */
2580 if (rt == VLIB_TX)
2581 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002582 dr->control |= 1 << 25;
2583 while (!(dr->control & (1 << 25)))
2584 ;
Damjan Marionb4d89272016-05-12 22:14:45 +02002585 }
2586
2587 /* Set head/tail indices and enable DMA. */
2588 dr->head_index = dq->head_index;
2589 dr->tail_index = dq->tail_index;
2590 }
2591
2592 return error;
2593}
2594
Damjan Marion00a9dca2016-08-17 17:05:46 +02002595static u32
2596ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
Damjan Marionb4d89272016-05-12 22:14:45 +02002597{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002598 ixge_device_t *xd;
2599 ixge_regs_t *r;
Damjan Marionb4d89272016-05-12 22:14:45 +02002600 u32 old;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002601 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002602
2603 xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2604 r = xd->regs;
2605
2606 old = r->filter_control;
2607
John Lo4a302ee2020-05-12 22:34:39 -04002608 if (flags == ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002609 r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
John Lo4a302ee2020-05-12 22:34:39 -04002610 else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
Damjan Marionb4d89272016-05-12 22:14:45 +02002611 r->filter_control = old & ~(1 << 9);
John Lo4a302ee2020-05-12 22:34:39 -04002612 else
2613 return ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002614
2615 return old;
2616}
2617
Damjan Marion00a9dca2016-08-17 17:05:46 +02002618static void
2619ixge_device_init (ixge_main_t * xm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002620{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002621 vnet_main_t *vnm = vnet_get_main ();
2622 ixge_device_t *xd;
Damjan Marionb4d89272016-05-12 22:14:45 +02002623
2624 /* Reset chip(s). */
2625 vec_foreach (xd, xm->devices)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002626 {
2627 ixge_regs_t *r = xd->regs;
2628 const u32 reset_bit = (1 << 26) | (1 << 3);
2629
2630 r->control |= reset_bit;
2631
2632 /* No need to suspend. Timed to take ~1e-6 secs */
2633 while (r->control & reset_bit)
2634 ;
2635
2636 /* Software loaded. */
2637 r->extended_control |= (1 << 28);
2638
2639 ixge_phy_init (xd);
2640
2641 /* Register ethernet interface. */
Damjan Marionb4d89272016-05-12 22:14:45 +02002642 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002643 u8 addr8[6];
2644 u32 i, addr32[2];
2645 clib_error_t *error;
Damjan Marionb4d89272016-05-12 22:14:45 +02002646
Damjan Marion00a9dca2016-08-17 17:05:46 +02002647 addr32[0] = r->rx_ethernet_address0[0][0];
2648 addr32[1] = r->rx_ethernet_address0[0][1];
2649 for (i = 0; i < 6; i++)
2650 addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
Damjan Marionb4d89272016-05-12 22:14:45 +02002651
Damjan Marion00a9dca2016-08-17 17:05:46 +02002652 error = ethernet_register_interface
2653 (vnm, ixge_device_class.index, xd->device_index,
2654 /* ethernet address */ addr8,
2655 &xd->vlib_hw_if_index, ixge_flag_change);
2656 if (error)
2657 clib_error_report (error);
Damjan Marionb4d89272016-05-12 22:14:45 +02002658 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002659
2660 {
2661 vnet_sw_interface_t *sw =
2662 vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
2663 xd->vlib_sw_if_index = sw->sw_if_index;
2664 }
2665
2666 ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2667
2668 xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
2669
2670 ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2671
2672 /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2673 r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2674 ixge_rx_queue_to_interrupt (0)) << 0);
2675
2676 r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2677 ixge_tx_queue_to_interrupt (0)) << 8);
2678
    /* No use in getting too many interrupts.  Limit them to one per
       3/4 of the ring at line rate with min-sized packets.
       Disabled: the kernel/vlib main loop already provides an adequate
       interrupt limiting scheme. */
2684 if (0)
2685 {
2686 f64 line_rate_max_pps =
2687 10e9 / (8 * (64 + /* interframe padding */ 20));
2688 ixge_throttle_queue_interrupt (r, 0,
2689 .75 * xm->n_descriptors[VLIB_RX] /
2690 line_rate_max_pps);
2691 }
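    /* For reference: 10e9 / (8 * (64 + 20)) is ~14.9 Mpps of min-sized
       frames once preamble and inter-frame gap are counted, so with the
       default RX ring of 4 * VLIB_FRAME_SIZE descriptors (1024, assuming
       VLIB_FRAME_SIZE is 256) the throttle above would ask for one
       interrupt per ~768 packets, roughly every 52 us at line rate. */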
2692
2693 /* Accept all multicast and broadcast packets. Should really add them
2694 to the dst_ethernet_address register array. */
2695 r->filter_control |= (1 << 10) | (1 << 8);
2696
2697 /* Enable frames up to size in mac frame size register. */
2698 r->xge_mac.control |= 1 << 2;
2699 r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2700
2701 /* Enable all interrupts. */
2702 if (!IXGE_ALWAYS_POLL)
2703 r->interrupt.enable_write_1_to_set = ~0;
2704 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002705}
2706
2707static uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02002708ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
Damjan Marionb4d89272016-05-12 22:14:45 +02002709{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002710 vnet_main_t *vnm = vnet_get_main ();
2711 ixge_main_t *xm = &ixge_main;
2712 ixge_device_t *xd;
2713 uword event_type, *event_data = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002714 f64 timeout, link_debounce_deadline;
2715
2716 ixge_device_init (xm);
2717
2718 /* Clear all counters. */
2719 vec_foreach (xd, xm->devices)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002720 {
2721 ixge_update_counters (xd);
Dave Barachb7b92992018-10-17 10:38:51 -04002722 clib_memset (xd->counters, 0, sizeof (xd->counters));
Damjan Marion00a9dca2016-08-17 17:05:46 +02002723 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002724
2725 timeout = 30.0;
2726 link_debounce_deadline = 1e70;
2727
2728 while (1)
2729 {
2730 /* 36 bit stat counters could overflow in ~50 secs.
Damjan Marion00a9dca2016-08-17 17:05:46 +02002731 We poll every 30 secs to be conservative. */
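      /* A sanity check on that figure: at 10 Gb/s line rate the octet
         counters advance at ~1.25e9 bytes/sec, and 2^36 / 1.25e9 is
         about 55 seconds, so polling every 30 seconds reads each
         counter at least once per wrap. */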
Damjan Marionb4d89272016-05-12 22:14:45 +02002732 vlib_process_wait_for_event_or_clock (vm, timeout);
2733
2734 event_type = vlib_process_get_events (vm, &event_data);
2735
Damjan Marion00a9dca2016-08-17 17:05:46 +02002736 switch (event_type)
2737 {
2738 case EVENT_SET_FLAGS:
2739 /* 1 ms */
2740 link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2741 timeout = 1e-3;
2742 break;
Damjan Marionb4d89272016-05-12 22:14:45 +02002743
Damjan Marion00a9dca2016-08-17 17:05:46 +02002744 case ~0:
2745 /* No events found: timer expired. */
2746 if (vlib_time_now (vm) > link_debounce_deadline)
2747 {
2748 vec_foreach (xd, xm->devices)
2749 {
2750 ixge_regs_t *r = xd->regs;
2751 u32 v = r->xge_mac.link_status;
2752 uword is_up = (v & (1 << 30)) != 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002753
Damjan Marion00a9dca2016-08-17 17:05:46 +02002754 vnet_hw_interface_set_flags
2755 (vnm, xd->vlib_hw_if_index,
2756 is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2757 }
2758 link_debounce_deadline = 1e70;
2759 timeout = 30.0;
2760 }
2761 break;
Damjan Marionb4d89272016-05-12 22:14:45 +02002762
Damjan Marion00a9dca2016-08-17 17:05:46 +02002763 default:
2764 ASSERT (0);
2765 }
Damjan Marionb4d89272016-05-12 22:14:45 +02002766
2767 if (event_data)
2768 _vec_len (event_data) = 0;
2769
2770 /* Query stats every 30 secs. */
2771 {
2772 f64 now = vlib_time_now (vm);
2773 if (now - xm->time_last_stats_update > 30)
2774 {
2775 xm->time_last_stats_update = now;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002776 vec_foreach (xd, xm->devices) ixge_update_counters (xd);
Damjan Marionb4d89272016-05-12 22:14:45 +02002777 }
2778 }
2779 }
2780
2781 return 0;
2782}
2783
2784static vlib_node_registration_t ixge_process_node = {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002785 .function = ixge_process,
2786 .type = VLIB_NODE_TYPE_PROCESS,
2787 .name = "ixge-process",
Damjan Marionb4d89272016-05-12 22:14:45 +02002788};
2789
Damjan Marion00a9dca2016-08-17 17:05:46 +02002790clib_error_t *
2791ixge_init (vlib_main_t * vm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002792{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002793 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002794
2795 xm->vlib_main = vm;
Dave Barachb7b92992018-10-17 10:38:51 -04002796 clib_memset (&xm->tx_descriptor_template, 0,
2797 sizeof (xm->tx_descriptor_template));
2798 clib_memset (&xm->tx_descriptor_template_mask, 0,
2799 sizeof (xm->tx_descriptor_template_mask));
Damjan Marionb4d89272016-05-12 22:14:45 +02002800 xm->tx_descriptor_template.status0 =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002801 (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2802 IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2803 IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
Damjan Marionb4d89272016-05-12 22:14:45 +02002804 xm->tx_descriptor_template_mask.status0 = 0xffff;
2805 xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2806
2807 xm->tx_descriptor_template_mask.status0 &=
2808 ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2809 | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2810 xm->tx_descriptor_template_mask.status1 &=
2811 ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
Dave Barachf8d50682019-05-14 18:01:44 -04002812 return 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002813}
2814
Dave Barachf8d50682019-05-14 18:01:44 -04002815/* *INDENT-OFF* */
2816VLIB_INIT_FUNCTION (ixge_init) =
2817{
2818 .runs_before = VLIB_INITS("pci_bus_init"),
2819};
2820/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002821
2822
2823static void
Damjan Marion23227982018-10-22 13:38:57 +02002824ixge_pci_intr_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h)
Damjan Marionb4d89272016-05-12 22:14:45 +02002825{
Damjan Marion23227982018-10-22 13:38:57 +02002826 uword private_data = vlib_pci_get_private_data (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002827
2828 vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
2829
2830 /* Let node know which device is interrupting. */
2831 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002832 vlib_node_runtime_t *rt =
2833 vlib_node_get_runtime (vm, ixge_input_node.index);
Damjan Marioncef87f12017-10-05 15:32:41 +02002834 rt->runtime_data[0] |= 1 << private_data;
Damjan Marionb4d89272016-05-12 22:14:45 +02002835 }
2836}
2837
2838static clib_error_t *
Damjan Marioncef87f12017-10-05 15:32:41 +02002839ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
Damjan Marionb4d89272016-05-12 22:14:45 +02002840{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002841 ixge_main_t *xm = &ixge_main;
Damjan Marioncef87f12017-10-05 15:32:41 +02002842 clib_error_t *error = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002843 void *r;
2844 ixge_device_t *xd;
Damjan Marion23227982018-10-22 13:38:57 +02002845 vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
2846 vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
Damjan Marionb4d89272016-05-12 22:14:45 +02002847
Damjan Marion23227982018-10-22 13:38:57 +02002848 error = vlib_pci_map_region (vm, h, 0, &r);
Damjan Marionb4d89272016-05-12 22:14:45 +02002849 if (error)
2850 return error;
2851
2852 vec_add2 (xm->devices, xd, 1);
2853
2854 if (vec_len (xm->devices) == 1)
2855 {
Damjan Marion652d2e12019-02-02 00:15:27 +01002856 ixge_input_node.function = ixge_input;
Damjan Marionb4d89272016-05-12 22:14:45 +02002857 }
2858
Damjan Marioncef87f12017-10-05 15:32:41 +02002859 xd->pci_dev_handle = h;
2860 xd->device_id = d->device_id;
Damjan Marionb4d89272016-05-12 22:14:45 +02002861 xd->regs = r;
2862 xd->device_index = xd - xm->devices;
Damjan Marioncef87f12017-10-05 15:32:41 +02002863 xd->pci_function = addr->function;
Damjan Marionb4d89272016-05-12 22:14:45 +02002864 xd->per_interface_next_index = ~0;
2865
Damjan Marion23227982018-10-22 13:38:57 +02002866 vlib_pci_set_private_data (vm, h, xd->device_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002867
2868 /* Chip found so enable node. */
2869 {
2870 vlib_node_set_state (vm, ixge_input_node.index,
2871 (IXGE_ALWAYS_POLL
2872 ? VLIB_NODE_STATE_POLLING
2873 : VLIB_NODE_STATE_INTERRUPT));
2874
Damjan Marioncef87f12017-10-05 15:32:41 +02002875 //dev->private_data = xd->device_index;
Damjan Marionb4d89272016-05-12 22:14:45 +02002876 }
2877
2878 if (vec_len (xm->devices) == 1)
2879 {
2880 vlib_register_node (vm, &ixge_process_node);
2881 xm->process_node_index = ixge_process_node.index;
2882 }
2883
Damjan Marion23227982018-10-22 13:38:57 +02002884 error = vlib_pci_bus_master_enable (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002885
2886 if (error)
2887 return error;
2888
Damjan Marion23227982018-10-22 13:38:57 +02002889 return vlib_pci_intr_enable (vm, h);
Damjan Marionb4d89272016-05-12 22:14:45 +02002890}
2891
Damjan Marion00a9dca2016-08-17 17:05:46 +02002892/* *INDENT-OFF* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002893PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2894 .init_function = ixge_pci_init,
2895 .interrupt_handler = ixge_pci_intr_handler,
2896 .supported_devices = {
2897#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2898 foreach_ixge_pci_device_id
2899#undef _
2900 { 0 },
2901 },
2902};
Damjan Marion00a9dca2016-08-17 17:05:46 +02002903/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002904
Damjan Marion00a9dca2016-08-17 17:05:46 +02002905void
2906ixge_set_next_node (ixge_rx_next_t next, char *name)
Damjan Marionb4d89272016-05-12 22:14:45 +02002907{
2908 vlib_node_registration_t *r = &ixge_input_node;
2909
2910 switch (next)
2911 {
2912 case IXGE_RX_NEXT_IP4_INPUT:
2913 case IXGE_RX_NEXT_IP6_INPUT:
2914 case IXGE_RX_NEXT_ETHERNET_INPUT:
2915 r->next_nodes[next] = name;
2916 break;
2917
2918 default:
2919 clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2920 break;
2921 }
2922}
Damjan Marion00a9dca2016-08-17 17:05:46 +02002923
Damjan Marion374e2c52017-03-09 20:38:15 +01002924/* *INDENT-OFF* */
2925VLIB_PLUGIN_REGISTER () = {
2926 .version = VPP_BUILD_VER,
2927 .default_disabled = 1,
Damjan Marion1bfb0dd2017-03-22 11:08:39 +01002928 .description = "Intel 82599 Family Native Driver (experimental)",
Damjan Marion374e2c52017-03-09 20:38:15 +01002929};
Damjan Marion7bee80c2017-04-26 15:32:12 +02002930#endif
Damjan Marion374e2c52017-03-09 20:38:15 +01002931
2932/* *INDENT-ON* */
Damjan Marion7bee80c2017-04-26 15:32:12 +02002933
Damjan Marion00a9dca2016-08-17 17:05:46 +02002934/*
2935 * fd.io coding-style-patch-verification: ON
2936 *
2937 * Local Variables:
2938 * eval: (c-set-style "gnu")
2939 * End:
2940 */