blob: 5fd6a901e35c1d9660955b60bfe2c222757e5022 [file] [log] [blame]
Damjan Marionb4d89272016-05-12 22:14:45 +02001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/*
17 * WARNING!
18 * This driver is not intended for production use and it is unsupported.
19 * It is provided for educational use only.
20 * Please use supported DPDK driver instead.
21 */
22
Damjan Marion7bee80c2017-04-26 15:32:12 +020023#if __x86_64__ || __i386__
Damjan Marionb4d89272016-05-12 22:14:45 +020024#include <vppinfra/vector.h>
25
26#ifndef CLIB_HAVE_VEC128
27#warning HACK: ixge driver wont really work, missing u32x4
28typedef unsigned long long u32x4;
29#endif
30
31#include <vlib/vlib.h>
32#include <vlib/unix/unix.h>
33#include <vlib/pci/pci.h>
34#include <vnet/vnet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010035#include <ixge/ixge.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020036#include <vnet/ethernet/ethernet.h>
Damjan Marion374e2c52017-03-09 20:38:15 +010037#include <vnet/plugin/plugin.h>
38#include <vpp/app/version.h>
Damjan Marionb4d89272016-05-12 22:14:45 +020039
/* Build-time tunables: poll always vs. interrupt-driven operation. */
#define IXGE_ALWAYS_POLL 0

/* Process event number used to set device flags. */
#define EVENT_SET_FLAGS 0
/* Extra event-logging around the head-write-back race when non-zero. */
#define IXGE_HWBP_RACE_ELOG 0

#define PCI_VENDOR_ID_INTEL 0x8086

/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
#define XGE_PHY_DEV_TYPE_PMA_PMD 1
#define XGE_PHY_DEV_TYPE_PHY_XS 4
#define XGE_PHY_ID1 0x2
#define XGE_PHY_ID2 0x3
#define XGE_PHY_CONTROL 0x0
#define XGE_PHY_CONTROL_RESET (1 << 15)
54
/* Single global driver instance; shared by all ixge devices. */
ixge_main_t ixge_main;
/* Graph node registrations defined later in this file. */
static vlib_node_registration_t ixge_input_node;
static vlib_node_registration_t ixge_process_node;
58
/* Acquire the device's software semaphore register (SWSM).
   First spins/suspends until bit 0 reads clear... set by hardware
   semantics of the register; then sets bit 1 and re-reads until the
   set sticks.  Must be called from a vlib process context since it may
   suspend.  Paired with ixge_semaphore_release. */
static void
ixge_semaphore_get (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 i;

  i = 0;
  /* Wait for bit 0 of the semaphore register; suspend 100us between
     polls after the first attempt to avoid burning CPU. */
  while (!(r->software_semaphore & (1 << 0)))
    {
      if (i > 0)
	vlib_process_suspend (vm, 100e-6);
      i++;
    }
  /* Claim bit 1; loop until the write is observed (register write may
     not take effect if firmware owns the semaphore). */
  do
    {
      r->software_semaphore |= 1 << 1;
    }
  while (!(r->software_semaphore & (1 << 1)));
}
80
Damjan Marion00a9dca2016-08-17 17:05:46 +020081static void
82ixge_semaphore_release (ixge_device_t * xd)
Damjan Marionb4d89272016-05-12 22:14:45 +020083{
Damjan Marion00a9dca2016-08-17 17:05:46 +020084 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +020085 r->software_semaphore &= ~3;
86}
87
/* Acquire software/firmware ownership bits in the SW_FW_SYNC register.
   sw_mask selects the software-side resource bits; the corresponding
   firmware bits sit 5 positions higher.  Loops (suspending 10ms per
   retry) until firmware does not hold the resource, then claims it.
   Each attempt is bracketed by the hardware semaphore.  Must run in a
   vlib process context.  Paired with
   ixge_software_firmware_sync_release. */
static void
ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;
  u32 fw_mask = sw_mask << 5;	/* firmware's copy of the same bits */
  u32 m, done = 0;

  while (!done)
    {
      ixge_semaphore_get (xd);
      m = r->software_firmware_sync;
      /* Free only if firmware is not holding the resource. */
      done = (m & fw_mask) == 0;
      if (done)
	r->software_firmware_sync = m | sw_mask;
      ixge_semaphore_release (xd);
      if (!done)
	vlib_process_suspend (vm, 10e-3);
    }
}
109
Damjan Marion00a9dca2016-08-17 17:05:46 +0200110static void
111ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
Damjan Marionb4d89272016-05-12 22:14:45 +0200112{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200113 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200114 ixge_semaphore_get (xd);
115 r->software_firmware_sync &= ~sw_mask;
116 ixge_semaphore_release (xd);
117}
118
/* Read or write one clause-45 PHY register over MDIO.
   dev_type: MMD device type (< 32); reg_index: 16-bit register address;
   v: value to write when is_read == 0.
   Returns the value read (when is_read), else echoes v back.
   Takes the software/firmware sync bit for this PHY for the duration.
   The two command cycles (address, then read/write) busy-wait on the
   command-busy bit with no process suspend. */
u32
ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
			 u32 v, u32 is_read)
{
  ixge_regs_t *r = xd->regs;
  const u32 busy_bit = 1 << 30;
  u32 x;

  ASSERT (xd->phy_index < 2);
  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));

  ASSERT (reg_index < (1 << 16));
  ASSERT (dev_type < (1 << 5));
  /* For a write, data must be staged before the command cycles. */
  if (!is_read)
    r->xge_mac.phy_data = v;

  /* Address cycle. */
  x =
    reg_index | (dev_type << 16) | (xd->
				    phys[xd->phy_index].mdio_address << 21);
  r->xge_mac.phy_command = x | busy_bit;
  /* Busy wait timed to take 28e-6 secs.  No suspend. */
  while (r->xge_mac.phy_command & busy_bit)
    ;

  /* Data cycle: op code 2 = read, 1 = write. */
  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
  while (r->xge_mac.phy_command & busy_bit)
    ;

  /* Read data comes back in the high half of the data register. */
  if (is_read)
    v = r->xge_mac.phy_data >> 16;

  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));

  return v;
}
155
Damjan Marion00a9dca2016-08-17 17:05:46 +0200156static u32
157ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
Damjan Marionb4d89272016-05-12 22:14:45 +0200158{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200159 return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160 1);
161}
162
163static void
164ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165{
166 (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167 0);
168}
169
170static void
171ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172{
173 ixge_main_t *xm = &ixge_main;
174 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200175 u32 v;
176
177 v = 0;
178 v |= (sda != 0) << 3;
179 v |= (scl != 0) << 1;
180 xd->regs->i2c_control = v;
181}
182
Damjan Marion00a9dca2016-08-17 17:05:46 +0200183static void
184ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
Damjan Marionb4d89272016-05-12 22:14:45 +0200185{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200186 ixge_main_t *xm = &ixge_main;
187 ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
Damjan Marionb4d89272016-05-12 22:14:45 +0200188 u32 v;
189
190 v = xd->regs->i2c_control;
191 *sda = (v & (1 << 2)) != 0;
192 *scl = (v & (1 << 0)) != 0;
193}
194
/* Read one 16-bit word from the device EEPROM at the given word
   address.  Kicks off the read via the eeprom_read register, then
   busy-waits (no suspend) on the done bit; data is returned in the
   register's high half. */
static u16
ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
  ixge_regs_t *r = xd->regs;
  u32 v;
  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
  /* Wait for done bit. */
  while (!((v = r->eeprom_read) & (1 << 1)))
    ;
  return v >> 16;
}
206
207static void
208ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
209{
210 u32 tx_disable_bit = 1 << 3;
211 if (enable)
212 xd->regs->sdp_control &= ~tx_disable_bit;
213 else
214 xd->regs->sdp_control |= tx_disable_bit;
215}
216
217static void
218ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
219{
220 u32 is_10g_bit = 1 << 5;
221 if (enable)
222 xd->regs->sdp_control |= is_10g_bit;
223 else
224 xd->regs->sdp_control &= ~is_10g_bit;
225}
226
/* Apply the per-SFP-type analog init sequence stored in the device
   EEPROM.  EEPROM word 0x2b points at a list of (id, reg-values-addr)
   pairs terminated by 0xffff; the entry matching sfp_type points at a
   0xffff-terminated list of values to write to core_analog_config.
   Note the reads below use pre-increment on the address, so each call
   advances through consecutive EEPROM words.
   Returns a clib error if no init sequence or no matching id exists. */
static clib_error_t *
ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
{
  u16 a, id, reg_values_addr = 0;

  a = ixge_read_eeprom (xd, 0x2b);
  if (a == 0 || a == 0xffff)
    return clib_error_create ("no init sequence in eeprom");

  /* Scan the (id, address) pair list for sfp_type. */
  while (1)
    {
      id = ixge_read_eeprom (xd, ++a);
      if (id == 0xffff)
	break;
      reg_values_addr = ixge_read_eeprom (xd, ++a);
      if (id == sfp_type)
	break;
    }
  if (id != sfp_type)
    return clib_error_create ("failed to find id 0x%x", sfp_type);

  /* Write the value list to the analog config register; resource bit 3
     arbitrates access with firmware. */
  ixge_software_firmware_sync (xd, 1 << 3);
  while (1)
    {
      u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
      if (v == 0xffff)
	break;
      xd->regs->core_analog_config = v;
    }
  ixge_software_firmware_sync_release (xd, 1 << 3);

  /* Make sure laser is off.  We'll turn on the laser when
     the interface is brought up. */
  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);

  return 0;
}
265
/* Bring the SFP link up or down.  On up: program autonegotiation for
   10G serial SFI, wait (busy, no suspend) for link partner ability,
   then switch link mode to 10G SFI serdes and restart autoneg again.
   In both directions the laser is switched to follow is_up.
   NOTE(review): the busy-wait on link_partner_ability has no timeout —
   it spins forever if no partner responds. */
static void
ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
{
  u32 v;

  if (is_up)
    {
      /* pma/pmd 10g serial SFI. */
      xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
      xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;

      v = xd->regs->xge_mac.auto_negotiation_control;
      v &= ~(7 << 13);
      v |= (0 << 13);
      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Busy-wait for link partner ability bits. */
      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
	;

      v = xd->regs->xge_mac.auto_negotiation_control;

      /* link mode 10g sfi serdes */
      v &= ~(7 << 13);
      v |= (3 << 13);

      /* Restart autoneg. */
      v |= (1 << 12);
      xd->regs->xge_mac.auto_negotiation_control = v;

      /* Dummy read; link_status is read-to-clear. */
      xd->regs->xge_mac.link_status;
    }

  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);

  /* Give time for link partner to notice that we're up. */
  if (is_up && vlib_in_process_context (vlib_get_main ()))
    {
      vlib_process_suspend (vlib_get_main (), 300e-3);
    }
}
308
309always_inline ixge_dma_regs_t *
310get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
311{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200312 ixge_regs_t *r = xd->regs;
Damjan Marionb4d89272016-05-12 22:14:45 +0200313 ASSERT (qi < 128);
314 if (rt == VLIB_RX)
315 return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316 else
317 return &r->tx_dma[qi];
318}
319
/* vnet admin up/down callback.  On up: enable rx, enable tx dma, and
   enable rx queue 0 (control bit 25), busy-waiting until the enable
   bit reads back set.  On down: clear the rx/tx enables.  Either way,
   propagate the state to the SFP/laser.  Always returns no error. */
static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);

  if (is_up)
    {
      xd->regs->rx_enable |= 1;
      xd->regs->tx_dma_control |= 1;
      /* Enable rx queue 0 and wait for hardware to acknowledge. */
      dr->control |= 1 << 25;
      while (!(dr->control & (1 << 25)))
	;
    }
  else
    {
      xd->regs->rx_enable &= ~1;
      xd->regs->tx_dma_control &= ~1;
    }

  ixge_sfp_device_up_down (xd, is_up);

  return /* no error */ 0;
}
347
/* Initialize an SFP-based port: set up the bit-banged I2C bus, read
   the 128-byte SFP module EEPROM at address 0x50, and if it is valid
   run the EEPROM-driven analog init sequence.  SFP modules have no
   MDIO PHY, so mdio_address is set to ~0. */
static void
ixge_sfp_phy_init (ixge_device_t * xd)
{
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  i2c_bus_t *ib = &xd->i2c_bus;

  ib->private_data = xd->device_index;
  ib->put_bits = ixge_i2c_put_bits;
  ib->get_bits = ixge_i2c_get_bits;
  vlib_i2c_init (ib);

  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);

  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
    xd->sfp_eeprom.id = SFP_ID_unknown;
  else
    {
      /* FIXME 5 => SR/LR eeprom ID. */
      clib_error_t *e =
	ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
      if (e)
	clib_error_report (e);
    }

  /* No MDIO-addressable PHY on SFP ports. */
  phy->mdio_address = ~0;
}
374
/* Initialize the PHY for this device.  SFP-based 82599 variants are
   delegated to ixge_sfp_phy_init.  Otherwise: probe all 32 MDIO
   addresses for a responding PHY, record its 32-bit id (ID1:ID2),
   event-log the discovery, then reset the PHY and wait for the
   self-clearing reset bit.  Must run in a vlib process context
   (suspends while waiting for reset). */
static void
ixge_phy_init (ixge_device_t * xd)
{
  ixge_main_t *xm = &ixge_main;
  vlib_main_t *vm = xm->vlib_main;
  ixge_phy_t *phy = xd->phys + xd->phy_index;

  switch (xd->device_id)
    {
    case IXGE_82599_sfp:
    case IXGE_82599_sfp_em:
    case IXGE_82599_sfp_fcoe:
      /* others? */
      return ixge_sfp_phy_init (xd);

    default:
      break;
    }

  /* Probe address of phy. */
  {
    u32 i, v;

    phy->mdio_address = ~0;
    for (i = 0; i < 32; i++)
      {
	phy->mdio_address = i;
	v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
	/* 0 and 0xffff both mean "nothing answered here". */
	if (v != 0xffff && v != 0)
	  break;
      }

    /* No PHY found? */
    if (i >= 32)
      return;
  }

  phy->id =
    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));

  /* Event-log the discovered PHY for debugging. */
  {
    ELOG_TYPE_DECLARE (e) =
    {
      .function = (char *) __FUNCTION__,.format =
	"ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
    struct
    {
      u32 instance, id, address;
    } *ed;
    ed = ELOG_DATA (&vm->elog_main, e);
    ed->instance = xd->device_index;
    ed->id = phy->id;
    ed->address = phy->mdio_address;
  }

  /* Reset phy. */
  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
		      XGE_PHY_CONTROL_RESET);

  /* Wait for self-clearing reset bit to clear. */
  do
    {
      vlib_process_suspend (vm, 1e-3);
    }
  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
	 XGE_PHY_CONTROL_RESET);
}
443
/* Format callback: pretty-print a completed (hardware -> software) rx
   descriptor: ownership, length, l3 offset, layer-2/vlan info, ip4/ip6
   protocol bits and checksum status as decoded from status words 0 and
   2.  Takes an ixge_rx_from_hw_descriptor_t * vararg. */
static u8 *
format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
{
  ixge_rx_from_hw_descriptor_t *d =
    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
  u32 s0 = d->status[0], s2 = d->status[2];
  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
  u32 indent = format_get_indent (s);

  s = format (s, "%s-owned",
	      (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
	      "hw");
  s =
    format (s, ", length this descriptor %d, l3 offset %d",
	    d->n_packet_bytes_this_descriptor,
	    IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
    s = format (s, ", end-of-packet");

  s = format (s, "\n%U", format_white_space, indent);

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR)
    s = format (s, "layer2 error");

  /* Pure layer-2 packets carry only a type code; nothing more to show. */
  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
    {
      s = format (s, "layer 2 type %d", (s0 & 0x1f));
      return s;
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
    s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
		format_white_space, indent);

  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
    {
      s = format (s, "ip4%s",
		  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
		  "");
      if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
	s = format (s, " checksum %s",
		    (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
		    "bad" : "ok");
    }
  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
    s = format (s, "ip6%s",
		(s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
		"");
  is_tcp = is_udp = 0;
  if ((is_ip = (is_ip4 | is_ip6)))
    {
      is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
      is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
      if (is_tcp)
	s = format (s, ", tcp");
      if (is_udp)
	s = format (s, ", udp");
    }

  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
    s = format (s, ", tcp checksum %s",
		(s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
		"ok");
  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
    s = format (s, ", udp checksum %s",
		(s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
		"ok");

  return s;
}
514
/* Format callback: pretty-print a tx descriptor — buffer address and
   sizes, then each status0/status1 bit-field, skipping fields whose
   value matches the common/default case.  Takes an
   ixge_tx_descriptor_t * vararg. */
static u8 *
format_ixge_tx_descriptor (u8 * s, va_list * va)
{
  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
  u32 s0 = d->status0, s1 = d->status1;
  u32 indent = format_get_indent (s);
  u32 v;

  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
	      d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);

  s = format (s, "\n%U", format_white_space, indent);

  if ((v = (s0 >> 0) & 3))
    s = format (s, "reserved 0x%x, ", v);

  if ((v = (s0 >> 2) & 3))
    s = format (s, "mac 0x%x, ", v);

  /* Type field: 3 is the normal value, so only show deviations. */
  if ((v = (s0 >> 4) & 0xf) != 3)
    s = format (s, "type 0x%x, ", v);

  s = format (s, "%s%s%s%s%s%s%s%s",
	      (s0 & (1 << 8)) ? "eop, " : "",
	      (s0 & (1 << 9)) ? "insert-fcs, " : "",
	      (s0 & (1 << 10)) ? "reserved26, " : "",
	      (s0 & (1 << 11)) ? "report-status, " : "",
	      (s0 & (1 << 12)) ? "reserved28, " : "",
	      (s0 & (1 << 13)) ? "is-advanced, " : "",
	      (s0 & (1 << 14)) ? "vlan-enable, " : "",
	      (s0 & (1 << 15)) ? "tx-segmentation, " : "");

  if ((v = s1 & 0xf) != 0)
    s = format (s, "status 0x%x, ", v);

  if ((v = (s1 >> 4) & 0xf))
    s = format (s, "context 0x%x, ", v);

  if ((v = (s1 >> 8) & 0x3f))
    s = format (s, "options 0x%x, ", v);

  return s;
}
558
/* Packet-trace record captured by ixge_rx_trace for each rx buffer. */
typedef struct
{
  /* Descriptor as received from hardware and as re-armed for hardware. */
  ixge_descriptor_t before, after;

  /* vlib buffer index of the packet data. */
  u32 buffer_index;

  /* Index into ixge_main.devices. */
  u16 device_index;

  /* Rx queue the descriptor came from. */
  u8 queue_index;

  /* Non-zero if this buffer starts a packet (not a chained fragment). */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;
574
/* Trace format callback for ixge_rx_dma_trace_t records: interface and
   queue, descriptor before/after, buffer metadata, then the first
   bytes of packet data via the node's buffer formatter (hex dump for
   non-start-of-packet buffers). */
static u8 *
format_ixge_rx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s =
      format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
	      t->queue_index);
  }

  s = format (s, "\n%Ubefore: %U",
	      format_white_space, indent,
	      format_ixge_rx_from_hw_descriptor, &t->before);
  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
	      format_white_space, indent,
	      t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);

  s = format (s, "\n%Ubuffer 0x%x: %U",
	      format_white_space, indent,
	      t->buffer_index, format_vlib_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  /* Only start-of-packet buffers have parseable headers. */
  f = node->format_buffer;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}
615
/* Error counters: each entry expands to an IXGE_ERROR_<name> enum
   value and its human-readable counter string. */
#define foreach_ixge_error \
  _ (none, "no error") \
  _ (tx_full_drops, "tx ring full drops") \
  _ (ip4_checksum_error, "ip4 checksum errors") \
  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

typedef enum
{
#define _(f,s) IXGE_ERROR_##f,
  foreach_ixge_error
#undef _
  IXGE_N_ERROR,
} ixge_error_t;
630
/* Map one rx descriptor's status words (s00 = status[0], s02 =
   status[2]) to the next graph node, error code, and vlib buffer
   flags.  Dispatch order: default ethernet-input; ip4/ip6 bits select
   the ip input nodes; a configured per-interface next index overrides
   both; any error forces the drop node.  Flags report l4 checksum
   computed/correct status.  Keep in sync with the x2 variant below. */
always_inline void
ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
				       u32 s00, u32 s02,
				       u8 * next0, u8 * error0, u32 * flags0)
{
  u8 is0_ip4, is0_ip6, n0, e0;
  u32 f0;

  e0 = IXGE_ERROR_none;
  n0 = IXGE_RX_NEXT_ETHERNET_INPUT;

  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;

  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
	? IXGE_ERROR_ip4_checksum_error : e0);

  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;

  /* Per-interface redirect overrides protocol dispatch. */
  n0 = (xd->per_interface_next_index != ~0) ?
    xd->per_interface_next_index : n0;

  /* Check for error. */
  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;

  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
		| IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
	? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);

  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
		 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
	 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  *error0 = e0;
  *next0 = n0;
  *flags0 = f0;
}
669
670always_inline void
Damjan Marion00a9dca2016-08-17 17:05:46 +0200671ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
672 u32 s00, u32 s02,
Damjan Marionb4d89272016-05-12 22:14:45 +0200673 u32 s10, u32 s12,
674 u8 * next0, u8 * error0, u32 * flags0,
675 u8 * next1, u8 * error1, u32 * flags1)
676{
677 u8 is0_ip4, is0_ip6, n0, e0;
678 u8 is1_ip4, is1_ip6, n1, e1;
679 u32 f0, f1;
680
681 e0 = e1 = IXGE_ERROR_none;
682 n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683
684 is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
685 is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
686
687 n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688 n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689
690 e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200691 ? IXGE_ERROR_ip4_checksum_error : e0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200692 e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
Damjan Marion00a9dca2016-08-17 17:05:46 +0200693 ? IXGE_ERROR_ip4_checksum_error : e1);
Damjan Marionb4d89272016-05-12 22:14:45 +0200694
695 is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696 is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697
698 n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699 n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700
701 n0 = (xd->per_interface_next_index != ~0) ?
702 xd->per_interface_next_index : n0;
703 n1 = (xd->per_interface_next_index != ~0) ?
704 xd->per_interface_next_index : n1;
705
706 /* Check for error. */
707 n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708 n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709
710 *error0 = e0;
711 *error1 = e1;
712
713 *next0 = n0;
714 *next1 = n1;
715
716 f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
717 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200718 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200719 f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
720 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200721 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
Damjan Marionb4d89272016-05-12 22:14:45 +0200722
723 f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
724 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200725 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200726 f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
727 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
Damjan Marion213b5aa2017-07-13 21:19:27 +0200728 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
Damjan Marionb4d89272016-05-12 22:14:45 +0200729
730 *flags0 = f0;
731 *flags1 = f1;
732}
733
/* Record packet-trace entries for n_descriptors rx descriptors that
   were just processed: for each buffer, re-derive the next node /
   error / flags from the saved "before" descriptor status, then copy
   before/after descriptors and buffer metadata (plus leading packet
   bytes) into an ixge_rx_dma_trace_t.  Processes two at a time, then
   the remainder.  Start-of-packet tracking mirrors the rx node:
   next_index_sop is latched at each packet start and reused for
   chained buffers. */
static void
ixge_rx_trace (ixge_main_t * xm,
	       ixge_device_t * xd,
	       ixge_dma_queue_t * dq,
	       ixge_descriptor_t * before_descriptors,
	       u32 * before_buffers,
	       ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
  vlib_main_t *vm = xm->vlib_main;
  vlib_node_runtime_t *node = dq->rx.node;
  ixge_rx_from_hw_descriptor_t *bd;
  ixge_rx_to_hw_descriptor_t *ad;
  u32 *b, n_left, is_sop, next_index_sop;

  n_left = n_descriptors;
  b = before_buffers;
  bd = &before_descriptors->rx_from_hw;
  ad = &after_descriptors->rx_to_hw;
  /* Resume start-of-packet state saved by the rx node. */
  is_sop = dq->rx.is_start_of_packet;
  next_index_sop = dq->rx.saved_start_of_packet_next_index;

  while (n_left >= 2)
    {
      u32 bi0, bi1, flags0, flags1;
      vlib_buffer_t *b0, *b1;
      ixge_rx_dma_trace_t *t0, *t1;
      u8 next0, error0, next1, error1;

      bi0 = b[0];
      bi1 = b[1];
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      ixge_rx_next_and_error_from_status_x2 (xd,
					     bd[0].status[0], bd[0].status[2],
					     bd[1].status[0], bd[1].status[2],
					     &next0, &error0, &flags0,
					     &next1, &error1, &flags1);

      /* Chained buffers inherit the next index of their packet start. */
      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      next_index_sop = is_sop ? next1 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
      t1->is_start_of_packet = is_sop;
      is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t1->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t1->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t1->before.rx_from_hw = bd[1];
      t0->after.rx_to_hw = ad[0];
      t1->after.rx_to_hw = ad[1];
      t0->buffer_index = bi0;
      t1->buffer_index = bi1;
      /* Copy buffer metadata (minus pre_data) and leading packet bytes. */
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));
      memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
	      sizeof (t1->buffer.pre_data));

      b += 2;
      bd += 2;
      ad += 2;
    }

  /* Remaining single descriptor, if any. */
  while (n_left >= 1)
    {
      u32 bi0, flags0;
      vlib_buffer_t *b0;
      ixge_rx_dma_trace_t *t0;
      u8 next0, error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      ixge_rx_next_and_error_from_status_x1 (xd,
					     bd[0].status[0], bd[0].status[2],
					     &next0, &error0, &flags0);

      next_index_sop = is_sop ? next0 : next_index_sop;
      vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->is_start_of_packet = is_sop;
      is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      t0->queue_index = dq->queue_index;
      t0->device_index = xd->device_index;
      t0->before.rx_from_hw = bd[0];
      t0->after.rx_to_hw = ad[0];
      t0->buffer_index = bi0;
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
	      sizeof (t0->buffer.pre_data));

      b += 1;
      bd += 1;
      ad += 1;
    }
}
845
/* Packet-trace record captured on the tx path for each tx buffer. */
typedef struct
{
  /* Tx descriptor as handed to hardware. */
  ixge_tx_descriptor_t descriptor;

  /* vlib buffer index of the packet data. */
  u32 buffer_index;

  /* Index into ixge_main.devices. */
  u16 device_index;

  /* Tx queue the descriptor was placed on. */
  u8 queue_index;

  /* Non-zero if this buffer starts a packet (not a chained fragment). */
  u8 is_start_of_packet;

  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;
861
/* Trace format callback for ixge_tx_dma_trace_t records: interface and
   queue, the tx descriptor, buffer metadata, then leading packet bytes
   formatted as an ethernet header (hex dump for non-start-of-packet
   buffers). */
static u8 *
format_ixge_tx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
  format_function_t *f;
  u32 indent = format_get_indent (s);

  {
    vnet_sw_interface_t *sw =
      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
    s =
      format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
	      t->queue_index);
  }

  s = format (s, "\n%Udescriptor: %U",
	      format_white_space, indent,
	      format_ixge_tx_descriptor, &t->descriptor);

  s = format (s, "\n%Ubuffer 0x%x: %U",
	      format_white_space, indent,
	      t->buffer_index, format_vlib_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  /* Only start-of-packet buffers begin with an ethernet header. */
  f = format_ethernet_header_with_length;
  if (!f || !t->is_start_of_packet)
    f = format_hex_bytes;
  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}
899
Damjan Marion00a9dca2016-08-17 17:05:46 +0200900typedef struct
901{
902 vlib_node_runtime_t *node;
Damjan Marionb4d89272016-05-12 22:14:45 +0200903
904 u32 is_start_of_packet;
905
906 u32 n_bytes_in_packet;
907
Damjan Marion00a9dca2016-08-17 17:05:46 +0200908 ixge_tx_descriptor_t *start_of_packet_descriptor;
Damjan Marionb4d89272016-05-12 22:14:45 +0200909} ixge_tx_state_t;
910
911static void
912ixge_tx_trace (ixge_main_t * xm,
913 ixge_device_t * xd,
914 ixge_dma_queue_t * dq,
915 ixge_tx_state_t * tx_state,
916 ixge_tx_descriptor_t * descriptors,
Damjan Marion00a9dca2016-08-17 17:05:46 +0200917 u32 * buffers, uword n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +0200918{
Damjan Marion00a9dca2016-08-17 17:05:46 +0200919 vlib_main_t *vm = xm->vlib_main;
920 vlib_node_runtime_t *node = tx_state->node;
921 ixge_tx_descriptor_t *d;
922 u32 *b, n_left, is_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +0200923
924 n_left = n_descriptors;
925 b = buffers;
926 d = descriptors;
927 is_sop = tx_state->is_start_of_packet;
928
929 while (n_left >= 2)
930 {
931 u32 bi0, bi1;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200932 vlib_buffer_t *b0, *b1;
933 ixge_tx_dma_trace_t *t0, *t1;
Damjan Marionb4d89272016-05-12 22:14:45 +0200934
935 bi0 = b[0];
936 bi1 = b[1];
937 n_left -= 2;
938
939 b0 = vlib_get_buffer (vm, bi0);
940 b1 = vlib_get_buffer (vm, bi1);
941
942 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943 t0->is_start_of_packet = is_sop;
944 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945
946 t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947 t1->is_start_of_packet = is_sop;
948 is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949
950 t0->queue_index = dq->queue_index;
951 t1->queue_index = dq->queue_index;
952 t0->device_index = xd->device_index;
953 t1->device_index = xd->device_index;
954 t0->descriptor = d[0];
955 t1->descriptor = d[1];
956 t0->buffer_index = bi0;
957 t1->buffer_index = bi1;
958 memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959 memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960 memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961 sizeof (t0->buffer.pre_data));
962 memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963 sizeof (t1->buffer.pre_data));
964
965 b += 2;
966 d += 2;
967 }
968
969 while (n_left >= 1)
970 {
971 u32 bi0;
Damjan Marion00a9dca2016-08-17 17:05:46 +0200972 vlib_buffer_t *b0;
973 ixge_tx_dma_trace_t *t0;
Damjan Marionb4d89272016-05-12 22:14:45 +0200974
975 bi0 = b[0];
976 n_left -= 1;
977
978 b0 = vlib_get_buffer (vm, bi0);
979
980 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981 t0->is_start_of_packet = is_sop;
982 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983
984 t0->queue_index = dq->queue_index;
985 t0->device_index = xd->device_index;
986 t0->descriptor = d[0];
987 t0->buffer_index = bi0;
988 memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989 memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990 sizeof (t0->buffer.pre_data));
991
992 b += 1;
993 d += 1;
994 }
995}
996
997always_inline uword
998ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
999{
1000 i32 d = i1 - i0;
1001 ASSERT (i0 < q->n_descriptors);
1002 ASSERT (i1 < q->n_descriptors);
1003 return d < 0 ? q->n_descriptors + d : d;
1004}
1005
1006always_inline uword
1007ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1008{
1009 u32 d = i0 + i1;
1010 ASSERT (i0 < q->n_descriptors);
1011 ASSERT (i1 < q->n_descriptors);
1012 d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013 return d;
1014}
1015
1016always_inline uword
Damjan Marion00a9dca2016-08-17 17:05:46 +02001017ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1018 ixge_tx_descriptor_t * d)
Damjan Marionb4d89272016-05-12 22:14:45 +02001019{
1020 u32 cmp;
1021
1022 cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1023 ^ xm->tx_descriptor_template.status0);
1024 if (cmp)
1025 return 0;
1026 cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1027 ^ xm->tx_descriptor_template.status1);
1028 if (cmp)
1029 return 0;
1030
1031 return 1;
1032}
1033
/* Fill a contiguous (non-wrapping) run of TX descriptors starting at
   start_descriptor_index with the given buffers.  Records previously
   transmitted buffer indices into xm->tx_buffers_pending_free, tracks
   start/end of packet across buffer chains, and back-patches each
   packet's first descriptor (d_sop) with the total packet byte count.
   Returns n_descriptors; residual packet state is left in tx_state for
   the caller's next (post-wrap) call. */
static uword
ixge_tx_no_wrap (ixge_main_t * xm,
		 ixge_device_t * xd,
		 ixge_dma_queue_t * dq,
		 u32 * buffers,
		 u32 start_descriptor_index,
		 u32 n_descriptors, ixge_tx_state_t * tx_state)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_tx_descriptor_t *d, *d_sop;
  u32 n_left = n_descriptors;
  /* Old buffer indices displaced from the ring are appended here;
     caller's vector length is fixed up at the bottom. */
  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
  u32 *to_tx =
    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
  u32 is_sop = tx_state->is_start_of_packet;
  u32 len_sop = tx_state->n_bytes_in_packet;
  u16 template_status = xm->tx_descriptor_template.status0;
  u32 descriptor_prefetch_rotor = 0;

  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
  d = &dq->descriptors[start_descriptor_index].tx;
  /* If mid-packet, keep pointing at the packet's first descriptor from
     the previous call so its byte count can still be patched. */
  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;

  /* Dual loop: two descriptors per iteration, two iterations ahead
     prefetched. */
  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, fi0, len0;
      u32 bi1, fi1, len1;
      u8 is_eop0, is_eop1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      /* One cache line of descriptors covers several entries; only
         prefetch every other iteration. */
      if ((descriptor_prefetch_rotor & 0x3) == 0)
	CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);

      descriptor_prefetch_rotor += 2;

      bi0 = buffers[0];
      bi1 = buffers[1];

      /* Swap new buffer into the ring slot; queue the displaced one
         for freeing (slot may be 0 = empty, hence the conditional
         advance of to_free). */
      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      to_free[0] = fi1 = to_tx[1];
      to_tx[1] = bi1;
      to_free += fi1 != 0;

      buffers += 2;
      n_left -= 2;
      to_tx += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
      is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;
      len1 = b1->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));

      d[0].buffer_address =
	vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
      d[1].buffer_address =
	vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;

      d[0].n_bytes_this_buffer = len0;
      d[1].n_bytes_this_buffer = len1;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
      d[1].status0 =
	template_status | (is_eop1 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      /* Accumulate packet length and back-patch the start-of-packet
         descriptor; d_sop advances past d only at end of packet. */
      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;

      len_sop = (is_sop ? 0 : len_sop) + len1;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop1 ? d : d_sop;

      is_sop = is_eop1;
    }

  /* Single loop for whatever remains. */
  while (n_left > 0)
    {
      vlib_buffer_t *b0;
      u32 bi0, fi0, len0;
      u8 is_eop0;

      bi0 = buffers[0];

      to_free[0] = fi0 = to_tx[0];
      to_tx[0] = bi0;
      to_free += fi0 != 0;

      buffers += 1;
      n_left -= 1;
      to_tx += 1;

      b0 = vlib_get_buffer (vm, bi0);

      is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;

      len0 = b0->current_length;

      ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));

      d[0].buffer_address =
	vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;

      d[0].n_bytes_this_buffer = len0;

      d[0].status0 =
	template_status | (is_eop0 <<
			   IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);

      len_sop = (is_sop ? 0 : len_sop) + len0;
      d_sop[0].status1 =
	IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
      d += 1;
      d_sop = is_eop0 ? d : d_sop;

      is_sop = is_eop0;
    }

  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
    {
      to_tx =
	vec_elt_at_index (dq->descriptor_buffer_indices,
			  start_descriptor_index);
      ixge_tx_trace (xm, xd, dq, tx_state,
		     &dq->descriptors[start_descriptor_index].tx, to_tx,
		     n_descriptors);
    }

  /* Commit the pending-free vector length to however many old buffer
     indices were actually appended above. */
  _vec_len (xm->tx_buffers_pending_free) =
    to_free - xm->tx_buffers_pending_free;

  /* When we are done d_sop can point to end of ring. Wrap it if so. */
  {
    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;

    ASSERT (d_sop - d_start <= dq->n_descriptors);
    d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
  }

  /* Save residual per-packet state for the caller's next segment. */
  tx_state->is_start_of_packet = is_sop;
  tx_state->start_of_packet_descriptor = d_sop;
  tx_state->n_bytes_in_packet = len_sop;

  return n_descriptors;
}
1201
/* Device TX function: place a frame of buffer indices onto TX DMA
   queue 0, dropping whole packets from the tail if the ring is full,
   then ring the hardware tail doorbell and free completed buffers. */
static uword
ixge_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
  ixge_dma_queue_t *dq;
  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
  u32 queue_index = 0;		/* fixme parameter */
  ixge_tx_state_t tx_state;

  tx_state.node = node;
  tx_state.is_start_of_packet = 1;
  tx_state.start_of_packet_descriptor = 0;
  tx_state.n_bytes_in_packet = 0;

  from = vlib_frame_vector_args (f);

  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);

  /* Refresh our view of how far the hardware has progressed. */
  dq->head_index = dq->tx.head_index_write_back[0];

  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
  n_left_tx = dq->n_descriptors - 1;
  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);

  _vec_len (xm->tx_buffers_pending_free) = 0;

  n_descriptors_to_tx = f->n_vectors;
  n_tail_drop = 0;
  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
    {
      i32 i, n_ok, i_eop, i_sop;

      /* Ring is full: scan backwards from the last descriptor that
         fits to find a packet boundary, so only whole packets are
         sent and whole packets are dropped. */
      i_sop = i_eop = ~0;
      for (i = n_left_tx - 1; i >= 0; i--)
	{
	  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
	  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      if (i_sop != ~0 && i_eop != ~0)
		break;
	      i_eop = i;
	      i_sop = i + 1;
	    }
	}
      /* NOTE(review): when the scan stops at i == 0 everything is
         dropped even if i_eop was found — looks intentionally
         conservative, but confirm a single huge chain is handled as
         expected. */
      if (i == 0)
	n_ok = 0;
      else
	n_ok = i_eop + 1;

      /* Log the ring-full event for post-mortem analysis. */
      {
	ELOG_TYPE_DECLARE (e) =
	{
	  .function = (char *) __FUNCTION__,.format =
	    "ixge %d, ring full to tx %d head %d tail %d",.format_args =
	    "i2i2i2i2",};
	struct
	{
	  u16 instance, to_tx, head, tail;
	} *ed;
	ed = ELOG_DATA (&vm->elog_main, e);
	ed->instance = xd->device_index;
	ed->to_tx = n_descriptors_to_tx;
	ed->head = dq->head_index;
	ed->tail = dq->tail_index;
      }

      if (n_ok < n_descriptors_to_tx)
	{
	  /* Queue the un-sendable tail for freeing and count drops. */
	  n_tail_drop = n_descriptors_to_tx - n_ok;
	  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
	  vlib_error_count (vm, ixge_input_node.index,
			    IXGE_ERROR_tx_full_drops, n_tail_drop);
	}

      n_descriptors_to_tx = n_ok;
    }

  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;

  /* Process from tail to end of descriptor ring. */
  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
    {
      u32 n =
	clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
      n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
      from += n;
      n_descriptors_to_tx -= n;
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* Wrap: continue from the start of the ring if needed. */
  if (n_descriptors_to_tx > 0)
    {
      u32 n =
	ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
      from += n;
      ASSERT (n == n_descriptors_to_tx);
      dq->tail_index += n;
      ASSERT (dq->tail_index <= dq->n_descriptors);
      if (dq->tail_index == dq->n_descriptors)
	dq->tail_index = 0;
    }

  /* We should only get full packets. */
  ASSERT (tx_state.is_start_of_packet);

  /* Report status when last descriptor is done. */
  {
    u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
    d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
  }

  /* Give new descriptors to hardware. */
  {
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);

    /* Descriptor writes must be visible before the tail doorbell. */
    CLIB_MEMORY_BARRIER ();

    dr->tail_index = dq->tail_index;
  }

  /* Free any buffers that are done. */
  {
    u32 n = _vec_len (xm->tx_buffers_pending_free);
    if (n > 0)
      {
	vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
	_vec_len (xm->tx_buffers_pending_free) = 0;
	ASSERT (dq->tx.n_buffers_on_ring >= n);
	/* Tail-dropped buffers never went on the ring, so exclude
	   them from the on-ring count adjustment. */
	dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
      }
  }

  return f->n_vectors;
}
1343
1344static uword
1345ixge_rx_queue_no_wrap (ixge_main_t * xm,
1346 ixge_device_t * xd,
1347 ixge_dma_queue_t * dq,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001348 u32 start_descriptor_index, u32 n_descriptors)
Damjan Marionb4d89272016-05-12 22:14:45 +02001349{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001350 vlib_main_t *vm = xm->vlib_main;
1351 vlib_node_runtime_t *node = dq->rx.node;
1352 ixge_descriptor_t *d;
1353 static ixge_descriptor_t *d_trace_save;
1354 static u32 *d_trace_buffers;
Damjan Marionb4d89272016-05-12 22:14:45 +02001355 u32 n_descriptors_left = n_descriptors;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001356 u32 *to_rx =
1357 vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1358 u32 *to_add;
Damjan Marionb4d89272016-05-12 22:14:45 +02001359 u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1360 u32 bi_last = dq->rx.saved_last_buffer_index;
1361 u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1362 u32 is_sop = dq->rx.is_start_of_packet;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001363 u32 next_index, n_left_to_next, *to_next;
Damjan Marionb4d89272016-05-12 22:14:45 +02001364 u32 n_packets = 0;
1365 u32 n_bytes = 0;
1366 u32 n_trace = vlib_get_trace_count (vm, node);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001367 vlib_buffer_t *b_last, b_dummy;
Damjan Marionb4d89272016-05-12 22:14:45 +02001368
1369 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1370 d = &dq->descriptors[start_descriptor_index];
1371
1372 b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
1373 next_index = dq->rx.next_index;
1374
1375 if (n_trace > 0)
1376 {
1377 u32 n = clib_min (n_trace, n_descriptors);
1378 if (d_trace_save)
1379 {
1380 _vec_len (d_trace_save) = 0;
1381 _vec_len (d_trace_buffers) = 0;
1382 }
1383 vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1384 vec_add (d_trace_buffers, to_rx, n);
1385 }
1386
1387 {
1388 uword l = vec_len (xm->rx_buffers_to_add);
1389
1390 if (l < n_descriptors_left)
1391 {
1392 u32 n_to_alloc = 2 * dq->n_descriptors - l;
1393 u32 n_allocated;
1394
1395 vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1396
1397 _vec_len (xm->rx_buffers_to_add) = l;
1398 n_allocated = vlib_buffer_alloc_from_free_list
1399 (vm, xm->rx_buffers_to_add + l, n_to_alloc,
1400 xm->vlib_buffer_free_list_index);
1401 _vec_len (xm->rx_buffers_to_add) += n_allocated;
1402
Damjan Marion00a9dca2016-08-17 17:05:46 +02001403 /* Handle transient allocation failure */
1404 if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
Damjan Marionb4d89272016-05-12 22:14:45 +02001405 {
1406 if (n_allocated == 0)
1407 vlib_error_count (vm, ixge_input_node.index,
1408 IXGE_ERROR_rx_alloc_no_physmem, 1);
1409 else
1410 vlib_error_count (vm, ixge_input_node.index,
1411 IXGE_ERROR_rx_alloc_fail, 1);
1412
1413 n_descriptors_left = l + n_allocated;
1414 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001415 n_descriptors = n_descriptors_left;
Damjan Marionb4d89272016-05-12 22:14:45 +02001416 }
1417
1418 /* Add buffers from end of vector going backwards. */
1419 to_add = vec_end (xm->rx_buffers_to_add) - 1;
1420 }
1421
1422 while (n_descriptors_left > 0)
1423 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001424 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
Damjan Marionb4d89272016-05-12 22:14:45 +02001425
1426 while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1427 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001428 vlib_buffer_t *b0, *b1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001429 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1430 u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1431 u8 is_eop0, error0, next0;
1432 u8 is_eop1, error1, next1;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001433 ixge_descriptor_t d0, d1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001434
1435 vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1436 vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1437
Damjan Marion00a9dca2016-08-17 17:05:46 +02001438 CLIB_PREFETCH (d + 2, 32, STORE);
Damjan Marionb4d89272016-05-12 22:14:45 +02001439
Damjan Marion00a9dca2016-08-17 17:05:46 +02001440 d0.as_u32x4 = d[0].as_u32x4;
1441 d1.as_u32x4 = d[1].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001442
1443 s20 = d0.rx_from_hw.status[2];
1444 s21 = d1.rx_from_hw.status[2];
1445
1446 s00 = d0.rx_from_hw.status[0];
1447 s01 = d1.rx_from_hw.status[0];
1448
Damjan Marion00a9dca2016-08-17 17:05:46 +02001449 if (!
1450 ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001451 goto found_hw_owned_descriptor_x2;
1452
1453 bi0 = to_rx[0];
1454 bi1 = to_rx[1];
1455
1456 ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1457 fi0 = to_add[0];
1458 fi1 = to_add[-1];
1459
1460 to_rx[0] = fi0;
1461 to_rx[1] = fi1;
1462 to_rx += 2;
1463 to_add -= 2;
1464
Damjan Marion00a9dca2016-08-17 17:05:46 +02001465 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1466 vlib_buffer_is_known (vm, bi0));
1467 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1468 vlib_buffer_is_known (vm, bi1));
1469 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1470 vlib_buffer_is_known (vm, fi0));
1471 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1472 vlib_buffer_is_known (vm, fi1));
Damjan Marionb4d89272016-05-12 22:14:45 +02001473
1474 b0 = vlib_get_buffer (vm, bi0);
1475 b1 = vlib_get_buffer (vm, bi1);
1476
Damjan Marion00a9dca2016-08-17 17:05:46 +02001477 /*
1478 * Turn this on if you run into
1479 * "bad monkey" contexts, and you want to know exactly
1480 * which nodes they've visited... See main.c...
1481 */
1482 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1483 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001484
1485 CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1486 CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1487
1488 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1489 is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1490
1491 ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1492 &next0, &error0, &flags0,
1493 &next1, &error1, &flags1);
1494
1495 next0 = is_sop ? next0 : next_index_sop;
1496 next1 = is_eop0 ? next1 : next0;
1497 next_index_sop = next1;
1498
1499 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1500 b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1501
1502 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1503 vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001504 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1505 vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001506
1507 b0->error = node->errors[error0];
1508 b1->error = node->errors[error1];
1509
1510 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1511 len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1512 n_bytes += len0 + len1;
1513 n_packets += is_eop0 + is_eop1;
1514
1515 /* Give new buffers to hardware. */
1516 d0.rx_to_hw.tail_address =
Damjan Marion00a9dca2016-08-17 17:05:46 +02001517 vlib_get_buffer_data_physical_address (vm, fi0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001518 d1.rx_to_hw.tail_address =
Damjan Marion00a9dca2016-08-17 17:05:46 +02001519 vlib_get_buffer_data_physical_address (vm, fi1);
Damjan Marionb4d89272016-05-12 22:14:45 +02001520 d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
1521 d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001522 d[0].as_u32x4 = d0.as_u32x4;
1523 d[1].as_u32x4 = d1.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001524
1525 d += 2;
1526 n_descriptors_left -= 2;
1527
1528 /* Point to either l2 or l3 header depending on next. */
1529 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001530 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001531 l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001532 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001533
1534 b0->current_length = len0 - l3_offset0;
1535 b1->current_length = len1 - l3_offset1;
1536 b0->current_data = l3_offset0;
1537 b1->current_data = l3_offset1;
1538
1539 b_last->next_buffer = is_sop ? ~0 : bi0;
1540 b0->next_buffer = is_eop0 ? ~0 : bi1;
1541 bi_last = bi1;
1542 b_last = b1;
1543
1544 if (CLIB_DEBUG > 0)
1545 {
1546 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1547 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1548
1549 if (is_eop0)
1550 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001551 u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1552 /* follow_buffer_next */ 1);
1553 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001554 }
1555 if (is_eop1)
1556 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001557 u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1558 /* follow_buffer_next */ 1);
1559 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001560 }
1561 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001562 if (0) /* "Dave" version */
1563 {
1564 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1565 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001566
Damjan Marion00a9dca2016-08-17 17:05:46 +02001567 if (is_eop0)
1568 {
1569 to_next[0] = bi_sop0;
1570 to_next++;
1571 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001572
Damjan Marion00a9dca2016-08-17 17:05:46 +02001573 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1574 to_next, n_left_to_next,
1575 bi_sop0, next0);
1576 }
1577 if (is_eop1)
1578 {
1579 to_next[0] = bi_sop1;
1580 to_next++;
1581 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001582
Damjan Marion00a9dca2016-08-17 17:05:46 +02001583 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1584 to_next, n_left_to_next,
1585 bi_sop1, next1);
1586 }
1587 is_sop = is_eop1;
1588 bi_sop = bi_sop1;
1589 }
1590 if (1) /* "Eliot" version */
1591 {
1592 /* Speculatively enqueue to cached next. */
1593 u8 saved_is_sop = is_sop;
1594 u32 bi_sop_save = bi_sop;
Damjan Marionb4d89272016-05-12 22:14:45 +02001595
Damjan Marion00a9dca2016-08-17 17:05:46 +02001596 bi_sop = saved_is_sop ? bi0 : bi_sop;
1597 to_next[0] = bi_sop;
1598 to_next += is_eop0;
1599 n_left_to_next -= is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001600
Damjan Marion00a9dca2016-08-17 17:05:46 +02001601 bi_sop = is_eop0 ? bi1 : bi_sop;
1602 to_next[0] = bi_sop;
1603 to_next += is_eop1;
1604 n_left_to_next -= is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001605
Damjan Marion00a9dca2016-08-17 17:05:46 +02001606 is_sop = is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001607
Damjan Marion00a9dca2016-08-17 17:05:46 +02001608 if (PREDICT_FALSE
1609 (!(next0 == next_index && next1 == next_index)))
1610 {
1611 /* Undo speculation. */
1612 to_next -= is_eop0 + is_eop1;
1613 n_left_to_next += is_eop0 + is_eop1;
Damjan Marionb4d89272016-05-12 22:14:45 +02001614
Damjan Marion00a9dca2016-08-17 17:05:46 +02001615 /* Re-do both descriptors being careful about where we enqueue. */
1616 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1617 if (is_eop0)
1618 {
1619 if (next0 != next_index)
1620 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1621 else
1622 {
1623 to_next[0] = bi_sop;
1624 to_next += 1;
1625 n_left_to_next -= 1;
1626 }
1627 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001628
Damjan Marion00a9dca2016-08-17 17:05:46 +02001629 bi_sop = is_eop0 ? bi1 : bi_sop;
1630 if (is_eop1)
1631 {
1632 if (next1 != next_index)
1633 vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1634 else
1635 {
1636 to_next[0] = bi_sop;
1637 to_next += 1;
1638 n_left_to_next -= 1;
1639 }
1640 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001641
Damjan Marion00a9dca2016-08-17 17:05:46 +02001642 /* Switch cached next index when next for both packets is the same. */
1643 if (is_eop0 && is_eop1 && next0 == next1)
1644 {
1645 vlib_put_next_frame (vm, node, next_index,
1646 n_left_to_next);
1647 next_index = next0;
1648 vlib_get_next_frame (vm, node, next_index,
1649 to_next, n_left_to_next);
1650 }
1651 }
1652 }
Damjan Marionb4d89272016-05-12 22:14:45 +02001653 }
1654
Damjan Marion00a9dca2016-08-17 17:05:46 +02001655 /* Bail out of dual loop and proceed with single loop. */
Damjan Marionb4d89272016-05-12 22:14:45 +02001656 found_hw_owned_descriptor_x2:
1657
1658 while (n_descriptors_left > 0 && n_left_to_next > 0)
1659 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001660 vlib_buffer_t *b0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001661 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1662 u8 is_eop0, error0, next0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001663 ixge_descriptor_t d0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001664
Damjan Marion00a9dca2016-08-17 17:05:46 +02001665 d0.as_u32x4 = d[0].as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001666
1667 s20 = d0.rx_from_hw.status[2];
1668 s00 = d0.rx_from_hw.status[0];
1669
Damjan Marion00a9dca2016-08-17 17:05:46 +02001670 if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
Damjan Marionb4d89272016-05-12 22:14:45 +02001671 goto found_hw_owned_descriptor_x1;
1672
1673 bi0 = to_rx[0];
1674 ASSERT (to_add >= xm->rx_buffers_to_add);
1675 fi0 = to_add[0];
1676
1677 to_rx[0] = fi0;
1678 to_rx += 1;
1679 to_add -= 1;
1680
Damjan Marion00a9dca2016-08-17 17:05:46 +02001681 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1682 vlib_buffer_is_known (vm, bi0));
1683 ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
1684 vlib_buffer_is_known (vm, fi0));
Damjan Marionb4d89272016-05-12 22:14:45 +02001685
1686 b0 = vlib_get_buffer (vm, bi0);
1687
Damjan Marion00a9dca2016-08-17 17:05:46 +02001688 /*
1689 * Turn this on if you run into
1690 * "bad monkey" contexts, and you want to know exactly
1691 * which nodes they've visited...
1692 */
1693 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001694
1695 is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1696 ixge_rx_next_and_error_from_status_x1
Damjan Marion00a9dca2016-08-17 17:05:46 +02001697 (xd, s00, s20, &next0, &error0, &flags0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001698
1699 next0 = is_sop ? next0 : next_index_sop;
1700 next_index_sop = next0;
1701
1702 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1703
1704 vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001705 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001706
1707 b0->error = node->errors[error0];
1708
1709 len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1710 n_bytes += len0;
1711 n_packets += is_eop0;
1712
1713 /* Give new buffer to hardware. */
1714 d0.rx_to_hw.tail_address =
Damjan Marion00a9dca2016-08-17 17:05:46 +02001715 vlib_get_buffer_data_physical_address (vm, fi0);
Damjan Marionb4d89272016-05-12 22:14:45 +02001716 d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001717 d[0].as_u32x4 = d0.as_u32x4;
Damjan Marionb4d89272016-05-12 22:14:45 +02001718
1719 d += 1;
1720 n_descriptors_left -= 1;
1721
1722 /* Point to either l2 or l3 header depending on next. */
1723 l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001724 ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001725 b0->current_length = len0 - l3_offset0;
1726 b0->current_data = l3_offset0;
1727
1728 b_last->next_buffer = is_sop ? ~0 : bi0;
1729 bi_last = bi0;
1730 b_last = b0;
1731
1732 bi_sop = is_sop ? bi0 : bi_sop;
1733
1734 if (CLIB_DEBUG > 0 && is_eop0)
1735 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001736 u8 *msg =
1737 vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1738 ASSERT (!msg);
Damjan Marionb4d89272016-05-12 22:14:45 +02001739 }
1740
Damjan Marion00a9dca2016-08-17 17:05:46 +02001741 if (0) /* "Dave" version */
1742 {
1743 if (is_eop0)
1744 {
1745 to_next[0] = bi_sop;
1746 to_next++;
1747 n_left_to_next--;
Damjan Marionb4d89272016-05-12 22:14:45 +02001748
Damjan Marion00a9dca2016-08-17 17:05:46 +02001749 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1750 to_next, n_left_to_next,
1751 bi_sop, next0);
1752 }
1753 }
1754 if (1) /* "Eliot" version */
1755 {
1756 if (PREDICT_TRUE (next0 == next_index))
1757 {
1758 to_next[0] = bi_sop;
1759 to_next += is_eop0;
1760 n_left_to_next -= is_eop0;
1761 }
1762 else
1763 {
1764 if (next0 != next_index && is_eop0)
1765 vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
Damjan Marionb4d89272016-05-12 22:14:45 +02001766
Damjan Marion00a9dca2016-08-17 17:05:46 +02001767 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1768 next_index = next0;
1769 vlib_get_next_frame (vm, node, next_index,
1770 to_next, n_left_to_next);
1771 }
1772 }
1773 is_sop = is_eop0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001774 }
1775 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1776 }
1777
Damjan Marion00a9dca2016-08-17 17:05:46 +02001778found_hw_owned_descriptor_x1:
Damjan Marionb4d89272016-05-12 22:14:45 +02001779 if (n_descriptors_left > 0)
1780 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1781
1782 _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1783
1784 {
1785 u32 n_done = n_descriptors - n_descriptors_left;
1786
1787 if (n_trace > 0 && n_done > 0)
1788 {
1789 u32 n = clib_min (n_trace, n_done);
1790 ixge_rx_trace (xm, xd, dq,
1791 d_trace_save,
1792 d_trace_buffers,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001793 &dq->descriptors[start_descriptor_index], n);
Damjan Marionb4d89272016-05-12 22:14:45 +02001794 vlib_set_trace_count (vm, node, n_trace - n);
1795 }
1796 if (d_trace_save)
1797 {
1798 _vec_len (d_trace_save) = 0;
1799 _vec_len (d_trace_buffers) = 0;
1800 }
1801
1802 /* Don't keep a reference to b_last if we don't have to.
1803 Otherwise we can over-write a next_buffer pointer after already haven
1804 enqueued a packet. */
1805 if (is_sop)
1806 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02001807 b_last->next_buffer = ~0;
1808 bi_last = ~0;
Damjan Marionb4d89272016-05-12 22:14:45 +02001809 }
1810
1811 dq->rx.n_descriptors_done_this_call = n_done;
1812 dq->rx.n_descriptors_done_total += n_done;
1813 dq->rx.is_start_of_packet = is_sop;
1814 dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1815 dq->rx.saved_last_buffer_index = bi_last;
1816 dq->rx.saved_start_of_packet_next_index = next_index_sop;
1817 dq->rx.next_index = next_index;
1818 dq->rx.n_bytes += n_bytes;
1819
1820 return n_packets;
1821 }
1822}
1823
/* Service one RX DMA ring: hand descriptors the hardware has completed
   to ixge_rx_queue_no_wrap (), then return the refilled tail back to
   hardware.  Returns the number of complete packets received. */
static uword
ixge_rx_queue (ixge_main_t * xm,
	       ixge_device_t * xd,
	       vlib_node_runtime_t * node, u32 queue_index)
{
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
  uword n_packets = 0;
  u32 hw_head_index, sw_head_index;

  /* One time initialization. */
  if (!dq->rx.node)
    {
      dq->rx.node = node;
      dq->rx.is_start_of_packet = 1;
      dq->rx.saved_start_of_packet_buffer_index = ~0;
      dq->rx.saved_last_buffer_index = ~0;
    }

  /* Reset per-call state; the no-wrap worker accumulates into these. */
  dq->rx.next_index = node->cached_next_index;

  dq->rx.n_descriptors_done_total = 0;
  dq->rx.n_descriptors_done_this_call = 0;
  dq->rx.n_bytes = 0;

  /* Fetch head from hardware and compare to where we think we are. */
  hw_head_index = dr->head_index;
  sw_head_index = dq->head_index;

  if (hw_head_index == sw_head_index)
    goto done;

  /* Hardware head has wrapped past the end of the ring relative to us:
     first process from our head to the end of the descriptor array. */
  if (hw_head_index < sw_head_index)
    {
      u32 n_tried = dq->n_descriptors - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);

      /* Worker stopped early (e.g. hit a hardware-owned descriptor);
         don't attempt the second segment. */
      if (dq->rx.n_descriptors_done_this_call != n_tried)
	goto done;
    }
  /* Process the (possibly remaining) segment up to the hardware head. */
  if (hw_head_index >= sw_head_index)
    {
      u32 n_tried = hw_head_index - sw_head_index;
      n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
      sw_head_index =
	ixge_ring_add (dq, sw_head_index,
		       dq->rx.n_descriptors_done_this_call);
    }

done:
  dq->head_index = sw_head_index;
  dq->tail_index =
    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);

  /* Give tail back to hardware. */
  CLIB_MEMORY_BARRIER ();

  dr->tail_index = dq->tail_index;

  vlib_increment_combined_counter (vnet_main.
				   interface_main.combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX,
				   0 /* thread_index */ ,
				   xd->vlib_sw_if_index, n_packets,
				   dq->rx.n_bytes);

  return n_packets;
}
1896
/* Handle one miscellaneous (non RX/TX queue) interrupt cause bit i.
   Bit 20 is the link status change interrupt: record the new link
   status and signal the ixge process.  All other causes are only
   event-logged. */
static void
ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_regs_t *r = xd->regs;

  if (i != 20)
    {
      /* Miscellaneous causes occupy bits 16..31; the enum string table
         below is indexed by i - 16. */
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
	  16,.enum_strings =
	{
	  "flow director",
	    "rx miss",
	    "pci exception",
	    "mailbox",
	    "link status change",
	    "linksec key exchange",
	    "manageability event",
	    "reserved23",
	    "sdp0",
	    "sdp1",
	    "sdp2",
	    "sdp3",
	    "ecc", "descriptor handler error", "tcp timer", "other",},};
      struct
      {
	u8 instance;
	u8 index;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->index = i - 16;
    }
  else
    {
      u32 v = r->xge_mac.link_status;
      uword is_up = (v & (1 << 30)) != 0;	/* bit 30: link up */

      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
      struct
      {
	u32 instance, link_status;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->link_status = v;
      xd->link_status_at_last_link_change = v;

      /* Wake the ixge process; up/down is encoded in the top bit of
         the event data alongside the hw interface index. */
      vlib_process_signal_event (vm, ixge_process_node.index,
				 EVENT_SET_FLAGS,
				 ((is_up << 31) | xd->vlib_hw_if_index));
    }
}
1956
1957always_inline u32
1958clean_block (u32 * b, u32 * t, u32 n_left)
1959{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001960 u32 *t0 = t;
Damjan Marionb4d89272016-05-12 22:14:45 +02001961
1962 while (n_left >= 4)
1963 {
1964 u32 bi0, bi1, bi2, bi3;
1965
1966 t[0] = bi0 = b[0];
1967 b[0] = 0;
1968 t += bi0 != 0;
1969
1970 t[0] = bi1 = b[1];
1971 b[1] = 0;
1972 t += bi1 != 0;
1973
1974 t[0] = bi2 = b[2];
1975 b[2] = 0;
1976 t += bi2 != 0;
1977
1978 t[0] = bi3 = b[3];
1979 b[3] = 0;
1980 t += bi3 != 0;
1981
1982 b += 4;
1983 n_left -= 4;
1984 }
1985
1986 while (n_left > 0)
1987 {
1988 u32 bi0;
1989
1990 t[0] = bi0 = b[0];
1991 b[0] = 0;
1992 t += bi0 != 0;
1993 b += 1;
1994 n_left -= 1;
1995 }
1996
1997 return t - t0;
1998}
1999
/* Reclaim TX descriptors the hardware has finished with on the given
   queue and free the corresponding vlib buffers. */
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
  vlib_main_t *vm = xm->vlib_main;
  ixge_dma_queue_t *dq =
    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
  u32 n_clean, *b, *t, *t0;
  i32 n_hw_owned_descriptors;
  i32 first_to_clean, last_to_clean;
  u64 hwbp_race = 0;

  /* Handle case where head write back pointer update
   * arrives after the interrupt during high PCI bus loads.
   */
  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
    {
      hwbp_race++;
      if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
	{
	  ELOG_TYPE_DECLARE (e) =
	  {
	    .function = (char *) __FUNCTION__,.format =
	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
	      = "i4i4i4i4",};
	  struct
	  {
	    u32 instance, head_index, tail_index, n_buffers_on_ring;
	  } *ed;
	  ed = ELOG_DATA (&vm->elog_main, e);
	  ed->instance = xd->device_index;
	  ed->head_index = dq->head_index;
	  ed->tail_index = dq->tail_index;
	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
	}
    }

  /* Adopt the head the hardware wrote back; everything between the old
     head and it is now software-owned. */
  dq->head_index = dq->tx.head_index_write_back[0];
  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;

  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
    {
      ELOG_TYPE_DECLARE (e) =
      {
	.function = (char *) __FUNCTION__,.format =
	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
	  = "i4i4i4i4i4",};
      struct
      {
	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
      } *ed;
      ed = ELOG_DATA (&vm->elog_main, e);
      ed->instance = xd->device_index;
      ed->head_index = dq->head_index;
      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
      ed->n_clean = n_clean;
      ed->retries = hwbp_race;
    }

  /*
   * This function used to wait until hardware owned zero descriptors.
   * At high PPS rates, that doesn't happen until the TX ring is
   * completely full of descriptors which need to be cleaned up.
   * That, in turn, causes TX ring-full drops and/or long RX service
   * interruptions.
   */
  if (n_clean == 0)
    return;

  /* Clean the n_clean descriptors prior to the reported hardware head */
  last_to_clean = dq->head_index - 1;
  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
    last_to_clean;

  first_to_clean = (last_to_clean) - (n_clean - 1);
  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
    first_to_clean;

  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
  t0 = t = xm->tx_buffers_pending_free;
  b = dq->descriptor_buffer_indices + first_to_clean;

  /* Wrap case: clean from first to end, then start to last */
  if (first_to_clean > last_to_clean)
    {
      t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
      first_to_clean = 0;
      b = dq->descriptor_buffer_indices;
    }

  /* Typical case: clean from first to last */
  if (first_to_clean <= last_to_clean)
    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);

  if (t > t0)
    {
      /* Free everything clean_block collected and shrink the ring
         occupancy accordingly. */
      u32 n = t - t0;
      vlib_buffer_free_no_next (vm, t0, n);
      ASSERT (dq->tx.n_buffers_on_ring >= n);
      dq->tx.n_buffers_on_ring -= n;
      _vec_len (xm->tx_buffers_pending_free) = 0;
    }
}
2105
2106/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002107always_inline uword
2108ixge_interrupt_is_rx_queue (uword i)
2109{
2110 return i < 8;
2111}
Damjan Marionb4d89272016-05-12 22:14:45 +02002112
Damjan Marion00a9dca2016-08-17 17:05:46 +02002113always_inline uword
2114ixge_interrupt_is_tx_queue (uword i)
2115{
2116 return i >= 8 && i < 16;
2117}
Damjan Marionb4d89272016-05-12 22:14:45 +02002118
Damjan Marion00a9dca2016-08-17 17:05:46 +02002119always_inline uword
2120ixge_tx_queue_to_interrupt (uword i)
2121{
2122 return 8 + i;
2123}
Damjan Marionb4d89272016-05-12 22:14:45 +02002124
Damjan Marion00a9dca2016-08-17 17:05:46 +02002125always_inline uword
2126ixge_rx_queue_to_interrupt (uword i)
2127{
2128 return 0 + i;
2129}
Damjan Marionb4d89272016-05-12 22:14:45 +02002130
Damjan Marion00a9dca2016-08-17 17:05:46 +02002131always_inline uword
2132ixge_interrupt_rx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002133{
2134 ASSERT (ixge_interrupt_is_rx_queue (i));
2135 return i - 0;
2136}
2137
Damjan Marion00a9dca2016-08-17 17:05:46 +02002138always_inline uword
2139ixge_interrupt_tx_queue (uword i)
Damjan Marionb4d89272016-05-12 22:14:45 +02002140{
2141 ASSERT (ixge_interrupt_is_tx_queue (i));
2142 return i - 8;
2143}
2144
/* Read and acknowledge this device's interrupt status, then service
   every set cause bit: RX queues, TX queues, or miscellaneous.
   Returns the number of packets received. */
static uword
ixge_device_input (ixge_main_t * xm,
		   ixge_device_t * xd, vlib_node_runtime_t * node)
{
  ixge_regs_t *r = xd->regs;
  u32 i, s;
  uword n_rx_packets = 0;

  /* Snapshot the pending causes and acknowledge them all
     (status register is write-1-to-clear). */
  s = r->interrupt.status_write_1_to_set;
  if (s)
    r->interrupt.status_write_1_to_clear = s;

  /* *INDENT-OFF* */
  foreach_set_bit (i, s, ({
    if (ixge_interrupt_is_rx_queue (i))
      n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));

    else if (ixge_interrupt_is_tx_queue (i))
      ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));

    else
      ixge_interrupt (xm, xd, i);
  }));
  /* *INDENT-ON* */

  return n_rx_packets;
}
2172
/* ixge-input node function.  In interrupt mode, service only devices
   whose bit is set in runtime_data[0]; in polling mode, service every
   device.  Returns the number of packets received. */
static uword
ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword n_rx_packets = 0;

  if (node->state == VLIB_NODE_STATE_INTERRUPT)
    {
      uword i;

      /* Loop over devices with interrupts. */
      /* *INDENT-OFF* */
      foreach_set_bit (i, node->runtime_data[0], ({
	xd = vec_elt_at_index (xm->devices, i);
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts since we're going to stay in interrupt mode. */
	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }));
      /* *INDENT-ON* */

      /* Clear mask of devices with pending interrupts. */
      node->runtime_data[0] = 0;
    }
  else
    {
      /* Poll all devices for input/interrupts. */
      vec_foreach (xd, xm->devices)
      {
	n_rx_packets += ixge_device_input (xm, xd, node);

	/* Re-enable interrupts when switching out of polling mode. */
	if (node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
	  xd->regs->interrupt.enable_write_1_to_set = ~0;
      }
    }

  return n_rx_packets;
}
2215
/* Error string for each ixge error code, expanded from the
   foreach_ixge_error table (one entry per error). */
static char *ixge_error_strings[] = {
#define _(n,s) s,
  foreach_ixge_error
#undef _
};
2221
/* *INDENT-OFF* */
/* Registration of the ixge-input graph node. */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
  .function = ixge_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "ixge-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ixge_rx_dma_trace,

  .n_errors = IXGE_N_ERROR,
  .error_strings = ixge_error_strings,

  /* Received packets are dispatched to one of these next nodes. */
  .n_next_nodes = IXGE_RX_N_NEXT,
  .next_nodes = {
    [IXGE_RX_NEXT_DROP] = "error-drop",
    [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
    [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
  },
};

/* CPU-variant clones of the input node function and the selector. */
VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
CLIB_MULTIARCH_SELECT_FN (ixge_input)
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002249
Damjan Marion00a9dca2016-08-17 17:05:46 +02002250static u8 *
2251format_ixge_device_name (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002252{
2253 u32 i = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002254 ixge_main_t *xm = &ixge_main;
2255 ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
Damjan Marionb4d89272016-05-12 22:14:45 +02002256 return format (s, "TenGigabitEthernet%U",
2257 format_vlib_pci_handle, &xd->pci_device.bus_address);
2258}
2259
/* Per-counter attribute flags used by ixge_update_counters. */
#define IXGE_COUNTER_IS_64_BIT (1 << 0)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)

/* One flags byte per counter in foreach_ixge_counter: plain counters
   get 0, counters declared with _64 get IXGE_COUNTER_IS_64_BIT. */
static u8 ixge_counter_flags[] = {
#define _(a,f) 0,
#define _64(a,f) IXGE_COUNTER_IS_64_BIT,
  foreach_ixge_counter
#undef _
#undef _64
};
2270
/* Accumulate the device's hardware statistics registers into
   xd->counters.  Counters flagged NOT_CLEAR_ON_READ are explicitly
   zeroed after reading so the software accumulation stays correct. */
static void
ixge_update_counters (ixge_device_t * xd)
{
  /* Byte offset for counter registers. */
  static u32 reg_offsets[] = {
#define _(a,f) (a) / sizeof (u32),
#define _64(a,f) _(a,f)
    foreach_ixge_counter
#undef _
#undef _64
  };
  volatile u32 *r = (volatile u32 *) xd->regs;
  int i;

  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
    {
      u32 o = reg_offsets[i];
      xd->counters[i] += r[o];
      if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
	r[o] = 0;
      /* 64-bit counters: high 32 bits live in the following register. */
      if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
    }
}
2295
Damjan Marion00a9dca2016-08-17 17:05:46 +02002296static u8 *
2297format_ixge_device_id (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002298{
2299 u32 device_id = va_arg (*args, u32);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002300 char *t = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002301 switch (device_id)
2302 {
2303#define _(f,n) case n: t = #f; break;
2304 foreach_ixge_pci_device_id;
2305#undef _
2306 default:
2307 t = 0;
2308 break;
2309 }
2310 if (t == 0)
2311 s = format (s, "unknown 0x%x", device_id);
2312 else
2313 s = format (s, "%s", t);
2314 return s;
2315}
2316
Damjan Marion00a9dca2016-08-17 17:05:46 +02002317static u8 *
2318format_ixge_link_status (u8 * s, va_list * args)
Damjan Marionb4d89272016-05-12 22:14:45 +02002319{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002320 ixge_device_t *xd = va_arg (*args, ixge_device_t *);
Damjan Marionb4d89272016-05-12 22:14:45 +02002321 u32 v = xd->link_status_at_last_link_change;
2322
2323 s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2324
2325 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002326 char *modes[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002327 "1g", "10g parallel", "10g serial", "autoneg",
2328 };
Damjan Marion00a9dca2016-08-17 17:05:46 +02002329 char *speeds[] = {
Damjan Marionb4d89272016-05-12 22:14:45 +02002330 "unknown", "100m", "1g", "10g",
2331 };
2332 s = format (s, ", mode %s, speed %s",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002333 modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002334 }
2335
2336 return s;
2337}
2338
/* Format detailed state of one ixge device: device id, link status,
   PCIe link, PHY/SFP info, queue-0 ring occupancy and all non-zero
   hardware counters since the last clear. */
static u8 *
format_ixge_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
  ixge_phy_t *phy = xd->phys + xd->phy_index;
  u32 indent = format_get_indent (s);

  /* Refresh counters and link status before printing. */
  ixge_update_counters (xd);
  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;

  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
	      format_ixge_device_id, xd->device_id,
	      format_white_space, indent + 2, format_ixge_link_status, xd);

  {

    s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
		format_vlib_pci_link_speed, &xd->pci_device);
  }

  s = format (s, "\n%U", format_white_space, indent + 2);
  if (phy->mdio_address != ~0)
    s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
  else if (xd->sfp_eeprom.id == SFP_ID_sfp)
    s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
  else
    s = format (s, "PHY not found");

  /* FIXME */
  {
    /* Only RX/TX queue 0 is reported here. */
    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
    u32 hw_head_index = dr->head_index;
    u32 sw_head_index = dq->head_index;
    u32 nitems;

    nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
    s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
		format_white_space, indent + 2, nitems, dq->n_descriptors);

    s = format (s, "\n%U%d buffers in driver rx cache",
		format_white_space, indent + 2,
		vec_len (xm->rx_buffers_to_add));

    s = format (s, "\n%U%d buffers on tx queue 0 ring",
		format_white_space, indent + 2,
		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
  }
  {
    u32 i;
    u64 v;
    static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
      foreach_ixge_counter
#undef _
#undef _64
    };

    /* Print only counters that changed since the last clear. */
    for (i = 0; i < ARRAY_LEN (names); i++)
      {
	v = xd->counters[i] - xd->counters_last_clear[i];
	if (v != 0)
	  s = format (s, "\n%U%-40U%16Ld",
		      format_white_space, indent + 2,
		      format_c_identifier, names[i], v);
      }
  }

  return s;
}
2413
Damjan Marion00a9dca2016-08-17 17:05:46 +02002414static void
2415ixge_clear_hw_interface_counters (u32 instance)
Damjan Marionb4d89272016-05-12 22:14:45 +02002416{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002417 ixge_main_t *xm = &ixge_main;
2418 ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002419 ixge_update_counters (xd);
2420 memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2421}
2422
2423/*
2424 * Dynamically redirect all pkts from a specific interface
2425 * to the specified node
2426 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002427static void
2428ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2429 u32 node_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02002430{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002431 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002432 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002433 ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
Damjan Marionb4d89272016-05-12 22:14:45 +02002434
2435 /* Shut off redirection */
2436 if (node_index == ~0)
2437 {
2438 xd->per_interface_next_index = node_index;
2439 return;
2440 }
2441
2442 xd->per_interface_next_index =
2443 vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2444}
2445
2446
/* *INDENT-OFF* */
/* Device class registration: wires the ixge driver's TX, formatting,
   counter and admin callbacks into vnet. */
VNET_DEVICE_CLASS (ixge_device_class) = {
  .name = "ixge",
  .tx_function = ixge_interface_tx,
  .format_device_name = format_ixge_device_name,
  .format_device = format_ixge_device,
  .format_tx_trace = format_ixge_tx_dma_trace,
  .clear_counters = ixge_clear_hw_interface_counters,
  .admin_up_down_function = ixge_interface_admin_up_down,
  .rx_redirect_to_node = ixge_set_interface_next_node,
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002459
/* RX buffer size in bytes; see DAW-HACK note below. */
#define IXGE_N_BYTES_IN_RX_BUFFER  (2048)	// DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
Damjan Marionb4d89272016-05-12 22:14:45 +02002461
2462static clib_error_t *
2463ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2464{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002465 ixge_main_t *xm = &ixge_main;
2466 vlib_main_t *vm = xm->vlib_main;
2467 ixge_dma_queue_t *dq;
2468 clib_error_t *error = 0;
Damjan Marionb4d89272016-05-12 22:14:45 +02002469
2470 vec_validate (xd->dma_queues[rt], queue_index);
2471 dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2472
Damjan Marion00a9dca2016-08-17 17:05:46 +02002473 if (!xm->n_descriptors_per_cache_line)
2474 xm->n_descriptors_per_cache_line =
2475 CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
Damjan Marionb4d89272016-05-12 22:14:45 +02002476
Damjan Marion00a9dca2016-08-17 17:05:46 +02002477 if (!xm->n_bytes_in_rx_buffer)
Damjan Marionb4d89272016-05-12 22:14:45 +02002478 xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2479 xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002480 if (!xm->vlib_buffer_free_list_index)
Damjan Marionb4d89272016-05-12 22:14:45 +02002481 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002482 xm->vlib_buffer_free_list_index =
2483 vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
2484 "ixge rx");
Damjan Marionb4d89272016-05-12 22:14:45 +02002485 ASSERT (xm->vlib_buffer_free_list_index != 0);
2486 }
2487
Damjan Marion00a9dca2016-08-17 17:05:46 +02002488 if (!xm->n_descriptors[rt])
Damjan Marionb4d89272016-05-12 22:14:45 +02002489 xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2490
2491 dq->queue_index = queue_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002492 dq->n_descriptors =
2493 round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
Damjan Marionb4d89272016-05-12 22:14:45 +02002494 dq->head_index = dq->tail_index = 0;
2495
Damjan Marion49d66f12017-07-20 18:10:35 +02002496 dq->descriptors =
2497 vlib_physmem_alloc_aligned (vm, xm->physmem_region, &error,
2498 dq->n_descriptors *
2499 sizeof (dq->descriptors[0]),
2500 128 /* per chip spec */ );
Damjan Marionb4d89272016-05-12 22:14:45 +02002501 if (error)
2502 return error;
2503
Damjan Marion00a9dca2016-08-17 17:05:46 +02002504 memset (dq->descriptors, 0,
2505 dq->n_descriptors * sizeof (dq->descriptors[0]));
Damjan Marionb4d89272016-05-12 22:14:45 +02002506 vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2507
2508 if (rt == VLIB_RX)
2509 {
2510 u32 n_alloc, i;
2511
2512 n_alloc = vlib_buffer_alloc_from_free_list
Damjan Marion00a9dca2016-08-17 17:05:46 +02002513 (vm, dq->descriptor_buffer_indices,
2514 vec_len (dq->descriptor_buffer_indices),
Damjan Marionb4d89272016-05-12 22:14:45 +02002515 xm->vlib_buffer_free_list_index);
2516 ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2517 for (i = 0; i < n_alloc; i++)
2518 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002519 vlib_buffer_t *b =
2520 vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
2521 dq->descriptors[i].rx_to_hw.tail_address =
Damjan Marion49d66f12017-07-20 18:10:35 +02002522 vlib_physmem_virtual_to_physical (vm, xm->physmem_region,
2523 b->data);
Damjan Marionb4d89272016-05-12 22:14:45 +02002524 }
2525 }
2526 else
2527 {
2528 u32 i;
2529
Damjan Marion00a9dca2016-08-17 17:05:46 +02002530 dq->tx.head_index_write_back =
Damjan Marion49d66f12017-07-20 18:10:35 +02002531 vlib_physmem_alloc (vm, vm->buffer_main->physmem_region, &error,
2532 CLIB_CACHE_LINE_BYTES);
Damjan Marionb4d89272016-05-12 22:14:45 +02002533
2534 for (i = 0; i < dq->n_descriptors; i++)
2535 dq->descriptors[i].tx = xm->tx_descriptor_template;
2536
2537 vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2538 }
2539
2540 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002541 ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002542 u64 a;
2543
Damjan Marion49d66f12017-07-20 18:10:35 +02002544 a =
2545 vlib_physmem_virtual_to_physical (vm, vm->buffer_main->physmem_region,
2546 dq->descriptors);
Damjan Marionb4d89272016-05-12 22:14:45 +02002547 dr->descriptor_address[0] = a & 0xFFFFFFFF;
2548 dr->descriptor_address[1] = a >> (u64) 32;
2549 dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2550 dq->head_index = dq->tail_index = 0;
2551
2552 if (rt == VLIB_RX)
2553 {
2554 ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2555 dr->rx_split_control =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002556 ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2557 | ( /* lo free descriptor threshold (units of 64 descriptors) */
2558 (1 << 22)) | ( /* descriptor type: advanced one buffer */
2559 (1 << 25)) | ( /* drop if no descriptors available */
2560 (1 << 28)));
Damjan Marionb4d89272016-05-12 22:14:45 +02002561
2562 /* Give hardware all but last 16 cache lines' worth of descriptors. */
2563 dq->tail_index = dq->n_descriptors -
Damjan Marion00a9dca2016-08-17 17:05:46 +02002564 16 * xm->n_descriptors_per_cache_line;
Damjan Marionb4d89272016-05-12 22:14:45 +02002565 }
2566 else
2567 {
2568 /* Make sure its initialized before hardware can get to it. */
2569 dq->tx.head_index_write_back[0] = dq->head_index;
2570
Damjan Marion00a9dca2016-08-17 17:05:46 +02002571 a =
Damjan Marion49d66f12017-07-20 18:10:35 +02002572 vlib_physmem_virtual_to_physical (vm,
2573 vm->buffer_main->physmem_region,
2574 dq->tx.head_index_write_back);
Damjan Marionb4d89272016-05-12 22:14:45 +02002575 dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2576 dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2577 }
2578
2579 /* DMA on 82599 does not work with [13] rx data write relaxed ordering
2580 and [12] undocumented set. */
2581 if (rt == VLIB_RX)
2582 dr->dca_control &= ~((1 << 13) | (1 << 12));
2583
2584 CLIB_MEMORY_BARRIER ();
2585
2586 if (rt == VLIB_TX)
2587 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002588 xd->regs->tx_dma_control |= (1 << 0);
2589 dr->control |= ((32 << 0) /* prefetch threshold */
2590 | (64 << 8) /* host threshold */
2591 | (0 << 16) /* writeback threshold */ );
Damjan Marionb4d89272016-05-12 22:14:45 +02002592 }
2593
2594 /* Enable this queue and wait for hardware to initialize
2595 before adding to tail. */
2596 if (rt == VLIB_TX)
2597 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002598 dr->control |= 1 << 25;
2599 while (!(dr->control & (1 << 25)))
2600 ;
Damjan Marionb4d89272016-05-12 22:14:45 +02002601 }
2602
2603 /* Set head/tail indices and enable DMA. */
2604 dr->head_index = dq->head_index;
2605 dr->tail_index = dq->tail_index;
2606 }
2607
2608 return error;
2609}
2610
Damjan Marion00a9dca2016-08-17 17:05:46 +02002611static u32
2612ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
Damjan Marionb4d89272016-05-12 22:14:45 +02002613{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002614 ixge_device_t *xd;
2615 ixge_regs_t *r;
Damjan Marionb4d89272016-05-12 22:14:45 +02002616 u32 old;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002617 ixge_main_t *xm = &ixge_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002618
2619 xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2620 r = xd->regs;
2621
2622 old = r->filter_control;
2623
2624 if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002625 r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
Damjan Marionb4d89272016-05-12 22:14:45 +02002626 else
2627 r->filter_control = old & ~(1 << 9);
2628
2629 return old;
2630}
2631
/*
 * One-time bring-up of every probed device.  For each device: reset the
 * chip, initialize the PHY, register an ethernet interface using the
 * MAC address read from the chip, set up RX/TX DMA rings for queue 0
 * and configure interrupt mapping.  Called once from ixge_process.
 */
static void
ixge_device_init (ixge_main_t * xm)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_device_t *xd;

  /* Reset chip(s). */
  vec_foreach (xd, xm->devices)
  {
    ixge_regs_t *r = xd->regs;
    /* NOTE(review): presumably device + link reset bits of the control
       register; hardware self-clears them when the reset completes --
       confirm against the 82599 datasheet CTRL register description. */
    const u32 reset_bit = (1 << 26) | (1 << 3);

    r->control |= reset_bit;

    /* No need to suspend. Timed to take ~1e-6 secs */
    while (r->control & reset_bit)
      ;

    /* Software loaded. */
    r->extended_control |= (1 << 28);

    ixge_phy_init (xd);

    /* Register ethernet interface. */
    {
      u8 addr8[6];
      u32 i, addr32[2];
      clib_error_t *error;

      /* Unpack the 48-bit MAC address from the two 32-bit
         receive-address registers, low byte first. */
      addr32[0] = r->rx_ethernet_address0[0][0];
      addr32[1] = r->rx_ethernet_address0[0][1];
      for (i = 0; i < 6; i++)
	addr8[i] = addr32[i / 4] >> ((i % 4) * 8);

      error = ethernet_register_interface
	(vnm, ixge_device_class.index, xd->device_index,
	 /* ethernet address */ addr8,
	 &xd->vlib_hw_if_index, ixge_flag_change);
      if (error)
	clib_error_report (error);
    }

    /* Cache the software interface index paired with our hw interface. */
    {
      vnet_sw_interface_t *sw =
	vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
      xd->vlib_sw_if_index = sw->sw_if_index;
    }

    ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);

    /* TX ring sized to hold 20 full vlib frames worth of descriptors. */
    xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;

    ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);

    /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
    r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
				      ixge_rx_queue_to_interrupt (0)) << 0);

    r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
				       ixge_tx_queue_to_interrupt (0)) << 8);

    /* No use in getting too many interrupts.
       Limit them to one every 3/4 ring size at line rate
       min sized packets.
       No need for this since kernel/vlib main loop provides adequate interrupt
       limiting scheme. */
    if (0)
      {
	f64 line_rate_max_pps =
	  10e9 / (8 * (64 + /* interframe padding */ 20));
	ixge_throttle_queue_interrupt (r, 0,
				       .75 * xm->n_descriptors[VLIB_RX] /
				       line_rate_max_pps);
      }

    /* Accept all multicast and broadcast packets. Should really add them
       to the dst_ethernet_address register array. */
    r->filter_control |= (1 << 10) | (1 << 8);

    /* Enable frames up to size in mac frame size register. */
    r->xge_mac.control |= 1 << 2;
    r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;

    /* Enable all interrupts. */
    if (!IXGE_ALWAYS_POLL)
      r->interrupt.enable_write_1_to_set = ~0;
  }
}
2720
/*
 * Driver background process node.  Performs one-time device init, then
 * loops forever: debounces link-state changes signalled via
 * EVENT_SET_FLAGS (1 ms settle time) and refreshes the hardware stat
 * counters every 30 seconds so the 36-bit counters cannot overflow
 * unnoticed.  Never returns.
 */
static uword
ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  ixge_main_t *xm = &ixge_main;
  ixge_device_t *xd;
  uword event_type, *event_data = 0;
  f64 timeout, link_debounce_deadline;

  ixge_device_init (xm);

  /* Clear all counters. */
  vec_foreach (xd, xm->devices)
  {
    /* Read once so subsequent deltas start from the hardware's current
       values, then zero the software copies. */
    ixge_update_counters (xd);
    memset (xd->counters, 0, sizeof (xd->counters));
  }

  timeout = 30.0;
  /* "Infinitely" far in the future: no debounce pending. */
  link_debounce_deadline = 1e70;

  while (1)
    {
      /* 36 bit stat counters could overflow in ~50 secs.
         We poll every 30 secs to be conservative. */
      vlib_process_wait_for_event_or_clock (vm, timeout);

      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case EVENT_SET_FLAGS:
	  /* 1 ms */
	  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
	  timeout = 1e-3;
	  break;

	case ~0:
	  /* No events found: timer expired. */
	  if (vlib_time_now (vm) > link_debounce_deadline)
	    {
	      /* Debounce period elapsed: latch link state for all
	         devices and go back to slow polling. */
	      vec_foreach (xd, xm->devices)
	      {
		ixge_regs_t *r = xd->regs;
		u32 v = r->xge_mac.link_status;
		/* Bit 30 of the MAC link status register = link up. */
		uword is_up = (v & (1 << 30)) != 0;

		vnet_hw_interface_set_flags
		  (vnm, xd->vlib_hw_if_index,
		   is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
	      }
	      link_debounce_deadline = 1e70;
	      timeout = 30.0;
	    }
	  break;

	default:
	  ASSERT (0);
	}

      /* Reuse the event vector on the next iteration. */
      if (event_data)
	_vec_len (event_data) = 0;

      /* Query stats every 30 secs. */
      {
	f64 now = vlib_time_now (vm);
	if (now - xm->time_last_stats_update > 30)
	  {
	    xm->time_last_stats_update = now;
	    vec_foreach (xd, xm->devices) ixge_update_counters (xd);
	  }
      }
    }

  return 0;
}
2797
/* Process node running ixge_process: one-time device init followed by
   periodic link-debounce and counter housekeeping. */
static vlib_node_registration_t ixge_process_node = {
  .function = ixge_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ixge-process",
};
2803
Damjan Marion00a9dca2016-08-17 17:05:46 +02002804clib_error_t *
2805ixge_init (vlib_main_t * vm)
Damjan Marionb4d89272016-05-12 22:14:45 +02002806{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002807 ixge_main_t *xm = &ixge_main;
2808 clib_error_t *error;
Damjan Marionb4d89272016-05-12 22:14:45 +02002809
2810 xm->vlib_main = vm;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002811 memset (&xm->tx_descriptor_template, 0,
2812 sizeof (xm->tx_descriptor_template));
2813 memset (&xm->tx_descriptor_template_mask, 0,
2814 sizeof (xm->tx_descriptor_template_mask));
Damjan Marionb4d89272016-05-12 22:14:45 +02002815 xm->tx_descriptor_template.status0 =
Damjan Marion00a9dca2016-08-17 17:05:46 +02002816 (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2817 IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2818 IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
Damjan Marionb4d89272016-05-12 22:14:45 +02002819 xm->tx_descriptor_template_mask.status0 = 0xffff;
2820 xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2821
2822 xm->tx_descriptor_template_mask.status0 &=
2823 ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2824 | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2825 xm->tx_descriptor_template_mask.status1 &=
2826 ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
2827
2828 error = vlib_call_init_function (vm, pci_bus_init);
2829
2830 return error;
2831}
2832
2833VLIB_INIT_FUNCTION (ixge_init);
2834
2835
2836static void
Damjan Marion00a9dca2016-08-17 17:05:46 +02002837ixge_pci_intr_handler (vlib_pci_device_t * dev)
Damjan Marionb4d89272016-05-12 22:14:45 +02002838{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002839 ixge_main_t *xm = &ixge_main;
2840 vlib_main_t *vm = xm->vlib_main;
Damjan Marionb4d89272016-05-12 22:14:45 +02002841
2842 vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
2843
2844 /* Let node know which device is interrupting. */
2845 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002846 vlib_node_runtime_t *rt =
2847 vlib_node_get_runtime (vm, ixge_input_node.index);
Damjan Marionb4d89272016-05-12 22:14:45 +02002848 rt->runtime_data[0] |= 1 << dev->private_data;
2849 }
2850}
2851
2852static clib_error_t *
2853ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
2854{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002855 ixge_main_t *xm = &ixge_main;
2856 clib_error_t *error;
2857 void *r;
2858 ixge_device_t *xd;
Damjan Marionb4d89272016-05-12 22:14:45 +02002859
Damjan Marion49d66f12017-07-20 18:10:35 +02002860 /* Allocate physmem region for DMA buffers */
2861 error = vlib_physmem_region_alloc (vm, "ixge decriptors", 2 << 20, 0,
2862 VLIB_PHYSMEM_F_INIT_MHEAP,
2863 &xm->physmem_region);
2864 if (error)
2865 return error;
Damjan Marionb4d89272016-05-12 22:14:45 +02002866
2867 error = vlib_pci_map_resource (dev, 0, &r);
2868 if (error)
2869 return error;
2870
2871 vec_add2 (xm->devices, xd, 1);
2872
2873 if (vec_len (xm->devices) == 1)
2874 {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002875 ixge_input_node.function = ixge_input_multiarch_select ();
Damjan Marionb4d89272016-05-12 22:14:45 +02002876 }
2877
2878 xd->pci_device = dev[0];
2879 xd->device_id = xd->pci_device.config0.header.device_id;
2880 xd->regs = r;
2881 xd->device_index = xd - xm->devices;
2882 xd->pci_function = dev->bus_address.function;
2883 xd->per_interface_next_index = ~0;
2884
2885
2886 /* Chip found so enable node. */
2887 {
2888 vlib_node_set_state (vm, ixge_input_node.index,
2889 (IXGE_ALWAYS_POLL
2890 ? VLIB_NODE_STATE_POLLING
2891 : VLIB_NODE_STATE_INTERRUPT));
2892
2893 dev->private_data = xd->device_index;
2894 }
2895
2896 if (vec_len (xm->devices) == 1)
2897 {
2898 vlib_register_node (vm, &ixge_process_node);
2899 xm->process_node_index = ixge_process_node.index;
2900 }
2901
Damjan Marion00a9dca2016-08-17 17:05:46 +02002902 error = vlib_pci_bus_master_enable (dev);
Damjan Marionb4d89272016-05-12 22:14:45 +02002903
2904 if (error)
2905 return error;
2906
Damjan Marion00a9dca2016-08-17 17:05:46 +02002907 return vlib_pci_intr_enable (dev);
Damjan Marionb4d89272016-05-12 22:14:45 +02002908}
2909
/* *INDENT-OFF* */
/* Bind ixge_pci_init / ixge_pci_intr_handler to every supported Intel
   device id listed in foreach_ixge_pci_device_id. */
PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
  .init_function = ixge_pci_init,
  .interrupt_handler = ixge_pci_intr_handler,
  .supported_devices = {
#define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
  foreach_ixge_pci_device_id
#undef _
  { 0 },
  },
};
/* *INDENT-ON* */
Damjan Marionb4d89272016-05-12 22:14:45 +02002922
Damjan Marion00a9dca2016-08-17 17:05:46 +02002923void
2924ixge_set_next_node (ixge_rx_next_t next, char *name)
Damjan Marionb4d89272016-05-12 22:14:45 +02002925{
2926 vlib_node_registration_t *r = &ixge_input_node;
2927
2928 switch (next)
2929 {
2930 case IXGE_RX_NEXT_IP4_INPUT:
2931 case IXGE_RX_NEXT_IP6_INPUT:
2932 case IXGE_RX_NEXT_ETHERNET_INPUT:
2933 r->next_nodes[next] = name;
2934 break;
2935
2936 default:
2937 clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2938 break;
2939 }
2940}
Damjan Marion00a9dca2016-08-17 17:05:46 +02002941
/* *INDENT-OFF* */
/* Plugin registration; disabled by default since this driver is
   educational/unsupported (see warning at top of file). */
VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .default_disabled = 1,
  .description = "Intel 82599 Family Native Driver (experimental)",
};
Damjan Marion7bee80c2017-04-26 15:32:12 +02002948#endif
Damjan Marion374e2c52017-03-09 20:38:15 +01002949
2950/* *INDENT-ON* */
Damjan Marion7bee80c2017-04-26 15:32:12 +02002951
Damjan Marion00a9dca2016-08-17 17:05:46 +02002952/*
2953 * fd.io coding-style-patch-verification: ON
2954 *
2955 * Local Variables:
2956 * eval: (c-set-style "gnu")
2957 * End:
2958 */