/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>

#include <vmxnet3/vmxnet3.h>

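/*
 * Advance the TX completion ring cursor by one entry, flipping the expected
 * generation bit when the ring wraps around.
 */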
static_always_inline void
vmxnet3_tx_comp_ring_advance_next (vmxnet3_txq_t * txq)
{
  vmxnet3_tx_comp_ring *comp_ring = &txq->tx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == txq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_TXCF_GEN;
    }
}

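/*
 * Advance the TX ring produce index by one descriptor, flipping the
 * generation bit when the ring wraps around.
 */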
static_always_inline void
vmxnet3_tx_ring_advance_produce (vmxnet3_txq_t * txq)
{
  txq->tx_ring.produce++;
  if (PREDICT_FALSE (txq->tx_ring.produce == txq->size))
    {
      txq->tx_ring.produce = 0;
      txq->tx_ring.gen ^= VMXNET3_TXF_GEN;
    }
}

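/*
 * Advance the TX ring consume index by one descriptor; the ring size is a
 * power of two, so masking handles the wrap.
 */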
static_always_inline void
vmxnet3_tx_ring_advance_consume (vmxnet3_txq_t * txq)
{
  txq->tx_ring.consume++;
  txq->tx_ring.consume &= txq->size - 1;
}

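/*
 * Walk the TX completion ring and free the buffer chain of every packet the
 * device has finished transmitting.
 */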
static_always_inline void
vmxnet3_txq_release (vlib_main_t * vm, vmxnet3_device_t * vd,
                     vmxnet3_txq_t * txq)
{
  vmxnet3_tx_comp *tx_comp;
  vmxnet3_tx_comp_ring *comp_ring;

  comp_ring = &txq->tx_comp_ring;
  tx_comp = &txq->tx_comp[comp_ring->next];

  while ((tx_comp->flags & VMXNET3_TXCF_GEN) == comp_ring->gen)
    {
      u16 eop_idx = tx_comp->index & VMXNET3_TXC_INDEX;
      u32 bi0 = txq->tx_ring.bufs[txq->tx_ring.consume];

      vlib_buffer_free_one (vm, bi0);
      while (txq->tx_ring.consume != eop_idx)
        {
          vmxnet3_tx_ring_advance_consume (txq);
        }
      vmxnet3_tx_ring_advance_consume (txq);

      vmxnet3_tx_comp_ring_advance_next (txq);
      tx_comp = &txq->tx_comp[comp_ring->next];
    }
}

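/*
 * Number of free TX descriptors, keeping one slot unused so a full ring can
 * be distinguished from an empty one.
 */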
static_always_inline u16
vmxnet3_tx_ring_space_left (vmxnet3_txq_t * txq)
{
  u16 count;

  count = (txq->tx_ring.consume - txq->tx_ring.produce - 1);
  /* Wrapped? */
  if (txq->tx_ring.produce >= txq->tx_ring.consume)
    count += txq->size;
  return count;
}

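/*
 * Device class TX function: map each (possibly chained) buffer onto TX
 * descriptors, program TSO metadata when requested and hand the new
 * descriptors over to the device.
 */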
VNET_DEVICE_CLASS_TX_FN (vmxnet3_device_class) (vlib_main_t * vm,
                                                vlib_node_runtime_t * node,
                                                vlib_frame_t * frame)
{
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, rd->dev_instance);
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 bi0;
  vlib_buffer_t *b0;
  vmxnet3_tx_desc *txd = 0;
  u32 desc_idx, generation, first_idx;
  u16 space_left;
  u16 n_left = frame->n_vectors;
  vmxnet3_txq_t *txq;
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u16 qid = tf->queue_id, produce;

  if (PREDICT_FALSE (!(vd->flags & VMXNET3_DEVICE_F_LINK_UP)))
    {
      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index, VMXNET3_TX_ERROR_LINK_DOWN,
                        n_left);
      return (0);
    }

  txq = vec_elt_at_index (vd->txqs, qid);
  if (tf->shared_queue)
    clib_spinlock_lock (&txq->lock);

  vmxnet3_txq_release (vm, vd, txq);

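  /*
   * Remember the starting produce index so the doorbell register is only
   * written if this frame actually queued descriptors.
   */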
  produce = txq->tx_ring.produce;
  while (PREDICT_TRUE (n_left))
    {
      u16 space_needed = 1, i;
      u32 gso_size = 0;
      u32 l4_hdr_sz;
      vlib_buffer_t *b;
      u32 hdr_len = 0;

      bi0 = buffers[0];
      b0 = vlib_get_buffer (vm, bi0);
      b = b0;

      space_left = vmxnet3_tx_ring_space_left (txq);
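      /* A chained buffer needs one TX descriptor per segment */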
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          u32 next_buffer = b->next_buffer;

          b = vlib_get_buffer (vm, next_buffer);
          space_needed++;
        }
      if (PREDICT_FALSE (space_left < space_needed))
        {
          vmxnet3_txq_release (vm, vd, txq);
          space_left = vmxnet3_tx_ring_space_left (txq);

          if (PREDICT_FALSE (space_left < space_needed))
            {
              vlib_buffer_free_one (vm, bi0);
              vlib_error_count (vm, node->node_index,
                                VMXNET3_TX_ERROR_NO_FREE_SLOTS, 1);
              buffers++;
              n_left--;
              /*
               * Drop this packet. But we may have enough room for the next
               * packet.
               */
              continue;
            }
        }

      /*
       * Write the SOP descriptor with its generation bit inverted so the
       * device does not start reading an incomplete packet.
       */
      generation = txq->tx_ring.gen ^ VMXNET3_TXF_GEN;
      first_idx = txq->tx_ring.produce;
      for (i = 0; i < space_needed; i++)
        {
          b0 = vlib_get_buffer (vm, bi0);

          desc_idx = txq->tx_ring.produce;

          vmxnet3_tx_ring_advance_produce (txq);
          txq->tx_ring.bufs[desc_idx] = bi0;

          txd = &txq->tx_desc[desc_idx];

          txd->address = vlib_buffer_get_current_pa (vm, b0);

          txd->flags[0] = generation | b0->current_length;
          txd->flags[1] = 0;
          if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
            {
              /*
               * We should not be getting GSO outbound traffic unless
               * GSO/TSO is enabled on the device.
               */
              ASSERT (vd->gso_enable == 1);
              gso_size = vnet_buffer2 (b0)->gso_size;
              l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
              if (b0->flags & VNET_BUFFER_F_IS_IP6)
                hdr_len = sizeof (ethernet_header_t) + sizeof (ip6_header_t) +
                  l4_hdr_sz;
              else
                hdr_len = sizeof (ethernet_header_t) + sizeof (ip4_header_t) +
                  l4_hdr_sz;
            }

          generation = txq->tx_ring.gen;
          bi0 = b0->next_buffer;
        }
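      /*
       * For a TSO packet, program the header length, offload mode and MSS
       * into the SOP descriptor.
       */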
      if (PREDICT_FALSE (gso_size != 0))
        {
          txq->tx_desc[first_idx].flags[1] = hdr_len;
          txq->tx_desc[first_idx].flags[1] |= VMXNET3_TXF_OM (VMXNET3_OM_TSO);
          txq->tx_desc[first_idx].flags[0] |= VMXNET3_TXF_MSSCOF (gso_size);
        }
      txd->flags[1] |= VMXNET3_TXF_CQ | VMXNET3_TXF_EOP;
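      /*
       * Compiler barrier: make sure all descriptor writes are emitted
       * before the SOP descriptor is handed over to the device below.
       */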
      asm volatile ("":::"memory");
      /*
       * Now toggle back the generation bit for the first segment so the
       * device can start reading the packet.
       */
      txq->tx_desc[first_idx].flags[0] ^= VMXNET3_TXF_GEN;

      buffers++;
      n_left--;
    }

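  /* Ring the doorbell only if this frame queued new descriptors */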
  if (PREDICT_TRUE (produce != txq->tx_ring.produce))
    vmxnet3_reg_write_inline (vd, 0, txq->reg_txprod, txq->tx_ring.produce);

  if (tf->shared_queue)
    clib_spinlock_unlock (&txq->lock);

  return (frame->n_vectors - n_left);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */