/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

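/*
 * TX error counters. The foreach_virtio_tx_func_error X-macro below is
 * expanded twice: once into the virtio_tx_func_error_t enum and once into
 * the matching array of counter description strings.
 */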
#define foreach_virtio_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(NO_TX_QUEUES, "no tx queues")

typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;

static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};

static u8 *
format_virtio_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "VIRTIO interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_virtio_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

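/*
 * Reclaim buffers for descriptors the device has marked as used. Runs of
 * used entries with consecutively increasing ids are freed in a single
 * vlib_buffer_free_from_ring () call; an entry that breaks the run is
 * freed individually. Updates the vring's desc_in_use and last_used_idx.
 */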
static_always_inline void
virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring)
{
  u16 used = vring->desc_in_use;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;

  if (n_left == 0)
    return;

  while (n_left)
    {
      struct vring_used_elem *e = &vring->used->ring[last & mask];
      u16 slot, n_buffers;
      slot = n_buffers = e->id;

      while (e->id == n_buffers)
        {
          n_left--;
          last++;
          n_buffers++;
          if (n_left == 0)
            break;
          e = &vring->used->ring[last & mask];
        }
      vlib_buffer_free_from_ring (vm, vring->buffers, slot,
                                  sz, (n_buffers - slot));
      used -= (n_buffers - slot);

      if (n_left > 0)
        {
          slot = e->id;

          vlib_buffer_free (vm, &vring->buffers[slot], 1);
          used--;
          last++;
          n_left--;
        }
    }
  vring->desc_in_use = used;
  vring->last_used_idx = last;
}

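/*
 * Fill in the virtio_net_hdr checksum fields for a buffer that requests
 * TCP or UDP checksum offload: csum_start points at the L4 header and
 * csum_offset at the checksum field inside it, so the other side of the
 * virtio device can complete the checksum. The IPv4 header checksum is
 * computed here because virtio offers no offload for it.
 */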
static_always_inline void
set_checksum_offsets (vlib_main_t * vm, virtio_if_t * vif, vlib_buffer_t * b,
                      struct virtio_net_hdr_v1 *hdr)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x22 = 14 (eth) + 20 (ip4) */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);

      /*
       * Virtio devices do not support IP4 header checksum offload, so the
       * driver computes it here on the tx path.
       */
      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x36 = 14 (eth) + 40 (ip6) */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
    }
}

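/*
 * Enqueue one buffer (or buffer chain) into the tx vring at slot 'next'.
 * The virtio_net_hdr is built in the buffer headroom, including GSO and
 * checksum offload fields when requested. A single-segment buffer uses one
 * direct descriptor; a chained buffer is described through an indirect
 * descriptor table built inside a freshly allocated vlib_buffer_t. Returns
 * the number of avail-ring slots consumed: 1 on success, 0 if the indirect
 * buffer allocation fails.
 */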
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
                    virtio_vring_t * vring, u32 bi, u16 avail, u16 next,
                    u16 mask, int do_gso, int csum_offload)
{
  u16 n_added = 0;
  int hdr_sz = vif->virtio_net_hdr_sz;
  struct vring_desc *d;
  d = &vring->desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  struct virtio_net_hdr_v1 *hdr = vlib_buffer_get_current (b) - hdr_sz;

  clib_memset (hdr, 0, hdr_sz);

  if (do_gso && (b->flags & VNET_BUFFER_F_GSO))
    {
      if (b->flags & VNET_BUFFER_F_IS_IP4)
        {
          ip4_header_t *ip4;
          gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
          hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
          hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
          hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x22 = 14 (eth) + 20 (ip4) */
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
          ip4 =
            (ip4_header_t *) (vlib_buffer_get_current (b) +
                              gho.l3_hdr_offset);
          /*
           * Virtio devices do not support IP4 header checksum offload, so
           * the driver computes it here on the tx path.
           */
          if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
            ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (b->flags & VNET_BUFFER_F_IS_IP6)
        {
          gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
          hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
          hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
          hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x36 = 14 (eth) + 40 (ip6) */
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
    }
  else if (csum_offload
           && (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                           VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
    {
      set_checksum_offsets (vm, vif, b, hdr);
    }

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr =
        ((vif->type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
                                                                         b) :
         pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      d->flags = 0;
    }
  else
    {
      /*
       * A single vlib_buffer_t is used to hold the indirect descriptor
       * chain. Each descriptor is 16 bytes and a vlib_buffer_t has 2048
       * bytes of data space, so the longest chain can hold 128 (= 2048/16)
       * indirect descriptors. That is enough for a 65535-byte jumbo frame
       * even with data buffers as small as 512 bytes each.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        return n_added;

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;

      struct vring_desc *id =
        (struct vring_desc *) vlib_buffer_get_current (indirect_desc);
      u32 count = 1;
      if (vif->type == VIRTIO_IF_TYPE_PCI)
        {
          d->addr = vlib_physmem_get_pa (vm, id);
          id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

          /*
           * If VIRTIO_F_ANY_LAYOUT is not negotiated, the virtio_net_hdr
           * must be placed in a separate descriptor and the packet data
           * starts in the next descriptor.
           */
          if (PREDICT_TRUE
              (vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)))
            id->len = b->current_length + hdr_sz;
          else
            {
              id->len = hdr_sz;
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
        }
      else			/* VIRTIO_IF_TYPE_TAP */
        {
          d->addr = pointer_to_uword (id);
          /* first buffer in chain */
          id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
          id->len = b->current_length + hdr_sz;

          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = pointer_to_uword (vlib_buffer_get_current (b));
              id->len = b->current_length;
            }
        }
      id->flags = 0;
      id->next = 0;
      d->len = count * sizeof (struct vring_desc);
      d->flags = VRING_DESC_F_INDIRECT;
    }
  vring->buffers[next] = bi;
  vring->avail->ring[avail & mask] = next;
  n_added++;
  return n_added;
}

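/*
 * Common tx path. Picks a tx vring based on the thread index, reclaims
 * descriptors already consumed by the device, then enqueues as many frame
 * buffers as free descriptors allow. Buffers that do not fit are dropped
 * and counted as VIRTIO_TX_ERROR_NO_FREE_SLOTS. The device is kicked when
 * new descriptors were made available, unless it has masked notifications.
 */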
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif,
                            int do_gso, int csum_offload)
{
  u16 n_left = frame->n_vectors;
  virtio_vring_t *vring;
  u16 qid = vm->thread_index % vif->num_txqs;
  vring = vec_elt_at_index (vif->txq_vrings, qid);
  u16 used, next, avail;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u32 *buffers = vlib_frame_vector_args (frame);

  clib_spinlock_lock_if_init (&vring->lockp);

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      (vring->last_kick_avail_idx != vring->avail->idx))
    virtio_kick (vm, vring, vif);

  /* free consumed buffers */
  virtio_free_used_device_desc (vm, vring);

  used = vring->desc_in_use;
  next = vring->desc_next;
  avail = vring->avail->idx;

  while (n_left && used < sz)
    {
      u16 n_added = 0;
      n_added =
        add_buffer_to_slot (vm, vif, vring, buffers[0], avail, next, mask,
                            do_gso, csum_offload);
      if (!n_added)
        break;
      avail += n_added;
      next = (next + n_added) & mask;
      used += n_added;
      buffers++;
      n_left--;
    }

  if (n_left != frame->n_vectors)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      vring->avail->idx = avail;
      vring->desc_next = next;
      vring->desc_in_use = used;
      if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
        virtio_kick (vm, vring, vif);
    }

  if (n_left)
    {
      vlib_error_count (vm, node->node_index, VIRTIO_TX_ERROR_NO_FREE_SLOTS,
                        n_left);
      vlib_buffer_free (vm, buffers, n_left);
    }

  clib_spinlock_unlock_if_init (&vring->lockp);

  return frame->n_vectors - n_left;
}

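/*
 * Device-class tx function. Dispatches to one specialization of
 * virtio_interface_tx_inline depending on whether the interface advertises
 * GSO and/or L4 checksum offload, so the feature checks are resolved at
 * compile time and stay off the per-packet fast path.
 */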
VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
                                               vlib_node_runtime_t * node,
                                               vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  virtio_main_t *nm = &virtio_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);

  if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    return virtio_interface_tx_inline (vm, node, frame, vif, 1 /* do_gso */ ,
                                       1);
  else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       0 /* no do_gso */ , 1);
  else
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       0 /* no do_gso */ , 0);
}

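/*
 * Redirect packets received on this interface to a specific graph node.
 * Passing ~0 as node_index clears the redirection so the input node falls
 * back to its default next node.
 */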
static void
virtio_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                                u32 node_index)
{
  virtio_main_t *apm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      vif->per_interface_next_index = node_index;
      return;
    }

  vif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), virtio_input_node.index,
                        node_index);
}

static void
virtio_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

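/*
 * Switch an rx queue between polling and interrupt mode by setting or
 * clearing VIRTIO_RING_FLAG_MASK_INT on the avail ring. PCI devices that
 * did not negotiate interrupt support are kept in polling mode and the
 * request is rejected.
 */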
static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_interface_rx_mode mode)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);

  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
    {
      vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
      return clib_error_return (0, "interrupt mode is not supported");
    }

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
  else
    vring->avail->flags &= ~VIRTIO_RING_FLAG_MASK_INT;

  return 0;
}

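/*
 * Admin up/down handler: mirrors VNET_SW_INTERFACE_FLAG_ADMIN_UP into
 * VIRTIO_IF_FLAG_ADMIN_UP on the interface instance.
 */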
static clib_error_t *
virtio_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
  else
    vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;

  return 0;
}

static clib_error_t *
virtio_subif_add_del_function (vnet_main_t * vnm,
                               u32 hw_if_index,
                               struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

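/*
 * Device class registration: ties the tx function, formatters, error
 * strings and the callbacks above to the "virtio" device class.
 */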
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (virtio_device_class) = {
  .name = "virtio",
  .format_device_name = format_virtio_device_name,
  .format_device = format_virtio_device,
  .format_tx_trace = format_virtio_tx_trace,
  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
  .tx_function_error_strings = virtio_tx_func_error_strings,
  .rx_redirect_to_node = virtio_set_interface_next_node,
  .clear_counters = virtio_clear_hw_interface_counters,
  .admin_up_down_function = virtio_interface_admin_up_down,
  .subif_add_del_function = virtio_subif_add_del_function,
  .rx_mode_change_function = virtio_interface_rx_mode_change,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */