Mohsin Kazmi | b7e4e6d | 2021-12-13 18:32:42 +0000 | [diff] [blame] | 1 | /* |
| 2 | *------------------------------------------------------------------ |
| 3 | * Copyright (c) 2021 Cisco and/or its affiliates. |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at: |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | *------------------------------------------------------------------ |
| 16 | */ |
| 17 | |
| 18 | #include <vlib/vlib.h> |
| 19 | #include <vnet/gso/gro_func.h> |
| 20 | #include <vnet/interface/tx_queue_funcs.h> |
| 21 | #include <vnet/devices/virtio/virtio.h> |
| 22 | #include <vnet/devices/virtio/virtio_inline.h> |
| 23 | |
| 24 | static_always_inline uword |
| 25 | virtio_pre_input_inline (vlib_main_t *vm, virtio_vring_t *txq_vring, |
| 26 | vnet_hw_if_tx_queue_t *txq, u8 packet_coalesce, |
| 27 | u8 packet_buffering) |
| 28 | { |
| 29 | if (txq->shared_queue) |
| 30 | { |
| 31 | if (clib_spinlock_trylock (&txq_vring->lockp)) |
| 32 | { |
| 33 | if (virtio_txq_is_scheduled (txq_vring)) |
| 34 | return 0; |
| 35 | if (packet_coalesce) |
| 36 | vnet_gro_flow_table_schedule_node_on_dispatcher ( |
| 37 | vm, txq, txq_vring->flow_table); |
| 38 | else if (packet_buffering) |
| 39 | virtio_vring_buffering_schedule_node_on_dispatcher ( |
| 40 | vm, txq, txq_vring->buffering); |
| 41 | virtio_txq_set_scheduled (txq_vring); |
| 42 | clib_spinlock_unlock (&txq_vring->lockp); |
| 43 | } |
| 44 | } |
| 45 | else |
| 46 | { |
| 47 | if (packet_coalesce) |
| 48 | vnet_gro_flow_table_schedule_node_on_dispatcher ( |
| 49 | vm, txq, txq_vring->flow_table); |
| 50 | else if (packet_buffering) |
| 51 | virtio_vring_buffering_schedule_node_on_dispatcher ( |
| 52 | vm, txq, txq_vring->buffering); |
| 53 | } |
| 54 | return 0; |
| 55 | } |
| 56 | |
| 57 | static uword |
| 58 | virtio_pre_input (vlib_main_t *vm, vlib_node_runtime_t *node, |
| 59 | vlib_frame_t *frame) |
| 60 | { |
| 61 | virtio_main_t *vim = &virtio_main; |
| 62 | vnet_main_t *vnm = vnet_get_main (); |
| 63 | virtio_if_t *vif; |
| 64 | |
| 65 | pool_foreach (vif, vim->interfaces) |
| 66 | { |
| 67 | if (vif->packet_coalesce || vif->packet_buffering) |
| 68 | { |
| 69 | virtio_vring_t *txq_vring; |
| 70 | vec_foreach (txq_vring, vif->txq_vrings) |
| 71 | { |
| 72 | vnet_hw_if_tx_queue_t *txq = |
| 73 | vnet_hw_if_get_tx_queue (vnm, txq_vring->queue_index); |
| 74 | if (clib_bitmap_get (txq->threads, vm->thread_index) == 1) |
| 75 | virtio_pre_input_inline (vm, txq_vring, txq, |
| 76 | vif->packet_coalesce, |
| 77 | vif->packet_buffering); |
| 78 | } |
| 79 | } |
| 80 | } |
| 81 | |
| 82 | return 0; |
| 83 | } |
| 84 | |
| 85 | /** |
| 86 | * virtio interfaces support packet coalescing and buffering which |
| 87 | * depends on timer expiry to flush the stored packets periodically. |
| 88 | * Previously, virtio input node checked timer expiry and scheduled |
| 89 | * tx queue accordingly. |
| 90 | * |
| 91 | * In poll mode, timer expiry was handled naturally, as input node |
| 92 | * runs periodically. In interrupt mode, virtio input node was dependent |
| 93 | * on the interrupts send from backend. Stored packets could starve, |
| 94 | * if there would not be interrupts to input node. |
| 95 | * |
| 96 | * This problem had been solved through a dedicated process node which |
| 97 | * periodically sends interrupt to virtio input node given coalescing |
| 98 | * or buffering feature were enabled on an interface. |
| 99 | * |
| 100 | * But that approach worked with following limitations: |
| 101 | * 1) Each VPP thread should have (atleast) 1 rx queue of an interface |
| 102 | * (with buffering enabled). And rxqs and txqs should be placed on the |
| 103 | * same thread. |
| 104 | * |
| 105 | * New design provides solution to above problem(s) without any limitation |
| 106 | * through (dedicated) pre-input node running on each VPP thread when |
| 107 | * atleast 1 virtio interface is enabled with coalescing or buffering. |
| 108 | */ |
VLIB_REGISTER_NODE (virtio_pre_input_node) = {
  .function = virtio_pre_input,
  .type = VLIB_NODE_TYPE_PRE_INPUT, /* runs once per dispatch loop, before input nodes */
  .name = "virtio-pre-input",
  .state = VLIB_NODE_STATE_DISABLED, /* turned on by virtio_pre_input_node_enable */
};
| 115 | |
| 116 | void |
| 117 | virtio_pre_input_node_enable (vlib_main_t *vm, virtio_if_t *vif) |
| 118 | { |
| 119 | virtio_main_t *vim = &virtio_main; |
| 120 | if (vif->packet_coalesce || vif->packet_buffering) |
| 121 | { |
| 122 | vim->gro_or_buffering_if_count++; |
| 123 | if (vim->gro_or_buffering_if_count == 1) |
| 124 | { |
| 125 | foreach_vlib_main () |
| 126 | { |
| 127 | vlib_node_set_state (this_vlib_main, virtio_pre_input_node.index, |
| 128 | VLIB_NODE_STATE_POLLING); |
| 129 | } |
| 130 | } |
| 131 | } |
| 132 | } |
| 133 | |
| 134 | void |
| 135 | virtio_pre_input_node_disable (vlib_main_t *vm, virtio_if_t *vif) |
| 136 | { |
| 137 | virtio_main_t *vim = &virtio_main; |
| 138 | if (vif->packet_coalesce || vif->packet_buffering) |
| 139 | { |
| 140 | if (vim->gro_or_buffering_if_count > 0) |
| 141 | vim->gro_or_buffering_if_count--; |
| 142 | if (vim->gro_or_buffering_if_count == 0) |
| 143 | { |
| 144 | foreach_vlib_main () |
| 145 | { |
| 146 | vlib_node_set_state (this_vlib_main, virtio_pre_input_node.index, |
| 147 | VLIB_NODE_STATE_DISABLED); |
| 148 | } |
| 149 | } |
| 150 | } |
| 151 | } |
| 152 | |
| 153 | /* |
| 154 | * fd.io coding-style-patch-verification: ON |
| 155 | * |
| 156 | * Local Variables: |
| 157 | * eval: (c-set-style "gnu") |
| 158 | * End: |
| 159 | */ |