/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#include "vppinfra/bitmap.h"
#include "vppinfra/lock.h"
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/log.h>

VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
  .class_name = "dev",
  .subclass_name = "runtime",
};

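/* Vector of pending runtime ops, published by vnet_dev_rt_exec_ops() and
 * consumed by the per-thread management node below. */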
static vnet_dev_rt_op_t *rt_ops;

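/* Apply a single start/stop op to the rx node runtime of the target thread,
 * then mark the op completed so the requesting thread can stop waiting. */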
static void
_vnet_dev_rt_exec_op (vlib_main_t *vm, vnet_dev_rt_op_t *op)
{
  if (op->type == VNET_DEV_RT_OP_TYPE_RX_QUEUE)
    {
      vnet_dev_rx_node_runtime_t *rtd;
      vnet_dev_rx_queue_t *rxq = op->rx_queue;
      u32 i, node_index = rxq->port->intf.rx_node_index;

      rtd = vlib_node_get_runtime_data (vm, node_index);

      if (op->action == VNET_DEV_RT_OP_ACTION_START)
        {
          for (i = 0; i < rtd->n_rx_queues; i++)
            ASSERT (rtd->rx_queues[i] != op->rx_queue);
          rtd->rx_queues[rtd->n_rx_queues++] = op->rx_queue;
        }

      else if (op->action == VNET_DEV_RT_OP_ACTION_STOP)
        {
          for (i = 0; i < rtd->n_rx_queues; i++)
            if (rtd->rx_queues[i] == op->rx_queue)
              break;
          ASSERT (i < rtd->n_rx_queues);
          rtd->n_rx_queues--;
          for (; i < rtd->n_rx_queues; i++)
            rtd->rx_queues[i] = rtd->rx_queues[i + 1];
        }

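      /* Adding the first queue enables polling on the rx node; removing the
       * last queue disables the node entirely. */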
      if (rtd->n_rx_queues == 1)
        vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_POLLING);
      else if (rtd->n_rx_queues == 0)
        vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_DISABLED);

      __atomic_store_n (&op->completed, 1, __ATOMIC_RELEASE);
    }
}

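/* Return 1 if no earlier, still-pending op in the vector targets the same rx
 * queue, so ops on a given queue are executed in submission order. */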
static int
_vnet_dev_rt_op_not_occured_before (vnet_dev_rt_op_t *first,
                                    vnet_dev_rt_op_t *current)
{
  for (vnet_dev_rt_op_t *op = first; op < current; op++)
    if (op->rx_queue == current->rx_queue && op->completed == 0)
      return 0;
  return 1;
}

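/* Management node input function. Each thread executes the pending ops that
 * target it; if an op is ordered behind an earlier, still-pending op on the
 * same queue, the node re-arms its own interrupt and retries later. */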
static uword
vnet_dev_rt_mgmt_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                          vlib_frame_t *frame)
{
  u16 thread_index = vm->thread_index;
  vnet_dev_rt_op_t *ops = __atomic_load_n (&rt_ops, __ATOMIC_ACQUIRE);
  vnet_dev_rt_op_t *op;
  int come_back = 0;
  uword rv = 0;

  vec_foreach (op, ops)
    if (op->thread_index == thread_index)
      {
        if (_vnet_dev_rt_op_not_occured_before (ops, op))
          {
            _vnet_dev_rt_exec_op (vm, op);
            rv++;
          }
        else
          come_back = 1;
      }

  if (come_back)
    vlib_node_set_interrupt_pending (vm, node->node_index);

  return rv;
}

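/* Registered as a pre-input node in interrupt mode, so it exists on every
 * thread but only runs when an interrupt is raised for it. */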
VLIB_REGISTER_NODE (vnet_dev_rt_mgmt_node, static) = {
  .function = vnet_dev_rt_mgmt_node_fn,
  .name = "dev-rt-mgmt",
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
};

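/* Format a runtime op for logging, e.g. "port 0 rx queue 1 start on thread 2". */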
u8 *
format_vnet_dev_mgmt_op (u8 *s, va_list *args)
{
  vnet_dev_rt_op_t *op = va_arg (*args, vnet_dev_rt_op_t *);

  char *types[] = {
    [VNET_DEV_RT_OP_TYPE_RX_QUEUE] = "rx queue",
  };
  char *actions[] = {
    [VNET_DEV_RT_OP_ACTION_START] = "start",
    [VNET_DEV_RT_OP_ACTION_STOP] = "stop",
  };

  return format (s, "port %u %s %u %s on thread %u",
                 op->rx_queue->port->port_id, types[op->type],
                 op->rx_queue->queue_id, actions[op->action],
                 op->thread_index);
}

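/* Execute a batch of runtime ops. An op is run directly on the calling thread
 * when the worker barrier is held, or when it targets this thread and is not
 * ordered behind a pending op on the same queue; all other ops are handed to
 * the per-thread management node and waited on. */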
vnet_dev_rv_t
vnet_dev_rt_exec_ops (vlib_main_t *vm, vnet_dev_t *dev, vnet_dev_rt_op_t *ops,
                      u32 n_ops)
{
  vnet_dev_rt_op_t *op = ops;
  vnet_dev_rt_op_t *remote_ops = 0;
  clib_bitmap_t *remote_bmp = 0;
  u32 i;

  ASSERT (rt_ops == 0);

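  /* Split ops into those safe to run right here and those that must be
   * executed by the thread owning the rx node runtime. */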
  for (op = ops; op < (ops + n_ops); op++)
    {
      vlib_main_t *tvm = vlib_get_main_by_index (op->thread_index);

      if ((vlib_worker_thread_barrier_held ()) ||
          (op->thread_index == vm->thread_index &&
           _vnet_dev_rt_op_not_occured_before (ops, op)))
        {
          _vnet_dev_rt_exec_op (tvm, op);
          log_debug (dev, "%U executed locally", format_vnet_dev_mgmt_op, op);
          continue;
        }

      vec_add1 (remote_ops, *op);
      log_debug (dev, "%U enqueued for remote execution",
                 format_vnet_dev_mgmt_op, op);
      remote_bmp = clib_bitmap_set (remote_bmp, op->thread_index, 1);
    }

  if (remote_ops == 0)
    return VNET_DEV_OK;

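  /* Publish the remote op vector, then raise an interrupt on the management
   * node of every thread that has work pending. */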
  __atomic_store_n (&rt_ops, remote_ops, __ATOMIC_RELEASE);

  clib_bitmap_foreach (i, remote_bmp)
    {
      vlib_node_set_interrupt_pending (vlib_get_main_by_index (i),
                                       vnet_dev_rt_mgmt_node.index);
      log_debug (dev, "interrupt sent to %s node on thread %u",
                 vnet_dev_rt_mgmt_node.name, i);
    }

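  /* Busy-wait until every remote op has been marked completed by its target
   * thread, then release the shared state. */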
  vec_foreach (op, remote_ops)
    {
      while (op->completed == 0)
        CLIB_PAUSE ();
    }

  __atomic_store_n (&rt_ops, 0, __ATOMIC_RELAXED);
  vec_free (remote_ops);
  clib_bitmap_free (remote_bmp);
  return VNET_DEV_OK;
}