Damjan Marion | 38c6191 | 2023-10-17 16:06:26 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: Apache-2.0 |
| 2 | * Copyright (c) 2023 Cisco Systems, Inc. |
| 3 | */ |
| 4 | |
| 5 | #ifndef _VNET_DEV_FUNCS_H_ |
| 6 | #define _VNET_DEV_FUNCS_H_ |
| 7 | |
| 8 | #include <vppinfra/clib.h> |
| 9 | #include <vnet/dev/dev.h> |
| 10 | |
/* Return pointer to the driver-private data area embedded in the device. */
static_always_inline void *
vnet_dev_get_data (vnet_dev_t *dev)
{
  return dev->data;
}
| 16 | |
| 17 | static_always_inline vnet_dev_t * |
| 18 | vnet_dev_from_data (void *p) |
| 19 | { |
| 20 | return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data)); |
| 21 | } |
| 22 | |
/* Return pointer to the driver-private data area embedded in the port. */
static_always_inline void *
vnet_dev_get_port_data (vnet_dev_port_t *port)
{
  return port->data;
}
| 28 | |
/* Return pointer to the driver-private data area embedded in the rx queue. */
static_always_inline void *
vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
{
  return rxq->data;
}
| 34 | |
/* Return pointer to the driver-private data area embedded in the tx queue. */
static_always_inline void *
vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
{
  return txq->data;
}
| 40 | |
| 41 | static_always_inline vnet_dev_t * |
| 42 | vnet_dev_get_by_index (u32 index) |
| 43 | { |
| 44 | vnet_dev_main_t *dm = &vnet_dev_main; |
| 45 | return pool_elt_at_index (dm->devices, index)[0]; |
| 46 | } |
| 47 | |
| 48 | static_always_inline vnet_dev_port_t * |
| 49 | vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index) |
| 50 | { |
| 51 | return pool_elt_at_index (dev->ports, index)[0]; |
| 52 | } |
| 53 | |
| 54 | static_always_inline vnet_dev_port_t * |
| 55 | vnet_dev_get_port_from_dev_instance (u32 dev_instance) |
| 56 | { |
| 57 | vnet_dev_main_t *dm = &vnet_dev_main; |
| 58 | if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance)) |
| 59 | return 0; |
| 60 | return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0]; |
| 61 | } |
| 62 | |
Damjan Marion | b8dd981 | 2023-11-03 13:47:05 +0000 | [diff] [blame] | 63 | static_always_inline vnet_dev_port_t * |
| 64 | vnet_dev_get_port_from_hw_if_index (u32 hw_if_index) |
| 65 | { |
| 66 | vnet_hw_interface_t *hw; |
| 67 | vnet_dev_port_t *port; |
| 68 | hw = vnet_get_hw_interface (vnet_get_main (), hw_if_index); |
| 69 | port = vnet_dev_get_port_from_dev_instance (hw->dev_instance); |
| 70 | |
| 71 | if (!port || port->intf.hw_if_index != hw_if_index) |
| 72 | return 0; |
| 73 | |
| 74 | return port; |
| 75 | } |
| 76 | |
Damjan Marion | 38c6191 | 2023-10-17 16:06:26 +0000 | [diff] [blame] | 77 | static_always_inline vnet_dev_t * |
| 78 | vnet_dev_by_id (char *id) |
| 79 | { |
| 80 | vnet_dev_main_t *dm = &vnet_dev_main; |
| 81 | uword *p = hash_get (dm->device_index_by_id, id); |
| 82 | if (p) |
| 83 | return *pool_elt_at_index (dm->devices, p[0]); |
| 84 | return 0; |
| 85 | } |
| 86 | |
| 87 | static_always_inline uword |
| 88 | vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p) |
| 89 | { |
| 90 | return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p); |
| 91 | } |
| 92 | |
/* Return pointer to the bus-specific data area embedded in the device. */
static_always_inline void *
vnet_dev_get_bus_data (vnet_dev_t *dev)
{
  return (void *) dev->bus_data;
}
| 98 | |
/* Return the bus descriptor this device is attached to, looked up by
 * the device's bus_index in the global bus pool. */
static_always_inline vnet_dev_bus_t *
vnet_dev_get_bus (vnet_dev_t *dev)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->buses, dev->bus_index);
}
| 105 | |
/* Debug-build check that the caller is allowed to mutate device state:
 * must be running inside the device's own process node, on the main
 * thread (thread index 0). No-op when asserts are compiled out. */
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}
| 112 | |
/* Debug-build check that the caller may mutate port state: must run in
 * the owning device's process node on the main thread (thread index 0). */
static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
	  vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}
| 120 | |
/* Return the software interface index of the port's interface. */
static_always_inline u32
vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
{
  return port->intf.sw_if_index;
}
| 126 | |
/* Linear scan of the device's ports for a matching port id.
 * Returns 0 when no port matches. */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
{
  foreach_vnet_dev_port (p, dev)
    if (p->port_id == port_id)
      return p;
  return 0;
}
| 135 | |
/* Linear scan of the port's rx queues for a matching queue id.
 * Returns 0 when no queue matches. */
static_always_inline vnet_dev_rx_queue_t *
vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  foreach_vnet_dev_port_rx_queue (q, port)
    if (q->queue_id == queue_id)
      return q;
  return 0;
}
| 145 | |
/* Linear scan of the port's tx queues for a matching queue id.
 * Returns 0 when no queue matches. */
static_always_inline vnet_dev_tx_queue_t *
vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  foreach_vnet_dev_port_tx_queue (q, port)
    if (q->queue_id == queue_id)
      return q;
  return 0;
}
| 155 | |
Damjan Marion | 38c6191 | 2023-10-17 16:06:26 +0000 | [diff] [blame] | 156 | static_always_inline void * |
| 157 | vnet_dev_alloc_with_data (u32 sz, u32 data_sz) |
| 158 | { |
| 159 | void *p; |
| 160 | sz += data_sz; |
| 161 | sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES); |
| 162 | p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES); |
| 163 | clib_memset (p, 0, sz); |
| 164 | return p; |
| 165 | } |
| 166 | |
/* Acquire the tx queue spinlock, but only when the queue is shared
 * between threads (lock_needed set). Uses a test-and-test-and-set
 * pattern: on CAS failure, spin on a relaxed load until the lock looks
 * free before retrying, to avoid hammering the cache line. Acquire
 * ordering on the successful CAS pairs with the release store in
 * vnet_dev_tx_queue_unlock_if_needed (). */
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      /* failed CAS wrote the observed value into 'free'; reset expected */
      free = 0;
    }
}
| 183 | |
/* Release the tx queue spinlock taken by
 * vnet_dev_tx_queue_lock_if_needed (); release ordering publishes all
 * writes made while the lock was held. No-op for unshared queues. */
static_always_inline void
vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
{
  if (!txq->lock_needed)
    return;
  __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
}
| 191 | |
/* Return the buffer pool index the rx queue allocates buffers from,
 * taken from the queue's buffer template. */
static_always_inline u8
vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
{
  return rxq->buffer_template.buffer_pool_index;
}
| 197 | |
Damjan Marion | e596ca1 | 2023-11-08 19:12:27 +0000 | [diff] [blame] | 198 | static_always_inline u32 |
| 199 | vnet_dev_get_rx_queue_buffer_data_size (vlib_main_t *vm, |
| 200 | vnet_dev_rx_queue_t *rxq) |
| 201 | { |
| 202 | u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq); |
| 203 | return vlib_get_buffer_pool (vm, bpi)->data_size; |
| 204 | } |
| 205 | |
/* Post an asynchronous runtime request to an rx queue. Request flags
 * are OR-ed in atomically with release ordering so the polling thread
 * observes them (and the data they guard) on its next pass.
 * NOTE(review): 'vm' is currently unused; kept for API symmetry. */
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}
| 213 | |
/* Reinterpret a graph node's runtime data as the dev rx node runtime. */
static_always_inline vnet_dev_rx_node_runtime_t *
vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}
| 219 | |
/* Reinterpret a graph node's runtime data as the dev tx node runtime. */
static_always_inline vnet_dev_tx_node_runtime_t *
vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}
| 225 | |
/* Iterator backend for foreach_vnet_dev_rx_queue_runtime (). Given the
 * previous queue (or 0 to start), return the next rx queue on this
 * thread's list, applying any pending runtime requests along the way.
 * Queues that get suspended by a request are skipped (via the 'next:'
 * label); returns 0 when the list is exhausted. */
static_always_inline vnet_dev_rx_queue_t *
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
					  vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;

  /* first call starts from the node's queue list head; subsequent
   * calls (and the suspend path below) advance to the next queue */
  if (rxq == 0)
    rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
  else
  next:
    rxq = rxq->next_on_thread;

  if (PREDICT_FALSE (rxq == 0))
    return 0;

  /* common case: nothing pending for this queue */
  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return rxq;

  /* atomically consume all pending request flags; acquire pairs with
   * the release in vnet_dev_rx_queue_rt_request () */
  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      /* refresh the buffer template with the port's current feature
       * arc configuration */
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      /* newly suspended queue is not returned; move on to the next one */
      rxq->suspended = 1;
      goto next;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rxq;
}
| 270 | |
/* Iterate over the rx queues assigned to this thread's rx node,
 * applying pending runtime requests and skipping queues suspended in
 * the process (see foreach_vnet_dev_rx_queue_runtime_helper ()). */
#define foreach_vnet_dev_rx_queue_runtime(q, node)                            \
  for (vnet_dev_rx_queue_t * (q) =                                            \
	 foreach_vnet_dev_rx_queue_runtime_helper (node, 0);                  \
       q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
| 275 | |
/* Return this thread's slice of the global runtime temporary scratch
 * area; slices are 2^log2_runtime_temp_space_sz bytes apart, indexed
 * by thread index. */
static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
  return vnet_dev_main.runtime_temp_spaces +
	 ((uword) vm->thread_index
	  << vnet_dev_main.log2_runtime_temp_space_sz);
}
| 283 | |
| 284 | static_always_inline void |
| 285 | vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr) |
| 286 | { |
| 287 | vnet_dev_hw_addr_t ha = {}; |
| 288 | clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac)); |
| 289 | *addr = ha; |
| 290 | } |
| 291 | |
/* Linear scan of the port's argument list for a matching argument id.
 * Returns 0 when no argument matches. */
static_always_inline vnet_dev_arg_t *
vnet_dev_get_port_arg_by_id (vnet_dev_port_t *port, u32 id)
{
  foreach_vnet_dev_port_args (a, port)
    if (a->id == id)
      return a;
  return 0;
}
| 300 | |
| 301 | static_always_inline int |
| 302 | vnet_dev_arg_get_bool (vnet_dev_arg_t *arg) |
| 303 | { |
| 304 | ASSERT (arg->type == VNET_DEV_ARG_TYPE_BOOL); |
| 305 | return arg->val_set ? arg->val.boolean : arg->default_val.boolean; |
| 306 | } |
| 307 | |
| 308 | static_always_inline u32 |
| 309 | vnet_dev_arg_get_uint32 (vnet_dev_arg_t *arg) |
| 310 | { |
| 311 | ASSERT (arg->type == VNET_DEV_ARG_TYPE_UINT32); |
| 312 | return arg->val_set ? arg->val.uint32 : arg->default_val.uint32; |
| 313 | } |
| 314 | |
| 315 | static_always_inline u8 * |
| 316 | vnet_dev_arg_get_string (vnet_dev_arg_t *arg) |
| 317 | { |
| 318 | ASSERT (arg->type == VNET_DEV_ARG_TYPE_STRING); |
| 319 | return arg->val_set ? arg->val.string : arg->default_val.string; |
| 320 | } |
| 321 | |
Damjan Marion | 38c6191 | 2023-10-17 16:06:26 +0000 | [diff] [blame] | 322 | #endif /* _VNET_DEV_FUNCS_H_ */ |