blob: 521157abbec4a3821c686a6ef47ee4ffae480853 [file] [log] [blame]
Damjan Marion38c61912023-10-17 16:06:26 +00001/* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2023 Cisco Systems, Inc.
3 */
4
5#ifndef _VNET_DEV_FUNCS_H_
6#define _VNET_DEV_FUNCS_H_
7
8#include <vppinfra/clib.h>
9#include <vnet/dev/dev.h>
10
11static_always_inline void *
12vnet_dev_get_data (vnet_dev_t *dev)
13{
14 return dev->data;
15}
16
17static_always_inline vnet_dev_t *
18vnet_dev_from_data (void *p)
19{
20 return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
21}
22
23static_always_inline void *
24vnet_dev_get_port_data (vnet_dev_port_t *port)
25{
26 return port->data;
27}
28
29static_always_inline void *
30vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
31{
32 return rxq->data;
33}
34
35static_always_inline void *
36vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
37{
38 return txq->data;
39}
40
41static_always_inline vnet_dev_t *
42vnet_dev_get_by_index (u32 index)
43{
44 vnet_dev_main_t *dm = &vnet_dev_main;
45 return pool_elt_at_index (dm->devices, index)[0];
46}
47
48static_always_inline vnet_dev_port_t *
49vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
50{
51 return pool_elt_at_index (dev->ports, index)[0];
52}
53
54static_always_inline vnet_dev_port_t *
55vnet_dev_get_port_from_dev_instance (u32 dev_instance)
56{
57 vnet_dev_main_t *dm = &vnet_dev_main;
58 if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
59 return 0;
60 return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
61}
62
Damjan Marionb8dd9812023-11-03 13:47:05 +000063static_always_inline vnet_dev_port_t *
64vnet_dev_get_port_from_hw_if_index (u32 hw_if_index)
65{
66 vnet_hw_interface_t *hw;
67 vnet_dev_port_t *port;
68 hw = vnet_get_hw_interface (vnet_get_main (), hw_if_index);
69 port = vnet_dev_get_port_from_dev_instance (hw->dev_instance);
70
71 if (!port || port->intf.hw_if_index != hw_if_index)
72 return 0;
73
74 return port;
75}
76
Damjan Marion38c61912023-10-17 16:06:26 +000077static_always_inline vnet_dev_t *
Damjan Marionddf6cec2023-11-22 16:25:55 +000078vnet_dev_by_index (u32 index)
79{
80 vnet_dev_main_t *dm = &vnet_dev_main;
81 if (pool_is_free_index (dm->devices, index))
82 return 0;
83
84 return *pool_elt_at_index (dm->devices, index);
85}
86
87static_always_inline vnet_dev_t *
Damjan Marion38c61912023-10-17 16:06:26 +000088vnet_dev_by_id (char *id)
89{
90 vnet_dev_main_t *dm = &vnet_dev_main;
91 uword *p = hash_get (dm->device_index_by_id, id);
92 if (p)
93 return *pool_elt_at_index (dm->devices, p[0]);
94 return 0;
95}
96
97static_always_inline uword
98vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
99{
100 return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
101}
102
103static_always_inline void *
104vnet_dev_get_bus_data (vnet_dev_t *dev)
105{
106 return (void *) dev->bus_data;
107}
108
109static_always_inline vnet_dev_bus_t *
110vnet_dev_get_bus (vnet_dev_t *dev)
111{
112 vnet_dev_main_t *dm = &vnet_dev_main;
113 return pool_elt_at_index (dm->buses, dev->bus_index);
114}
115
116static_always_inline void
117vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
118{
119 ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
120 ASSERT (vm->thread_index == 0);
121}
122
123static_always_inline void
124vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
125{
126 ASSERT (port->dev->process_node_index ==
127 vlib_get_current_process_node_index (vm));
128 ASSERT (vm->thread_index == 0);
129}
130
131static_always_inline u32
132vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
133{
134 return port->intf.sw_if_index;
135}
136
137static_always_inline vnet_dev_port_t *
138vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
139{
140 foreach_vnet_dev_port (p, dev)
141 if (p->port_id == port_id)
142 return p;
143 return 0;
144}
145
Damjan Marionb8dd9812023-11-03 13:47:05 +0000146static_always_inline vnet_dev_rx_queue_t *
147vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
148 vnet_dev_queue_id_t queue_id)
149{
150 foreach_vnet_dev_port_rx_queue (q, port)
151 if (q->queue_id == queue_id)
152 return q;
153 return 0;
154}
155
156static_always_inline vnet_dev_tx_queue_t *
157vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
158 vnet_dev_queue_id_t queue_id)
159{
160 foreach_vnet_dev_port_tx_queue (q, port)
161 if (q->queue_id == queue_id)
162 return q;
163 return 0;
164}
165
Damjan Marion38c61912023-10-17 16:06:26 +0000166static_always_inline void *
167vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
168{
169 void *p;
170 sz += data_sz;
171 sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
172 p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
173 clib_memset (p, 0, sz);
174 return p;
175}
176
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  /* Spinlock acquire for tx queues shared by multiple threads.  No-op when
     the queue is owned by a single thread (lock_needed == 0).  Pairs with
     the release store in vnet_dev_tx_queue_unlock_if_needed. */
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  /* Test-and-test-and-set: on CAS failure spin on a plain relaxed load
     (with CPU pause) until the lock looks free, then retry the CAS.  This
     avoids hammering the cache line with RMW operations while contended. */
  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      /* CAS wrote the observed value into 'free'; reset the expected
	 value before retrying. */
      free = 0;
    }
}
193
194static_always_inline void
195vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
196{
197 if (!txq->lock_needed)
198 return;
199 __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
200}
201
202static_always_inline u8
203vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
204{
205 return rxq->buffer_template.buffer_pool_index;
206}
207
Damjan Marione596ca12023-11-08 19:12:27 +0000208static_always_inline u32
209vnet_dev_get_rx_queue_buffer_data_size (vlib_main_t *vm,
210 vnet_dev_rx_queue_t *rxq)
211{
212 u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
213 return vlib_get_buffer_pool (vm, bpi)->data_size;
214}
215
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  /* Post runtime request bits to an rx queue owned by another thread.
     The release OR makes preceding writes (e.g. updated port->intf fields)
     visible to the acquire exchange in
     foreach_vnet_dev_rx_queue_runtime_helper, which consumes the bits. */
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}
223
224static_always_inline vnet_dev_rx_node_runtime_t *
225vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
226{
227 return (void *) node->runtime_data;
228}
229
230static_always_inline vnet_dev_tx_node_runtime_t *
231vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
232{
233 return (void *) node->runtime_data;
234}
235
static_always_inline vnet_dev_rx_queue_t *
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
					  vnet_dev_rx_queue_t *rxq)
{
  /* Iterator core behind foreach_vnet_dev_rx_queue_runtime: given the
     previous queue (or 0 to start), return the next rx queue on this
     node's thread-local list, applying any pending runtime requests on the
     way and skipping queues that become suspended.  Returns 0 when the
     list is exhausted. */
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;

  if (rxq == 0)
    rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
  else
    /* 'next:' is re-entered via goto below when a queue gets suspended
       and must be skipped. */
  next:
    rxq = rxq->next_on_thread;

  if (PREDICT_FALSE (rxq == 0))
    return 0;

  /* Fast path: no pending requests for this queue. */
  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return rxq;

  /* Atomically claim all pending request bits; acquire pairs with the
     release OR in vnet_dev_rx_queue_rt_request. */
  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      /* Refresh the cached buffer template with the port's current
	 feature-arc configuration. */
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      /* Suspended queues are not returned to the caller - move on. */
      rxq->suspended = 1;
      goto next;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rxq;
}
280
/* Iterate over the rx queues assigned to this node on the current thread,
   applying pending runtime requests and skipping suspended queues; 'q' is
   declared as a vnet_dev_rx_queue_t * scoped to the loop. */
#define foreach_vnet_dev_rx_queue_runtime(q, node)                            \
  for (vnet_dev_rx_queue_t * (q) =                                            \
	 foreach_vnet_dev_rx_queue_runtime_helper (node, 0);                  \
       q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
285
Damjan Marion38c61912023-10-17 16:06:26 +0000286static_always_inline void *
287vnet_dev_get_rt_temp_space (vlib_main_t *vm)
288{
289 return vnet_dev_main.runtime_temp_spaces +
290 ((uword) vm->thread_index
291 << vnet_dev_main.log2_runtime_temp_space_sz);
292}
293
294static_always_inline void
295vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
296{
297 vnet_dev_hw_addr_t ha = {};
298 clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
299 *addr = ha;
300}
301
Damjan Marion69768d92023-11-13 17:33:32 +0000302static_always_inline vnet_dev_arg_t *
303vnet_dev_get_port_arg_by_id (vnet_dev_port_t *port, u32 id)
304{
305 foreach_vnet_dev_port_args (a, port)
306 if (a->id == id)
307 return a;
308 return 0;
309}
310
311static_always_inline int
312vnet_dev_arg_get_bool (vnet_dev_arg_t *arg)
313{
314 ASSERT (arg->type == VNET_DEV_ARG_TYPE_BOOL);
315 return arg->val_set ? arg->val.boolean : arg->default_val.boolean;
316}
317
318static_always_inline u32
319vnet_dev_arg_get_uint32 (vnet_dev_arg_t *arg)
320{
321 ASSERT (arg->type == VNET_DEV_ARG_TYPE_UINT32);
322 return arg->val_set ? arg->val.uint32 : arg->default_val.uint32;
323}
324
325static_always_inline u8 *
326vnet_dev_arg_get_string (vnet_dev_arg_t *arg)
327{
328 ASSERT (arg->type == VNET_DEV_ARG_TYPE_STRING);
329 return arg->val_set ? arg->val.string : arg->default_val.string;
330}
331
Damjan Marion38c61912023-10-17 16:06:26 +0000332#endif /* _VNET_DEV_FUNCS_H_ */