blob: a74d3399511688b7332a332b0bdbfa630460dd84 [file] [log] [blame]
Damjan Marion38c61912023-10-17 16:06:26 +00001/* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2023 Cisco Systems, Inc.
3 */
4
5#ifndef _VNET_DEV_FUNCS_H_
6#define _VNET_DEV_FUNCS_H_
7
8#include <vppinfra/clib.h>
9#include <vnet/dev/dev.h>
10
11static_always_inline void *
12vnet_dev_get_data (vnet_dev_t *dev)
13{
14 return dev->data;
15}
16
17static_always_inline vnet_dev_t *
18vnet_dev_from_data (void *p)
19{
20 return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
21}
22
23static_always_inline void *
24vnet_dev_get_port_data (vnet_dev_port_t *port)
25{
26 return port->data;
27}
28
29static_always_inline void *
30vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
31{
32 return rxq->data;
33}
34
35static_always_inline void *
36vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
37{
38 return txq->data;
39}
40
41static_always_inline vnet_dev_t *
42vnet_dev_get_by_index (u32 index)
43{
44 vnet_dev_main_t *dm = &vnet_dev_main;
45 return pool_elt_at_index (dm->devices, index)[0];
46}
47
48static_always_inline vnet_dev_port_t *
49vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
50{
51 return pool_elt_at_index (dev->ports, index)[0];
52}
53
54static_always_inline vnet_dev_port_t *
55vnet_dev_get_port_from_dev_instance (u32 dev_instance)
56{
57 vnet_dev_main_t *dm = &vnet_dev_main;
58 if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
59 return 0;
60 return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
61}
62
Damjan Marionb8dd9812023-11-03 13:47:05 +000063static_always_inline vnet_dev_port_t *
64vnet_dev_get_port_from_hw_if_index (u32 hw_if_index)
65{
66 vnet_hw_interface_t *hw;
67 vnet_dev_port_t *port;
68 hw = vnet_get_hw_interface (vnet_get_main (), hw_if_index);
69 port = vnet_dev_get_port_from_dev_instance (hw->dev_instance);
70
71 if (!port || port->intf.hw_if_index != hw_if_index)
72 return 0;
73
74 return port;
75}
76
Damjan Marion38c61912023-10-17 16:06:26 +000077static_always_inline vnet_dev_t *
78vnet_dev_by_id (char *id)
79{
80 vnet_dev_main_t *dm = &vnet_dev_main;
81 uword *p = hash_get (dm->device_index_by_id, id);
82 if (p)
83 return *pool_elt_at_index (dm->devices, p[0]);
84 return 0;
85}
86
87static_always_inline uword
88vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
89{
90 return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
91}
92
93static_always_inline void *
94vnet_dev_get_bus_data (vnet_dev_t *dev)
95{
96 return (void *) dev->bus_data;
97}
98
99static_always_inline vnet_dev_bus_t *
100vnet_dev_get_bus (vnet_dev_t *dev)
101{
102 vnet_dev_main_t *dm = &vnet_dev_main;
103 return pool_elt_at_index (dm->buses, dev->bus_index);
104}
105
/* Debug-build check that the caller is on the main thread and running
 * inside this device's process node - device state may only be mutated
 * from that context. Calls stay inside ASSERT so release builds pay
 * nothing. */
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}
112
/* Debug-build check that the caller is on the main thread and running
 * inside the process node of the port's owning device - port state may
 * only be mutated from that context. */
static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
	  vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}
120
121static_always_inline u32
122vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
123{
124 return port->intf.sw_if_index;
125}
126
127static_always_inline vnet_dev_port_t *
128vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
129{
130 foreach_vnet_dev_port (p, dev)
131 if (p->port_id == port_id)
132 return p;
133 return 0;
134}
135
Damjan Marionb8dd9812023-11-03 13:47:05 +0000136static_always_inline vnet_dev_rx_queue_t *
137vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
138 vnet_dev_queue_id_t queue_id)
139{
140 foreach_vnet_dev_port_rx_queue (q, port)
141 if (q->queue_id == queue_id)
142 return q;
143 return 0;
144}
145
146static_always_inline vnet_dev_tx_queue_t *
147vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
148 vnet_dev_queue_id_t queue_id)
149{
150 foreach_vnet_dev_port_tx_queue (q, port)
151 if (q->queue_id == queue_id)
152 return q;
153 return 0;
154}
155
Damjan Marion38c61912023-10-17 16:06:26 +0000156static_always_inline void *
157vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
158{
159 void *p;
160 sz += data_sz;
161 sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
162 p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
163 clib_memset (p, 0, sz);
164 return p;
165}
166
/* Take the tx queue spinlock, but only when the queue is shared between
 * threads (lock_needed set at queue setup time). Test-and-test-and-set:
 * on CAS failure, spin on a cheap relaxed load until the lock looks free
 * before retrying the acquiring CAS, which keeps the cache line in
 * shared state while waiting. Paired with
 * vnet_dev_tx_queue_unlock_if_needed (). */
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      /* CAS wrote the observed value into 'free'; reset expected value */
      free = 0;
    }
}
183
184static_always_inline void
185vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
186{
187 if (!txq->lock_needed)
188 return;
189 __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
190}
191
192static_always_inline u8
193vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
194{
195 return rxq->buffer_template.buffer_pool_index;
196}
197
Damjan Marione596ca12023-11-08 19:12:27 +0000198static_always_inline u32
199vnet_dev_get_rx_queue_buffer_data_size (vlib_main_t *vm,
200 vnet_dev_rx_queue_t *rxq)
201{
202 u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
203 return vlib_get_buffer_pool (vm, bpi)->data_size;
204}
205
/* Post an asynchronous runtime request to an rx queue. Request bits are
 * OR-ed in atomically so concurrent requesters cannot lose each other's
 * bits; the rx node consumes and clears them in
 * foreach_vnet_dev_rx_queue_runtime_helper (). RELEASE ordering makes
 * writes done before the request visible to the consuming thread. */
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}
213
214static_always_inline vnet_dev_rx_node_runtime_t *
215vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
216{
217 return (void *) node->runtime_data;
218}
219
220static_always_inline vnet_dev_tx_node_runtime_t *
221vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
222{
223 return (void *) node->runtime_data;
224}
225
/* Iterator backend for foreach_vnet_dev_rx_queue_runtime. Given the
 * previous queue (or 0 to start), return the next rx queue on this
 * thread's list, after applying any pending runtime requests posted via
 * vnet_dev_rx_queue_rt_request (). Suspended queues are skipped (the
 * 'goto next' re-enters the advance step in the else arm). Returns 0 at
 * end of list. */
static_always_inline vnet_dev_rx_queue_t *
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
					  vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;

  /* start of iteration vs. advance to next queue on this thread */
  if (rxq == 0)
    rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
  else
  next:
    rxq = rxq->next_on_thread;

  if (PREDICT_FALSE (rxq == 0))
    return 0;

  /* fast path: no pending requests */
  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return rxq;

  /* atomically take ownership of all pending request bits */
  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      /* newly suspended queue is not returned; skip to the next one */
      rxq->suspended = 1;
      goto next;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rxq;
}
270
/* Iterate over all rx queues assigned to this thread's rx node, applying
 * pending runtime requests and skipping suspended queues as it goes. */
#define foreach_vnet_dev_rx_queue_runtime(q, node)                            \
  for (vnet_dev_rx_queue_t * (q) =                                            \
	 foreach_vnet_dev_rx_queue_runtime_helper (node, 0);                  \
       q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
275
Damjan Marion38c61912023-10-17 16:06:26 +0000276static_always_inline void *
277vnet_dev_get_rt_temp_space (vlib_main_t *vm)
278{
279 return vnet_dev_main.runtime_temp_spaces +
280 ((uword) vm->thread_index
281 << vnet_dev_main.log2_runtime_temp_space_sz);
282}
283
284static_always_inline void
285vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
286{
287 vnet_dev_hw_addr_t ha = {};
288 clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
289 *addr = ha;
290}
291
Damjan Marion69768d92023-11-13 17:33:32 +0000292static_always_inline vnet_dev_arg_t *
293vnet_dev_get_port_arg_by_id (vnet_dev_port_t *port, u32 id)
294{
295 foreach_vnet_dev_port_args (a, port)
296 if (a->id == id)
297 return a;
298 return 0;
299}
300
301static_always_inline int
302vnet_dev_arg_get_bool (vnet_dev_arg_t *arg)
303{
304 ASSERT (arg->type == VNET_DEV_ARG_TYPE_BOOL);
305 return arg->val_set ? arg->val.boolean : arg->default_val.boolean;
306}
307
308static_always_inline u32
309vnet_dev_arg_get_uint32 (vnet_dev_arg_t *arg)
310{
311 ASSERT (arg->type == VNET_DEV_ARG_TYPE_UINT32);
312 return arg->val_set ? arg->val.uint32 : arg->default_val.uint32;
313}
314
315static_always_inline u8 *
316vnet_dev_arg_get_string (vnet_dev_arg_t *arg)
317{
318 ASSERT (arg->type == VNET_DEV_ARG_TYPE_STRING);
319 return arg->val_set ? arg->val.string : arg->default_val.string;
320}
321
Damjan Marion38c61912023-10-17 16:06:26 +0000322#endif /* _VNET_DEV_FUNCS_H_ */