/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#ifndef _VNET_DEV_FUNCS_H_
#define _VNET_DEV_FUNCS_H_

#include <vppinfra/clib.h>
#include <vnet/dev/dev.h>

static_always_inline void *
vnet_dev_get_data (vnet_dev_t *dev)
{
  return dev->data;
}

static_always_inline vnet_dev_t *
vnet_dev_from_data (void *p)
{
  return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
}
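
/*
 * Illustrative round trip (a sketch, not part of the original header):
 * vnet_dev_get_data () returns the driver-private area stored in the
 * device, and vnet_dev_from_data () recovers the owning device by
 * subtracting the member offset, so the two invert each other:
 *
 *   void *priv = vnet_dev_get_data (dev);
 *   ASSERT (vnet_dev_from_data (priv) == dev);
 */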

static_always_inline void *
vnet_dev_get_port_data (vnet_dev_port_t *port)
{
  return port->data;
}

static_always_inline void *
vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
{
  return rxq->data;
}

static_always_inline void *
vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
{
  return txq->data;
}

static_always_inline vnet_dev_t *
vnet_dev_get_by_index (u32 index)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->devices, index)[0];
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
{
  return pool_elt_at_index (dev->ports, index)[0];
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_dev_instance (u32 dev_instance)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
    return 0;
  return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_hw_if_index (u32 hw_if_index)
{
  vnet_hw_interface_t *hw;
  vnet_dev_port_t *port;
  hw = vnet_get_hw_interface (vnet_get_main (), hw_if_index);
  port = vnet_dev_get_port_from_dev_instance (hw->dev_instance);

  if (!port || port->intf.hw_if_index != hw_if_index)
    return 0;

  return port;
}

static_always_inline vnet_dev_t *
vnet_dev_by_id (char *id)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  uword *p = hash_get (dm->device_index_by_id, id);
  if (p)
    return *pool_elt_at_index (dm->devices, p[0]);
  return 0;
}

static_always_inline uword
vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
{
  return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
}
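
/*
 * Illustrative use (a sketch; "desc" and its "addr" field are hypothetical
 * driver descriptor names): addresses handed to hardware must be in the
 * device's DMA space, which is the virtual address itself when the device
 * does VA DMA and the physical address otherwise:
 *
 *   desc->addr = vnet_dev_get_dma_addr (vm, dev, vlib_buffer_get_current (b));
 */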

static_always_inline void *
vnet_dev_get_bus_data (vnet_dev_t *dev)
{
  return (void *) dev->bus_data;
}

static_always_inline vnet_dev_bus_t *
vnet_dev_get_bus (vnet_dev_t *dev)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->buses, dev->bus_index);
}
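
/*
 * The validate helpers below only assert invariants: device and port state
 * is meant to be mutated only from the device's own process node on the
 * main thread (thread 0). With asserts disabled they compile to nothing.
 */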
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
	  vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

static_always_inline u32
vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
{
  return port->intf.sw_if_index;
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
{
  foreach_vnet_dev_port (p, dev)
    if (p->port_id == port_id)
      return p;
  return 0;
}

static_always_inline vnet_dev_rx_queue_t *
vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  foreach_vnet_dev_port_rx_queue (q, port)
    if (q->queue_id == queue_id)
      return q;
  return 0;
}

static_always_inline vnet_dev_tx_queue_t *
vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  foreach_vnet_dev_port_tx_queue (q, port)
    if (q->queue_id == queue_id)
      return q;
  return 0;
}

static_always_inline void *
vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
{
  void *p;
  sz += data_sz;
  sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
  p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
  clib_memset (p, 0, sz);
  return p;
}
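
/*
 * Illustrative use (a sketch; my_driver_data_t is a hypothetical
 * driver-private type): this is the allocation pattern behind the
 * vnet_dev_get_data () / vnet_dev_from_data () pair above, producing a
 * cache-line aligned, zeroed object with data_sz private bytes appended:
 *
 *   vnet_dev_t *dev =
 *     vnet_dev_alloc_with_data (sizeof (vnet_dev_t),
 *                               sizeof (my_driver_data_t));
 */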

static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      free = 0;
    }
}

static_always_inline void
vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
{
  if (!txq->lock_needed)
    return;
  __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
}
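
/*
 * Illustrative pairing (a sketch; my_driver_enqueue () is hypothetical):
 * lock_needed is only set when a tx queue is shared between threads, so
 * the common single-writer case stays lock-free:
 *
 *   vnet_dev_tx_queue_lock_if_needed (txq);
 *   n_enq = my_driver_enqueue (vm, txq, from, n_vectors);
 *   vnet_dev_tx_queue_unlock_if_needed (txq);
 */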

static_always_inline u8
vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
{
  return rxq->buffer_template.buffer_pool_index;
}

static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}
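
/*
 * Illustrative request (a sketch): the control plane ORs request bits into
 * runtime_request atomically, and the rx node consumes and applies them in
 * foreach_vnet_dev_rx_queue_runtime_helper () below. For example, to ask
 * the rx node to stop polling a queue:
 *
 *   vnet_dev_rx_queue_rt_req_t req = { .suspend_on = 1 };
 *   vnet_dev_rx_queue_rt_request (vm, rxq, req);
 */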

static_always_inline vnet_dev_rx_node_runtime_t *
vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_tx_node_runtime_t *
vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_rx_queue_t *
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
					  vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;

  if (rxq == 0)
    rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
  else
  next:
    rxq = rxq->next_on_thread;

  if (PREDICT_FALSE (rxq == 0))
    return 0;

  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return rxq;

  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      rxq->suspended = 1;
      goto next;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rxq;
}

#define foreach_vnet_dev_rx_queue_runtime(q, node)                           \
  for (vnet_dev_rx_queue_t * (q) =                                           \
	 foreach_vnet_dev_rx_queue_runtime_helper (node, 0);                  \
       q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
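
/*
 * Illustrative input-node loop (a sketch; my_driver_rx_burst () is
 * hypothetical): the helper above walks this thread's rx queue list and
 * applies any pending runtime requests (next-index and feature-arc
 * updates, suspend/resume) before handing each queue back, so a driver's
 * rx node body typically reduces to:
 *
 *   foreach_vnet_dev_rx_queue_runtime (rxq, node)
 *     n_rx_packets += my_driver_rx_burst (vm, node, rxq);
 */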

static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
  return vnet_dev_main.runtime_temp_spaces +
	 ((uword) vm->thread_index
	  << vnet_dev_main.log2_runtime_temp_space_sz);
}

static_always_inline void
vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
{
  vnet_dev_hw_addr_t ha = {};
  clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
  *addr = ha;
}
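
/*
 * Illustrative use (a sketch; mac[] stands in for a 6-byte MAC read from
 * the device): the zero-initialized temporary clears any bytes of the
 * address beyond eth_mac before the whole value is assigned:
 *
 *   u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *   vnet_dev_hw_addr_t ha;
 *   vnet_dev_set_hw_addr_eth_mac (&ha, mac);
 */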

#endif /* _VNET_DEV_FUNCS_H_ */