/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#ifndef _VNET_DEV_FUNCS_H_
#define _VNET_DEV_FUNCS_H_

#include <vppinfra/clib.h>
#include <vnet/dev/dev.h>

static_always_inline void *
vnet_dev_get_data (vnet_dev_t *dev)
{
  return dev->data;
}

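/* Recover the vnet_dev_t from a pointer to its driver data area by
 * subtracting the offset of the 'data' member. */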
static_always_inline vnet_dev_t *
vnet_dev_from_data (void *p)
{
  return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
}

static_always_inline void *
vnet_dev_get_port_data (vnet_dev_port_t *port)
{
  return port->data;
}

static_always_inline void *
vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
{
  return rxq->data;
}

static_always_inline void *
vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
{
  return txq->data;
}

static_always_inline vnet_dev_t *
vnet_dev_get_by_index (u32 index)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->devices, index)[0];
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
{
  return pool_elt_at_index (dev->ports, index)[0];
}

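/* Map a hardware interface dev_instance to its port; returns 0 if the
 * pool index is not in use. */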
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_dev_instance (u32 dev_instance)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
    return 0;
  return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
}

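/* Look up a device by its string ID; returns 0 if no such device is
 * registered. */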
static_always_inline vnet_dev_t *
vnet_dev_by_id (char *id)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  uword *p = hash_get (dm->device_index_by_id, id);
  if (p)
    return *pool_elt_at_index (dm->devices, p[0]);
  return 0;
}

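/* Translate a virtual address to a device DMA address: devices capable of
 * VA DMA use the pointer directly, otherwise the physmem physical address
 * is used. */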
static_always_inline uword
vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
{
  return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
}

static_always_inline void *
vnet_dev_get_bus_data (vnet_dev_t *dev)
{
  return (void *) dev->bus_data;
}

static_always_inline vnet_dev_bus_t *
vnet_dev_get_bus (vnet_dev_t *dev)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->buses, dev->bus_index);
}

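/* Debug-build sanity checks: assert that the caller is the device process
 * node running on the main thread. */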
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
          vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

static_always_inline u32
vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
{
  return port->intf.sw_if_index;
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
{
  foreach_vnet_dev_port (p, dev)
    if (p->port_id == port_id)
      return p;
  return 0;
}

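/* Allocate a zeroed, cache-line-aligned object of 'sz' bytes followed by
 * 'data_sz' bytes of per-driver data, rounded up to a full cache line. */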
static_always_inline void *
vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
{
  void *p;
  sz += data_sz;
  sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
  p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
  clib_memset (p, 0, sz);
  return p;
}

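/* Spinlock protecting TX queues shared by multiple threads. This is a
 * test-and-test-and-set lock: spin on a plain load and retry the
 * compare-exchange only once the lock appears free. */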
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
        CLIB_PAUSE ();
      free = 0;
    }
}

static_always_inline void
vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
{
  if (!txq->lock_needed)
    return;
  __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
}

static_always_inline u8
vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
{
  return rxq->buffer_template.buffer_pool_index;
}

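/* Post a runtime request to an RX queue by atomically OR-ing the request
 * bits; they are consumed by vnet_dev_rx_queue_runtime_update (). */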
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
                              vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
                     __ATOMIC_RELEASE);
}

static_always_inline vnet_dev_rx_node_runtime_t *
vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_tx_node_runtime_t *
vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_rx_queue_t **
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node)
{
  vnet_dev_rx_node_runtime_t *rt = vnet_dev_get_rx_node_runtime (node);
  return rt->rx_queues;
}

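/* Apply any pending runtime requests to an RX queue. Returns 1 if the
 * queue should be processed, 0 if it is suspended. */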
static_always_inline int
vnet_dev_rx_queue_runtime_update (vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;
  int rv = 1;

  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return 1;

  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      rxq->suspended = 1;
      rv = 0;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rv;
}

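/* Per-thread scratch space, sliced out of a single allocation by thread
 * index. */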
static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
  return vnet_dev_main.runtime_temp_spaces +
         ((uword) vm->thread_index
          << vnet_dev_main.log2_runtime_temp_space_sz);
}

static_always_inline void
vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
{
  vnet_dev_hw_addr_t ha = {};
  clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
  *addr = ha;
}

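/* Iterate over all RX queues assigned to an RX node, skipping queues whose
 * runtime update reports them as suspended. */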
#define foreach_vnet_dev_rx_queue_runtime(q, node)                           \
  for (vnet_dev_rx_queue_t *                                                 \
         *__qp = foreach_vnet_dev_rx_queue_runtime_helper (node),            \
         **__last = __qp + (vnet_dev_get_rx_node_runtime (node))->n_rx_queues, \
         *(q) = *__qp;                                                       \
       __qp < __last; __qp++, (q) = *__qp)                                   \
    if (vnet_dev_rx_queue_runtime_update (q))

#endif /* _VNET_DEV_FUNCS_H_ */