blob: 906d7118296bbbe7f479acfbfb96640315853428 [file] [log] [blame]
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/vnet.h>
17
18/* funciton declarations */
19
20u32 vnet_hw_if_get_rx_queue_index_by_id (vnet_main_t *vnm, u32 hw_if_index,
21 u32 queue_id);
22u32 vnet_hw_if_register_rx_queue (vnet_main_t *vnm, u32 hw_if_index,
23 u32 queue_id, u32 thread_idnex);
24void vnet_hw_if_unregister_rx_queue (vnet_main_t *vnm, u32 queue_index);
25void vnet_hw_if_unregister_all_rx_queues (vnet_main_t *vnm, u32 hw_if_index);
26void vnet_hw_if_set_rx_queue_file_index (vnet_main_t *vnm, u32 queue_index,
27 u32 file_index);
28void vnet_hw_if_set_input_node (vnet_main_t *vnm, u32 hw_if_index,
29 u32 node_index);
30int vnet_hw_if_set_rx_queue_mode (vnet_main_t *vnm, u32 queue_index,
31 vnet_hw_if_rx_mode mode);
32vnet_hw_if_rx_mode vnet_hw_if_get_rx_queue_mode (vnet_main_t *vnm,
33 u32 queue_index);
34void vnet_hw_if_set_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index,
35 u32 thread_index);
Mohammed Hawari266929f2021-09-21 17:44:26 +020036vnet_hw_if_rxq_poll_vector_t *
37vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
38 vlib_node_runtime_t *node);
Damjan Marion94100532020-11-06 23:25:57 +010039
40/* inline functions */
41
42static_always_inline vnet_hw_if_rx_queue_t *
43vnet_hw_if_get_rx_queue (vnet_main_t *vnm, u32 queue_index)
44{
45 vnet_interface_main_t *im = &vnm->interface_main;
46 if (pool_is_free_index (im->hw_if_rx_queues, queue_index))
47 return 0;
48 return pool_elt_at_index (im->hw_if_rx_queues, queue_index);
49}
50
51static_always_inline void
52vnet_hw_if_rx_queue_set_int_pending (vnet_main_t *vnm, u32 queue_index)
53{
54 vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
55 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
Damjan Marion6ffb7c62021-03-26 13:06:13 +010056 vlib_main_t *vm = vlib_get_main_by_index (rxq->thread_index);
Damjan Marion94100532020-11-06 23:25:57 +010057 vnet_hw_if_rx_node_runtime_t *rt;
Mohammed Hawari4c71d6c2021-01-29 10:12:53 +010058 if (PREDICT_FALSE (rxq->mode != VNET_HW_IF_RX_MODE_INTERRUPT &&
59 rxq->mode != VNET_HW_IF_RX_MODE_ADAPTIVE))
60 return;
Damjan Marion94100532020-11-06 23:25:57 +010061 rt = vlib_node_get_runtime_data (vm, hi->input_node_index);
62 if (vm == vlib_get_main ())
63 clib_interrupt_set (rt->rxq_interrupts, queue_index);
64 else
65 clib_interrupt_set_atomic (rt->rxq_interrupts, queue_index);
66 vlib_node_set_interrupt_pending (vm, hi->input_node_index);
67}
68
69static_always_inline vnet_hw_if_rxq_poll_vector_t *
70vnet_hw_if_get_rxq_poll_vector (vlib_main_t *vm, vlib_node_runtime_t *node)
71{
72 vnet_hw_if_rx_node_runtime_t *rt = (void *) node->runtime_data;
Maxime Peim5b223392021-05-06 12:17:25 +020073 vnet_hw_if_rxq_poll_vector_t *pv = rt->rxq_vector_int;
Damjan Marion94100532020-11-06 23:25:57 +010074
75 if (PREDICT_FALSE (node->state == VLIB_NODE_STATE_INTERRUPT))
Mohammed Hawari266929f2021-09-21 17:44:26 +020076 pv = vnet_hw_if_generate_rxq_int_poll_vector (vm, node);
Maxime Peim5b223392021-05-06 12:17:25 +020077 else if (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE)
78 pv = rt->rxq_vector_poll;
Damjan Marion94100532020-11-06 23:25:57 +010079
Maxime Peim5b223392021-05-06 12:17:25 +020080 return pv;
Damjan Marion94100532020-11-06 23:25:57 +010081}
82
83static_always_inline u8
84vnet_hw_if_get_rx_queue_numa_node (vnet_main_t *vnm, u32 queue_index)
85{
86 vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
87 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
88 return hi->numa_node;
89}
90
91static_always_inline u32
92vnet_hw_if_get_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index)
93{
94 vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
95 return rxq->thread_index;
96}
97
Mohammed Hawari9fecbe12020-12-11 19:36:37 +010098static_always_inline int
99vnet_hw_if_rxq_cmp_cli_api (vnet_hw_if_rx_queue_t **a,
100 vnet_hw_if_rx_queue_t **b)
101{
102 vnet_main_t *vnm;
103 vnet_hw_interface_t *hif_a;
104 vnet_hw_interface_t *hif_b;
105
106 if (*a == *b)
107 return 0;
108
109 if (a[0]->thread_index != b[0]->thread_index)
110 return 2 * (a[0]->thread_index > b[0]->thread_index) - 1;
111
112 vnm = vnet_get_main ();
113 hif_a = vnet_get_hw_interface (vnm, a[0]->hw_if_index);
114 hif_b = vnet_get_hw_interface (vnm, b[0]->hw_if_index);
115
116 if (hif_a->input_node_index != hif_b->input_node_index)
117 return 2 * (hif_a->input_node_index > hif_b->input_node_index) - 1;
118
119 if (a[0]->hw_if_index != b[0]->hw_if_index)
120 return 2 * (a[0]->hw_if_index > b[0]->hw_if_index) - 1;
121
122 if (a[0]->queue_id != b[0]->queue_id)
123 return 2 * (a[0]->queue_id > b[0]->queue_id) - 1;
124
125 ASSERT (0);
126 return ~0;
127}
128
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */