Damjan Marion | 9410053 | 2020-11-06 23:25:57 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2020 Cisco and/or its affiliates. |
| 3 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | * you may not use this file except in compliance with the License. |
| 5 | * You may obtain a copy of the License at: |
| 6 | * |
| 7 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | * |
| 9 | * Unless required by applicable law or agreed to in writing, software |
| 10 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | * See the License for the specific language governing permissions and |
| 13 | * limitations under the License. |
| 14 | */ |
| 15 | |
| 16 | #include <vnet/vnet.h> |
| 17 | |
/* function declarations */
| 19 | |
| 20 | u32 vnet_hw_if_get_rx_queue_index_by_id (vnet_main_t *vnm, u32 hw_if_index, |
| 21 | u32 queue_id); |
| 22 | u32 vnet_hw_if_register_rx_queue (vnet_main_t *vnm, u32 hw_if_index, |
| 23 | u32 queue_id, u32 thread_idnex); |
| 24 | void vnet_hw_if_unregister_rx_queue (vnet_main_t *vnm, u32 queue_index); |
| 25 | void vnet_hw_if_unregister_all_rx_queues (vnet_main_t *vnm, u32 hw_if_index); |
| 26 | void vnet_hw_if_set_rx_queue_file_index (vnet_main_t *vnm, u32 queue_index, |
| 27 | u32 file_index); |
| 28 | void vnet_hw_if_set_input_node (vnet_main_t *vnm, u32 hw_if_index, |
| 29 | u32 node_index); |
| 30 | int vnet_hw_if_set_rx_queue_mode (vnet_main_t *vnm, u32 queue_index, |
| 31 | vnet_hw_if_rx_mode mode); |
| 32 | vnet_hw_if_rx_mode vnet_hw_if_get_rx_queue_mode (vnet_main_t *vnm, |
| 33 | u32 queue_index); |
| 34 | void vnet_hw_if_set_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index, |
| 35 | u32 thread_index); |
| 36 | void vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index); |
| 37 | void vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm, |
| 38 | vlib_node_runtime_t *node); |
| 39 | |
| 40 | /* inline functions */ |
| 41 | |
| 42 | static_always_inline vnet_hw_if_rx_queue_t * |
| 43 | vnet_hw_if_get_rx_queue (vnet_main_t *vnm, u32 queue_index) |
| 44 | { |
| 45 | vnet_interface_main_t *im = &vnm->interface_main; |
| 46 | if (pool_is_free_index (im->hw_if_rx_queues, queue_index)) |
| 47 | return 0; |
| 48 | return pool_elt_at_index (im->hw_if_rx_queues, queue_index); |
| 49 | } |
| 50 | |
/* Mark an rx queue's interrupt as pending on the thread that owns it, and
   schedule that thread's input node for an interrupt dispatch.
   NOTE(review): queue_index must be valid — rxq is dereferenced without a
   NULL check. */
static_always_inline void
vnet_hw_if_rx_queue_set_int_pending (vnet_main_t *vnm, u32 queue_index)
{
  vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
  /* runtime data lives on the thread the queue is pinned to */
  vlib_main_t *vm = vlib_mains[rxq->thread_index];

  vnet_hw_if_rx_node_runtime_t *rt;
  rt = vlib_node_get_runtime_data (vm, hi->input_node_index);
  /* Same-thread set needs no atomics; setting a bit owned by another
     thread must be atomic. */
  if (vm == vlib_get_main ())
    clib_interrupt_set (rt->rxq_interrupts, queue_index);
  else
    clib_interrupt_set_atomic (rt->rxq_interrupts, queue_index);
  vlib_node_set_interrupt_pending (vm, hi->input_node_index);
}
| 66 | |
| 67 | static_always_inline vnet_hw_if_rxq_poll_vector_t * |
| 68 | vnet_hw_if_get_rxq_poll_vector (vlib_main_t *vm, vlib_node_runtime_t *node) |
| 69 | { |
| 70 | vnet_hw_if_rx_node_runtime_t *rt = (void *) node->runtime_data; |
| 71 | |
| 72 | if (PREDICT_FALSE (node->state == VLIB_NODE_STATE_INTERRUPT)) |
| 73 | vnet_hw_if_generate_rxq_int_poll_vector (vm, node); |
| 74 | |
| 75 | return rt->rxq_poll_vector; |
| 76 | } |
| 77 | |
| 78 | static_always_inline u8 |
| 79 | vnet_hw_if_get_rx_queue_numa_node (vnet_main_t *vnm, u32 queue_index) |
| 80 | { |
| 81 | vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index); |
| 82 | vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index); |
| 83 | return hi->numa_node; |
| 84 | } |
| 85 | |
| 86 | static_always_inline u32 |
| 87 | vnet_hw_if_get_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index) |
| 88 | { |
| 89 | vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index); |
| 90 | return rxq->thread_index; |
| 91 | } |
| 92 | |
Mohammed Hawari | 9fecbe1 | 2020-12-11 19:36:37 +0100 | [diff] [blame^] | 93 | static_always_inline int |
| 94 | vnet_hw_if_rxq_cmp_cli_api (vnet_hw_if_rx_queue_t **a, |
| 95 | vnet_hw_if_rx_queue_t **b) |
| 96 | { |
| 97 | vnet_main_t *vnm; |
| 98 | vnet_hw_interface_t *hif_a; |
| 99 | vnet_hw_interface_t *hif_b; |
| 100 | |
| 101 | if (*a == *b) |
| 102 | return 0; |
| 103 | |
| 104 | if (a[0]->thread_index != b[0]->thread_index) |
| 105 | return 2 * (a[0]->thread_index > b[0]->thread_index) - 1; |
| 106 | |
| 107 | vnm = vnet_get_main (); |
| 108 | hif_a = vnet_get_hw_interface (vnm, a[0]->hw_if_index); |
| 109 | hif_b = vnet_get_hw_interface (vnm, b[0]->hw_if_index); |
| 110 | |
| 111 | if (hif_a->input_node_index != hif_b->input_node_index) |
| 112 | return 2 * (hif_a->input_node_index > hif_b->input_node_index) - 1; |
| 113 | |
| 114 | if (a[0]->hw_if_index != b[0]->hw_if_index) |
| 115 | return 2 * (a[0]->hw_if_index > b[0]->hw_if_index) - 1; |
| 116 | |
| 117 | if (a[0]->queue_id != b[0]->queue_id) |
| 118 | return 2 * (a[0]->queue_id > b[0]->queue_id) - 1; |
| 119 | |
| 120 | ASSERT (0); |
| 121 | return ~0; |
| 122 | } |
| 123 | |
Damjan Marion | 9410053 | 2020-11-06 23:25:57 +0100 | [diff] [blame] | 124 | /* |
| 125 | * fd.io coding-style-patch-verification: ON |
| 126 | * |
| 127 | * Local Variables: |
| 128 | * eval: (c-set-style "gnu") |
| 129 | * End: |
| 130 | */ |