/*
 **************************************************************************
 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_edma.c
 *	NSS EDMA APIs
 */

#include "nss_tx_rx_common.h"

/*
 **********************************
 Rx APIs
 **********************************
 */

/*
 * nss_edma_metadata_port_stats_sync()
 *	Handle the syncing of EDMA port statistics.
 */
static void nss_edma_metadata_port_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_port_stats_sync *nepss)
{
	uint16_t i, j = 0;
	struct nss_top_instance *nss_top = nss_ctx->nss_top;

	spin_lock_bh(&nss_top->stats_lock);

	/*
	 * edma port stats
	 * We process a subset of the port stats since the message payload cannot hold all ports at once.
	 */
	for (i = nepss->start_port; i < nepss->end_port; i++) {
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_PKTS] += nepss->port_stats[j].node_stats.rx_packets;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_BYTES] += nepss->port_stats[j].node_stats.rx_bytes;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_DROPPED] += nepss->port_stats[j].node_stats.rx_dropped;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_TX_PKTS] += nepss->port_stats[j].node_stats.tx_packets;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_TX_BYTES] += nepss->port_stats[j].node_stats.tx_bytes;

		nss_top->stats_edma.port[i].port_type = nepss->port_stats[j].port_type;
		nss_top->stats_edma.port[i].port_ring_map[NSS_EDMA_PORT_RX_RING] = nepss->port_stats[j].edma_rx_ring;
		nss_top->stats_edma.port[i].port_ring_map[NSS_EDMA_PORT_TX_RING] = nepss->port_stats[j].edma_tx_ring;
		j++;
	}

	spin_unlock_bh(&nss_top->stats_lock);
}
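
/*
 * Note, illustrative only: each port-stats sync message covers just the window
 * [start_port, end_port) of EDMA ports, with 'j' walking the entries carried in
 * that message; successive sync messages are expected to cover the remaining ports.
 */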

/*
 * nss_edma_metadata_ring_stats_sync()
 *	Handle the syncing of EDMA ring statistics.
 */
static void nss_edma_metadata_ring_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_ring_stats_sync *nerss)
{
	int32_t i;
	struct nss_top_instance *nss_top = nss_ctx->nss_top;

	spin_lock_bh(&nss_top->stats_lock);

	/*
	 * edma tx ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) {
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_ERR] += nerss->tx_ring[i].tx_err;
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_DROPPED] += nerss->tx_ring[i].tx_dropped;
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_DESC] += nerss->tx_ring[i].desc_cnt;
	}

	/*
	 * edma rx ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) {
		nss_top->stats_edma.rx_stats[i][NSS_STATS_EDMA_RX_CSUM_ERR] += nerss->rx_ring[i].rx_csum_err;
		nss_top->stats_edma.rx_stats[i][NSS_STATS_EDMA_RX_DESC] += nerss->rx_ring[i].desc_cnt;
	}

	/*
	 * edma tx cmpl ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) {
		nss_top->stats_edma.txcmpl_stats[i][NSS_STATS_EDMA_TXCMPL_DESC] += nerss->txcmpl_ring[i].desc_cnt;
	}

	/*
	 * edma rx fill ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) {
		nss_top->stats_edma.rxfill_stats[i][NSS_STATS_EDMA_RXFILL_DESC] += nerss->rxfill_ring[i].desc_cnt;
	}

	spin_unlock_bh(&nss_top->stats_lock);
}

/*
 * nss_edma_interface_handler()
 *	Handle NSS -> HLOS messages for EDMA node
 */
static void nss_edma_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_edma_msg *nem = (struct nss_edma_msg *)ncm;
	nss_edma_msg_callback_t cb;

	/*
	 * Is this a valid request/response packet?
	 */
	if (nem->cm.type >= NSS_METADATA_TYPE_EDMA_MAX) {
		nss_warning("%p: received invalid message %d for edma interface", nss_ctx, nem->cm.type);
		return;
	}

	/*
	 * Handle different types of messages
	 */
	switch (nem->cm.type) {
	case NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC:
		nss_edma_metadata_port_stats_sync(nss_ctx, &nem->msg.port_stats);
		break;

	case NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC:
		nss_edma_metadata_ring_stats_sync(nss_ctx, &nem->msg.ring_stats);
		break;

	default:
		if (ncm->response != NSS_CMN_RESPONSE_ACK) {
			/*
			 * Check response
			 */
			nss_info("%p: Received response %d for type %d, interface %d",
					nss_ctx, ncm->response, ncm->type, ncm->interface);
		}
	}

	/*
	 * Update the callback and app_data for NOTIFY messages. EDMA sends all notify messages
	 * to the same callback/app_data.
	 */
	if (nem->cm.response == NSS_CMM_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_ctx->nss_top->edma_callback;
		ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->edma_ctx;
	}

	/*
	 * Do we have a callback?
	 */
	if (!ncm->cb) {
		return;
	}

	/*
	 * Callback
	 */
	cb = (nss_edma_msg_callback_t)ncm->cb;
	cb((void *)ncm->app_data, nem);
}
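
/*
 * Illustrative sketch, not part of the driver: a minimal client callback of the
 * shape invoked above via cb(app_data, nem). The name my_edma_notify_cb and the
 * surrounding client are hypothetical; by the time the callback runs, the stats
 * sync messages have already been folded into nss_top->stats_edma.
 */
#if 0
static void my_edma_notify_cb(void *app_data, struct nss_edma_msg *nem)
{
	switch (nem->cm.type) {
	case NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC:
		/* Per-port counters were accumulated before this callback was invoked. */
		break;

	case NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC:
		/* Per-ring counters were accumulated before this callback was invoked. */
		break;

	default:
		break;
	}
}
#endif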

/*
 * nss_edma_notify_register()
 *	Register to receive EDMA events.
 */
struct nss_ctx_instance *nss_edma_notify_register(nss_edma_msg_callback_t cb, void *app_data)
{
	nss_top_main.edma_callback = cb;
	nss_top_main.edma_ctx = app_data;
	return &nss_top_main.nss[nss_top_main.edma_handler_id];
}
EXPORT_SYMBOL(nss_edma_notify_register);

/*
 * nss_edma_notify_unregister()
 *	Unregister from receiving EDMA events.
 */
void nss_edma_notify_unregister(void)
{
	nss_top_main.edma_callback = NULL;
}
EXPORT_SYMBOL(nss_edma_notify_unregister);
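
/*
 * Usage sketch, illustrative only: a client registers one notify callback for all
 * EDMA messages at init time and unregisters it on teardown. my_edma_notify_cb
 * refers to the hypothetical callback sketched above.
 */
#if 0
static struct nss_ctx_instance *my_edma_nss_ctx;

static int my_edma_client_init(void)
{
	my_edma_nss_ctx = nss_edma_notify_register(my_edma_notify_cb, NULL);
	return 0;
}

static void my_edma_client_exit(void)
{
	nss_edma_notify_unregister();
}
#endif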

/*
 * nss_edma_register_handler()
 *	Register the EDMA message handler with the NSS core.
 */
void nss_edma_register_handler(void)
{
	nss_core_register_handler(NSS_EDMA_INTERFACE, nss_edma_interface_handler, NULL);
}
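
/*
 * Note (assumption about the call site): nss_edma_register_handler() is expected
 * to be invoked once from the NSS driver initialization path so that messages
 * arriving on NSS_EDMA_INTERFACE are dispatched to nss_edma_interface_handler().
 */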