/*
 **************************************************************************
 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_edma.c
 *	NSS EDMA APIs
 */

#include "nss_tx_rx_common.h"

/*
 **********************************
 Rx APIs
 **********************************
 */
/*
 * nss_edma_metadata_port_stats_sync()
 *	Handle the syncing of EDMA port statistics.
 */
static void nss_edma_metadata_port_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_port_stats_sync *nepss)
{
	uint16_t i, j = 0;
	struct nss_top_instance *nss_top = nss_ctx->nss_top;

	spin_lock_bh(&nss_top->stats_lock);

	/*
	 * EDMA port stats.
	 * Each sync message covers only a window of ports [start_port, end_port),
	 * because the message payload cannot hold statistics for all ports at once;
	 * port_stats[] in the payload is indexed from zero within that window.
	 */
	for (i = nepss->start_port; i < nepss->end_port; i++) {
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_PKTS] += nepss->port_stats[j].node_stats.rx_packets;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_BYTES] += nepss->port_stats[j].node_stats.rx_bytes;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_RX_DROPPED] += nepss->port_stats[j].node_stats.rx_dropped;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_TX_PKTS] += nepss->port_stats[j].node_stats.tx_packets;
		nss_top->stats_edma.port[i].port_stats[NSS_STATS_NODE_TX_BYTES] += nepss->port_stats[j].node_stats.tx_bytes;

		nss_top->stats_edma.port[i].port_type = nepss->port_stats[j].port_type;
		nss_top->stats_edma.port[i].port_ring_map[NSS_EDMA_PORT_RX_RING] = nepss->port_stats[j].edma_rx_ring;
		nss_top->stats_edma.port[i].port_ring_map[NSS_EDMA_PORT_TX_RING] = nepss->port_stats[j].edma_tx_ring;
		j++;
	}

	spin_unlock_bh(&nss_top->stats_lock);
}

/*
 * nss_edma_metadata_ring_stats_sync()
 *	Handle the syncing of EDMA ring statistics.
 */
static void nss_edma_metadata_ring_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_ring_stats_sync *nerss)
{
	int32_t i;
	struct nss_top_instance *nss_top = nss_ctx->nss_top;

	spin_lock_bh(&nss_top->stats_lock);

	/*
	 * edma tx ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) {
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_ERR] += nerss->tx_ring[i].tx_err;
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_DROPPED] += nerss->tx_ring[i].tx_dropped;
		nss_top->stats_edma.tx_stats[i][NSS_STATS_EDMA_TX_DESC] += nerss->tx_ring[i].desc_cnt;
	}

	/*
	 * edma rx ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) {
		nss_top->stats_edma.rx_stats[i][NSS_STATS_EDMA_RX_CSUM_ERR] += nerss->rx_ring[i].rx_csum_err;
		nss_top->stats_edma.rx_stats[i][NSS_STATS_EDMA_RX_DESC] += nerss->rx_ring[i].desc_cnt;
		nss_top->stats_edma.rx_stats[i][NSS_STATS_EDMA_RX_QOS_ERR] += nerss->rx_ring[i].qos_err;
	}

	/*
	 * edma tx cmpl ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) {
		nss_top->stats_edma.txcmpl_stats[i][NSS_STATS_EDMA_TXCMPL_DESC] += nerss->txcmpl_ring[i].desc_cnt;
	}

	/*
	 * edma rx fill ring stats
	 */
	for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) {
		nss_top->stats_edma.rxfill_stats[i][NSS_STATS_EDMA_RXFILL_DESC] += nerss->rxfill_ring[i].desc_cnt;
	}

	spin_unlock_bh(&nss_top->stats_lock);
}

/*
 * nss_edma_metadata_err_stats_sync()
 *	Handle the syncing of EDMA error statistics.
 */
static void nss_edma_metadata_err_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_err_stats_sync *nerss)
{
	struct nss_top_instance *nss_top = nss_ctx->nss_top;

	spin_lock_bh(&nss_top->stats_lock);
	nss_top->stats_edma.misc_err[NSS_EDMA_AXI_RD_ERR] += nerss->msg_err_stats.axi_rd_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_AXI_WR_ERR] += nerss->msg_err_stats.axi_wr_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_RX_DESC_FIFO_FULL_ERR] += nerss->msg_err_stats.rx_desc_fifo_full_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_RX_BUF_SIZE_ERR] += nerss->msg_err_stats.rx_buf_size_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_TX_SRAM_FULL_ERR] += nerss->msg_err_stats.tx_sram_full_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_TX_CMPL_BUF_FULL_ERR] += nerss->msg_err_stats.tx_cmpl_buf_full_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_PKT_LEN_LA64K_ERR] += nerss->msg_err_stats.pkt_len_la64k_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_PKT_LEN_LE33_ERR] += nerss->msg_err_stats.pkt_len_le33_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_DATA_LEN_ERR] += nerss->msg_err_stats.data_len_err;
	nss_top->stats_edma.misc_err[NSS_EDMA_ALLOC_FAIL_CNT] += nerss->msg_err_stats.alloc_fail_cnt;
	spin_unlock_bh(&nss_top->stats_lock);
}

/*
 * nss_edma_interface_handler()
 *	Handle NSS -> HLOS messages for EDMA node
 */
static void nss_edma_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_edma_msg *nem = (struct nss_edma_msg *)ncm;
	nss_edma_msg_callback_t cb;

	/*
	 * Is this a valid request/response packet?
	 */
	if (nem->cm.type >= NSS_METADATA_TYPE_EDMA_MAX) {
		nss_warning("%p: received invalid message %d for edma interface", nss_ctx, nem->cm.type);
		return;
	}

	/*
	 * Handle different types of messages
	 */
	switch (nem->cm.type) {
	case NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC:
		nss_edma_metadata_port_stats_sync(nss_ctx, &nem->msg.port_stats);
		break;
	case NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC:
		nss_edma_metadata_ring_stats_sync(nss_ctx, &nem->msg.ring_stats);
		break;
	case NSS_METADATA_TYPE_EDMA_ERR_STATS_SYNC:
		nss_edma_metadata_err_stats_sync(nss_ctx, &nem->msg.err_stats);
		break;
	default:
		if (ncm->response != NSS_CMN_RESPONSE_ACK) {
			/*
			 * Check response
			 */
			nss_info("%p: Received response %d for type %d, interface %d",
					nss_ctx, ncm->response, ncm->type, ncm->interface);
		}
	}

	/*
	 * Update the callback and app_data for NOTIFY messages. EDMA sends all
	 * notify messages to the same callback/app_data.
	 */
	if (nem->cm.response == NSS_CMM_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_ctx->nss_top->edma_callback;
		ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->edma_ctx;
	}

	/*
	 * Do we have a callback?
	 */
	if (!ncm->cb) {
		return;
	}

	/*
	 * Callback
	 */
	cb = (nss_edma_msg_callback_t)ncm->cb;
	cb((void *)ncm->app_data, nem);
}
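
/*
 * Illustrative sketch (not part of the driver): a minimal notification
 * callback that a client could register via nss_edma_notify_register().
 * The name my_edma_event_cb is hypothetical; the callback signature
 * (void *app_data, struct nss_edma_msg *nem) is assumed from the way
 * ncm->cb is invoked in nss_edma_interface_handler() above.
 *
 *	static void my_edma_event_cb(void *app_data, struct nss_edma_msg *nem)
 *	{
 *		switch (nem->cm.type) {
 *		case NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC:
 *			// Per-port counters for the window covered by this
 *			// message are available in nem->msg.port_stats.
 *			break;
 *		case NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC:
 *			// Per-ring counters are available in nem->msg.ring_stats.
 *			break;
 *		case NSS_METADATA_TYPE_EDMA_ERR_STATS_SYNC:
 *			// Miscellaneous error counters are available in nem->msg.err_stats.
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */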

/*
 * nss_edma_notify_register()
 *	Register to receive EDMA events.
 */
struct nss_ctx_instance *nss_edma_notify_register(nss_edma_msg_callback_t cb, void *app_data)
{
	nss_top_main.edma_callback = cb;
	nss_top_main.edma_ctx = app_data;
	return &nss_top_main.nss[nss_top_main.edma_handler_id];
}
EXPORT_SYMBOL(nss_edma_notify_register);

/*
 * nss_edma_notify_unregister()
 *	Unregister from EDMA events.
 */
void nss_edma_notify_unregister(void)
{
	nss_top_main.edma_callback = NULL;
}
EXPORT_SYMBOL(nss_edma_notify_unregister);
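
/*
 * Illustrative sketch (not part of the driver): how a hypothetical client
 * module might hook and unhook EDMA notifications. my_edma_client_init(),
 * my_edma_client_exit() and my_edma_event_cb() are made-up names; only
 * nss_edma_notify_register()/nss_edma_notify_unregister() are APIs defined
 * in this file. The returned context is saved only for later use.
 *
 *	static struct nss_ctx_instance *my_nss_ctx;
 *
 *	static int __init my_edma_client_init(void)
 *	{
 *		my_nss_ctx = nss_edma_notify_register(my_edma_event_cb, NULL);
 *		return 0;
 *	}
 *
 *	static void __exit my_edma_client_exit(void)
 *	{
 *		nss_edma_notify_unregister();
 *	}
 */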

/*
 * nss_edma_get_context()
 */
struct nss_ctx_instance *nss_edma_get_context(void)
{
	return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.edma_handler_id];
}
EXPORT_SYMBOL(nss_edma_get_context);

/*
 * nss_edma_register_handler()
 */
void nss_edma_register_handler(void)
{
	struct nss_ctx_instance *nss_ctx = nss_edma_get_context();

	nss_core_register_handler(nss_ctx, NSS_EDMA_INTERFACE, nss_edma_interface_handler, NULL);
}