Merge "[qca-nss-drv] Prevent out of bound array access when processing NSS status packet."
diff --git a/nss_core.c b/nss_core.c
index 2fccf7d..59f4d9f 100755
--- a/nss_core.c
+++ b/nss_core.c
@@ -305,6 +305,207 @@
}
/*
+ * nss_core_handle_bounced_pkt()
+ * Bounced packet is returned from an interface/bridge bounce operation.
+ *
+ * Return the skb to the registrant.
+ */
+static inline void nss_core_handle_bounced_pkt(struct nss_ctx_instance *nss_ctx,
+ struct nss_shaper_bounce_registrant *reg,
+ struct sk_buff *nbuf)
+{
+ void *app_data;
+ struct module *owner;
+ nss_shaper_bounced_callback_t bounced_callback;
+ struct nss_top_instance *nss_top = nss_ctx->nss_top;
+
+ spin_lock_bh(&nss_top->lock);
+
+ /*
+ * Do we have a registrant?
+ */
+ if (!reg->registered) {
+ spin_unlock_bh(&nss_top->lock);
+ dev_kfree_skb_any(nbuf);
+ return;
+ }
+
+ /*
+ * Get handle to the owning registrant
+ */
+ bounced_callback = reg->bounced_callback;
+ app_data = reg->app_data;
+ owner = reg->owner;
+
+ /*
+ * Callback is active, unregistration is not permitted while this is in progress
+ */
+ reg->callback_active = true;
+ spin_unlock_bh(&nss_top->lock);
+ if (!try_module_get(owner)) {
+ spin_lock_bh(&nss_top->lock);
+ reg->callback_active = false;
+ spin_unlock_bh(&nss_top->lock);
+ dev_kfree_skb_any(nbuf);
+ return;
+ }
+
+ /*
+ * Pass bounced packet back to registrant
+ */
+ bounced_callback(app_data, nbuf);
+ spin_lock_bh(&nss_top->lock);
+ reg->callback_active = false;
+ spin_unlock_bh(&nss_top->lock);
+ module_put(owner);
+}
+
+/*
+ * nss_core_handle_virt_if_pkt()
+ * Handle packet destined to virtual interface.
+ */
+static inline void nss_core_handle_virt_if_pkt(struct nss_ctx_instance *nss_ctx,
+ unsigned int interface_num,
+ struct sk_buff *nbuf)
+{
+ struct nss_top_instance *nss_top = nss_ctx->nss_top;
+ struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_top->subsys_dp_register[interface_num];
+ struct net_device *ndev = NULL;
+
+ uint32_t xmit_ret;
+
+ NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_VIRTUAL]);
+
+ /*
+ * Checksum is already done by NSS for packets forwarded to virtual interfaces
+ */
+ nbuf->ip_summed = CHECKSUM_NONE;
+
+ /*
+ * Obtain net_device pointer
+ */
+ ndev = subsys_dp_reg->ndev;
+ if (unlikely(ndev == NULL)) {
+ nss_warning("%p: Received packet for unregistered virtual interface %d",
+ nss_ctx, interface_num);
+
+ /*
+ * NOTE: The assumption is that gather support is not
+ * implemented in fast path and hence we can not receive
+ * fragmented packets and so we do not need to take care
+ * of freeing a fragmented packet
+ */
+ dev_kfree_skb_any(nbuf);
+ return;
+ }
+
+ /*
+ * TODO: Need to ensure the ndev is not removed before we take dev_hold().
+ */
+ dev_hold(ndev);
+ nbuf->dev = ndev;
+ /*
+ * Linearize the skb if needed
+ */
+ if (nss_core_skb_needs_linearize(nbuf, (uint32_t)netif_skb_features(nbuf)) && __skb_linearize(nbuf)) {
+ /*
+ * We needed to linearize, but __skb_linearize() failed. Therefore
+ * we free the nbuf.
+ */
+ dev_put(ndev);
+ dev_kfree_skb_any(nbuf);
+ return;
+ }
+
+ /*
+ * Send the packet to virtual interface
+ * NOTE: Invoking this will BYPASS any assigned QDisc - this is OKAY
+ * as TX packets out of the NSS will have been shaped inside the NSS.
+ */
+ xmit_ret = ndev->netdev_ops->ndo_start_xmit(nbuf, ndev);
+ if (unlikely(xmit_ret == NETDEV_TX_BUSY)) {
+ dev_kfree_skb_any(nbuf);
+ nss_info("%p: Congestion at virtual interface %d, %p", nss_ctx, interface_num, ndev);
+ }
+ dev_put(ndev);
+}
+
+/*
+ * nss_core_handle_buffer_pkt()
+ * Handle data packet received on physical or virtual interface.
+ */
+static inline void nss_core_handle_buffer_pkt(struct nss_ctx_instance *nss_ctx,
+ unsigned int interface_num,
+ struct sk_buff *nbuf,
+ struct napi_struct *napi,
+ uint16_t flags)
+{
+ struct nss_top_instance *nss_top = nss_ctx->nss_top;
+ struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_top->subsys_dp_register[interface_num];
+ uint32_t netif_flags = subsys_dp_reg->features;
+ struct net_device *ndev = NULL;
+ nss_phys_if_rx_callback_t cb;
+
+ NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_PACKET]);
+
+ /*
+ * Check if NSS was able to obtain checksum
+ */
+ nbuf->ip_summed = CHECKSUM_UNNECESSARY;
+ if (unlikely(!(flags & N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID))) {
+ nbuf->ip_summed = CHECKSUM_NONE;
+ }
+
+ ndev = subsys_dp_reg->ndev;
+ cb = subsys_dp_reg->cb;
+ if (likely(cb) && likely(ndev)) {
+ /*
+ * Packet was received on Physical interface
+ */
+ if (nss_core_skb_needs_linearize(nbuf, netif_flags) && __skb_linearize(nbuf)) {
+ /*
+ * We needed to linearize, but __skb_linearize() failed. So free the nbuf.
+ */
+ dev_kfree_skb_any(nbuf);
+ return;
+ }
+
+ cb(ndev, (void *)nbuf, napi);
+ return;
+ }
+
+ if (NSS_IS_IF_TYPE(DYNAMIC, interface_num) || NSS_IS_IF_TYPE(VIRTUAL, interface_num)) {
+ /*
+ * Packet was received on Virtual interface
+ */
+
+ /*
+ * Give the packet to stack
+ *
+ * TODO: Change to gro receive later
+ */
+ ndev = subsys_dp_reg->ndev;
+ if (ndev) {
+ dev_hold(ndev);
+ nbuf->dev = ndev;
+ nbuf->protocol = eth_type_trans(nbuf, ndev);
+ netif_receive_skb(nbuf);
+ dev_put(ndev);
+ } else {
+ /*
+ * Interface has gone down
+ */
+ nss_warning("%p: Received exception packet from bad virtual interface %d",
+ nss_ctx, interface_num);
+ dev_kfree_skb_any(nbuf);
+ }
+ return;
+ }
+
+ dev_kfree_skb_any(nbuf);
+}
+
+/*
* nss_core_rx_pbuf()
* Receive a pbuf from the NSS into Linux.
*/
@@ -312,10 +513,7 @@
{
unsigned int interface_num = desc->interface_num;
struct nss_top_instance *nss_top = nss_ctx->nss_top;
- struct net_device *ndev = NULL;
- nss_phys_if_rx_callback_t cb;
- struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_top->subsys_dp_register[interface_num];
-
+ struct nss_shaper_bounce_registrant *reg = NULL;
#ifdef CONFIG_DEBUG_KMEMLEAK
/*
@@ -330,225 +528,20 @@
switch (buffer_type) {
case N2H_BUFFER_SHAPER_BOUNCED_INTERFACE:
- {
- /*
- * Bounced packet is returned from an interface bounce operation
- * Obtain the registrant to which to return the skb
- */
- nss_shaper_bounced_callback_t bounced_callback;
- void *app_data;
- struct module *owner;
- struct nss_shaper_bounce_registrant *reg = &nss_top->bounce_interface_registrants[interface_num];
-
- spin_lock_bh(&nss_top->lock);
-
- /*
- * Do we have a registrant?
- */
- if (!reg->registered) {
- spin_unlock_bh(&nss_top->lock);
- break;
- }
-
- /*
- * Get handle to the owning registrant
- */
- bounced_callback = reg->bounced_callback;
- app_data = reg->app_data;
- owner = reg->owner;
- if (!try_module_get(owner)) {
- spin_unlock_bh(&nss_top->lock);
- break;
- }
-
- /*
- * Callback is active, unregistration is not permitted while this is in progress
- */
- reg->callback_active = true;
- spin_unlock_bh(&nss_top->lock);
-
- /*
- * Pass bounced packet back to registrant
- */
- bounced_callback(app_data, nbuf);
- spin_lock_bh(&nss_top->lock);
- reg->callback_active = false;
- spin_unlock_bh(&nss_top->lock);
- module_put(owner);
- }
+ reg = &nss_top->bounce_interface_registrants[interface_num];
+ nss_core_handle_bounced_pkt(nss_ctx, reg, nbuf);
break;
case N2H_BUFFER_SHAPER_BOUNCED_BRIDGE:
- /*
- * Bounced packet is returned from a bridge bounce operation
- */
- {
- /*
- * Bounced packet is returned from a bridge bounce operation
- * Obtain the registrant to which to return the skb
- */
- nss_shaper_bounced_callback_t bounced_callback;
- void *app_data;
- struct module *owner;
- struct nss_shaper_bounce_registrant *reg = &nss_top->bounce_bridge_registrants[interface_num];
-
- spin_lock_bh(&nss_top->lock);
-
- /*
- * Do we have a registrant?
- */
- if (!reg->registered) {
- spin_unlock_bh(&nss_top->lock);
- break;
- }
-
- /*
- * Get handle to the owning registrant
- */
- bounced_callback = reg->bounced_callback;
- app_data = reg->app_data;
- owner = reg->owner;
- if (!try_module_get(owner)) {
- spin_unlock_bh(&nss_top->lock);
- break;
- }
-
- /*
- * Callback is active, unregistration is not permitted while this is in progress
- */
- reg->callback_active = true;
- spin_unlock_bh(&nss_top->lock);
-
- /*
- * Pass bounced packet back to registrant
- */
- bounced_callback(app_data, nbuf);
- spin_lock_bh(&nss_top->lock);
- reg->callback_active = false;
- spin_unlock_bh(&nss_top->lock);
- module_put(owner);
- }
+ reg = &nss_top->bounce_bridge_registrants[interface_num];
+ nss_core_handle_bounced_pkt(nss_ctx, reg, nbuf);
break;
case N2H_BUFFER_PACKET_VIRTUAL:
- {
- /*
- * Packet is destined to virtual interface
- */
- uint32_t xmit_ret;
-
- NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_VIRTUAL]);
-
- /*
- * Checksum is already done by NSS for packets forwarded to virtual interfaces
- */
- nbuf->ip_summed = CHECKSUM_NONE;
-
- /*
- * Obtain net_device pointer
- */
- ndev = subsys_dp_reg->ndev;
- if (unlikely(ndev == NULL)) {
- nss_warning("%p: Received packet for unregistered virtual interface %d",
- nss_ctx, interface_num);
-
- /*
- * NOTE: The assumption is that gather support is not
- * implemented in fast path and hence we can not receive
- * fragmented packets and so we do not need to take care
- * of freeing a fragmented packet
- */
- dev_kfree_skb_any(nbuf);
- break;
- }
-
- /*
- * TODO: Need to ensure the ndev is not removed before we take dev_hold().
- */
- dev_hold(ndev);
- nbuf->dev = ndev;
- /*
- * Linearize the skb if needed
- */
- if (nss_core_skb_needs_linearize(nbuf, (uint32_t)netif_skb_features(nbuf)) && __skb_linearize(nbuf)) {
- /*
- * We needed to linearize, but __skb_linearize() failed. Therefore
- * we free the nbuf.
- */
- dev_kfree_skb_any(nbuf);
- break;
- }
-
- /*
- * Send the packet to virtual interface
- * NOTE: Invoking this will BYPASS any assigned QDisc - this is OKAY
- * as TX packets out of the NSS will have been shaped inside the NSS.
- */
- xmit_ret = ndev->netdev_ops->ndo_start_xmit(nbuf, ndev);
- if (unlikely(xmit_ret == NETDEV_TX_BUSY)) {
- dev_kfree_skb_any(nbuf);
- nss_info("%p: Congestion at virtual interface %d, %p", nss_ctx, interface_num, ndev);
- }
- dev_put(ndev);
- }
+ nss_core_handle_virt_if_pkt(nss_ctx, interface_num, nbuf);
break;
- case N2H_BUFFER_PACKET: {
- uint32_t netif_flags = subsys_dp_reg->features;
-
- NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_PACKET]);
-
- /*
- * Check if NSS was able to obtain checksum
- */
- nbuf->ip_summed = CHECKSUM_UNNECESSARY;
- if (unlikely(!(desc->bit_flags & N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID))) {
- nbuf->ip_summed = CHECKSUM_NONE;
- }
-
- ndev = subsys_dp_reg->ndev;
- cb = subsys_dp_reg->cb;
- if (likely(cb) && likely(ndev)) {
- /*
- * Packet was received on Physical interface
- */
- if (nss_core_skb_needs_linearize(nbuf, netif_flags) && __skb_linearize(nbuf)) {
- /*
- * We needed to linearize, but __skb_linearize() failed. So free the nbuf.
- */
- dev_kfree_skb_any(nbuf);
- break;
- }
-
- cb(ndev, (void *)nbuf, napi);
- } else if (NSS_IS_IF_TYPE(DYNAMIC, interface_num) || NSS_IS_IF_TYPE(VIRTUAL, interface_num)) {
- /*
- * Packet was received on Virtual interface
- */
-
- /*
- * Give the packet to stack
- *
- * TODO: Change to gro receive later
- */
- ndev = subsys_dp_reg->ndev;
- if (ndev) {
- dev_hold(ndev);
- nbuf->dev = ndev;
- nbuf->protocol = eth_type_trans(nbuf, ndev);
- netif_receive_skb(nbuf);
- dev_put(ndev);
- } else {
- /*
- * Interface has gone down
- */
- nss_warning("%p: Received exception packet from bad virtual interface %d",
- nss_ctx, interface_num);
- dev_kfree_skb_any(nbuf);
- }
- } else {
- dev_kfree_skb_any(nbuf);
- }
- }
- break;
+ case N2H_BUFFER_PACKET:
+ nss_core_handle_buffer_pkt(nss_ctx, interface_num, nbuf, napi, desc->bit_flags);
+ break;
case N2H_BUFFER_STATUS:
NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_STATUS]);
@@ -560,15 +553,17 @@
NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_EMPTY]);
/*
- * TODO: Unmap fragments.
+ * Warning: On non-Krait HW, we need to unmap fragments.
+ *
+ * It's not a problem on Krait HW. We don't save dma handle for those
+ * fragments and that's why we are not able to unmap. However, on
+ * Kraits dma_map_single() does not allocate any resource and hence unmap is a
+ * NOP and does not have to free up any resource.
*/
dev_kfree_skb_any(nbuf);
break;
default:
- /*
- * ERROR:
- */
nss_warning("%p: Invalid buffer type %d received from NSS", nss_ctx, buffer_type);
}
}
@@ -591,7 +586,8 @@
dma_unmap_page(NULL, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_FROM_DEVICE);
/*
- * The received frame is not a scattered one.
+ * The first and last bits are both set. Hence the received frame can't have
+ * chains (or it's not a scattered one).
*/
if (likely(bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT) && likely(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) {
@@ -692,6 +688,7 @@
nbuf = jumbo_start;
*nbuf_ptr = nbuf;
*jumbo_start_ptr = NULL;
+ prefetch((void *)(nbuf->data));
pull:
/*
@@ -941,9 +938,13 @@
* properly. Simple skb's are properly mapped but page data skbs
* have the payload mapped (and not the skb->data slab payload).
*
- * TODO: This only unmaps the first segment either slab payload or
- * skb page data. Eventually, we need to unmap all of a frag_list
- * or all of page_data.
+ * Warning: On non-Krait HW, we need to unmap fragments.
+ *
+ * This only unmaps the first segment, either the slab payload or the
+ * skb page data. Eventually, we need to unmap all of a frag_list
+ * or all of the page data; however, this is not a big concern as of now
+ * since on Kraits dma_map_single() does not allocate any resource
+ * and hence dma_unmap_single() is sort of a nop.
*
* No need to invalidate for Tx Completions, so set dma direction = DMA_TO_DEVICE;
* Similarly prefetch is not needed for an empty buffer.
@@ -1908,17 +1909,14 @@
}
/*
- * WARNING! : The following "is_bounce" check has a HUGE potential to cause corruption
+ * WARNING! : The following "is_bounce" check has a potential to cause corruption
* if things change in the NSS. This check allows fragmented packets to be sent down
- * with garbage payload information under the ASSUMTION that no-body meddles with the
- * buffer. This holds good today for packets that are BOUNCED.
+ * with incomplete payload information since NSS does not care about the payload content
+ * when packets are bounced for shaping. If it starts caring in future, then this code
+ * will have to change.
*
- * WHY WE ARE DOING THIS - This is done as a temporary work around for issues with
- * the handeling of scatter gather in the NSS.
+ * WHY WE ARE DOING THIS - Skipping S/G processing helps with performance.
*
- * WHAT ARE WE DOING - We treat fragmented packets as normal (if bounced). This is okay
- * to do since the skb will eventually be returned to the HLOS for freeing or further
- * processing (post shaping). These packets WILL NOT get transmitted/re-used in the NSS.
*/
count = 0;
if (likely((segments == 0) || is_bounce)) {
diff --git a/nss_log.c b/nss_log.c
index b271763..1a2110e 100644
--- a/nss_log.c
+++ b/nss_log.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -178,6 +178,16 @@
}
/*
+ * nss_log_current_entry()
+ * Reads current entry index from NSS log descriptor.
+ */
+static uint32_t nss_log_current_entry(struct nss_log_descriptor *desc)
+{
+ rmb();
+ return desc->current_entry;
+}
+
+/*
* nss_log_read()
* Read operation lets command like cat and tail read our memory log buffer data.
*/
@@ -213,7 +223,7 @@
* Get the current index
*/
dma_sync_single_for_cpu(NULL, data->dma_addr, sizeof (struct nss_log_descriptor), DMA_FROM_DEVICE);
- entry = desc->current_entry;
+ entry = nss_log_current_entry(desc);
/*
* If the current and last sampled indexes are same then bail out.
@@ -520,7 +530,7 @@
dbg = &msg.msg.addr;
dbg->nentry = nentry;
dbg->version = NSS_DEBUG_LOG_VERSION;
- dbg->addr = dma_addr;
+ dbg->phy_addr = dma_addr;
msg_event = false;
status = nss_debug_interface_tx(nss_ctx, &msg);
diff --git a/nss_log.h b/nss_log.h
index 5c83b51..fd463c5 100644
--- a/nss_log.h
+++ b/nss_log.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -41,26 +41,35 @@
#define NSS_CACHE_LINE_SIZE 32
#define NSS_LOG_COOKIE 0xFF785634
+/*
+ * nss_log_entry is shared between Host and NSS FW
+ */
struct nss_log_entry {
- uint64_t volatile sequence_num; /* Sequence number */
- uint32_t volatile cookie; /* Magic for verification */
- uint32_t volatile thread_num; /* thread-id */
- uint32_t volatile timestamp; /* timestamp in ticks */
- char volatile message[NSS_LOG_LINE_WIDTH]; /* actual debug message */
+ uint64_t sequence_num; /* Sequence number */
+ uint32_t cookie; /* Magic for verification */
+ uint32_t thread_num; /* thread-id */
+ uint32_t timestamp; /* timestamp in ticks */
+ char message[NSS_LOG_LINE_WIDTH]; /* actual debug message */
} __attribute__((aligned(NSS_CACHE_LINE_SIZE)));
+/*
+ * The NSS log descriptor holds the ring buffer along with other variables and
+ * it is shared between the NSS FW and the Host.
+ *
+ * The NSS FW writes to the ring buffer and current_entry, but they are read only by the Host.
+ */
struct nss_log_descriptor {
- uint32_t volatile cookie; /* Magic for verification */
- uint32_t volatile log_nentries; /* No.of log entries */
- uint32_t volatile current_entry; /* pointer to current log entry */
- uint8_t volatile pad[20]; /* pad to align ring buffer at cacheline boundary */
+ uint32_t cookie; /* Magic for verification */
+	uint32_t log_nentries;		/* No. of log entries */
+ uint32_t current_entry; /* pointer to current log entry */
+ uint8_t pad[20]; /* pad to align ring buffer at cacheline boundary */
struct nss_log_entry log_ring_buffer[0]; /* The actual log entry ring buffer */
} __attribute__((aligned(NSS_CACHE_LINE_SIZE)));
struct nss_debug_log_memory_msg {
uint32_t version;
uint32_t nentry;
- uint32_t addr;
+ uint32_t phy_addr;
};
struct nss_debug_interface_msg {
diff --git a/nss_tx_rx_virt_if.c b/nss_tx_rx_virt_if.c
index f64a11a..68ad80d 100644
--- a/nss_tx_rx_virt_if.c
+++ b/nss_tx_rx_virt_if.c
@@ -33,14 +33,13 @@
{
struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.ipv4_handler_id];
int32_t if_num = (int32_t)ctx;
- uint32_t features = 0;
nss_assert(NSS_IS_IF_TYPE(VIRTUAL, if_num));
nss_top_main.subsys_dp_register[if_num].ndev = netdev;
nss_top_main.subsys_dp_register[if_num].cb = rx_callback;
nss_top_main.subsys_dp_register[if_num].app_data = NULL;
- nss_top_main.subsys_dp_register[if_num].features = features;
+ nss_top_main.subsys_dp_register[if_num].features = (uint32_t)netdev->features;
return nss_ctx;
}
diff --git a/nss_virt_if.c b/nss_virt_if.c
index 839f951..eafda4d 100644
--- a/nss_virt_if.c
+++ b/nss_virt_if.c
@@ -217,7 +217,6 @@
{
struct nss_ctx_instance *nss_ctx = NULL;
uint32_t ret;
- uint32_t features = 0;
/*
* Register handler for dynamically allocated virtual interface on NSS with nss core.
@@ -240,7 +239,7 @@
nss_top_main.subsys_dp_register[if_num].ndev = netdev;
nss_top_main.subsys_dp_register[if_num].cb = data_callback;
nss_top_main.subsys_dp_register[if_num].app_data = NULL;
- nss_top_main.subsys_dp_register[if_num].features = features;
+ nss_top_main.subsys_dp_register[if_num].features = (uint32_t)netdev->features;
nss_top_main.if_rx_msg_callback[if_num] = msg_callback;