[qca-nss-drv]: Add Changes needed for jumbo support
CR Fixed: 555993
The Jumbo Support in the system needs the allocation of memory buffers of sizes > 9600 Bytes.
Added the logic to dynamically change the buffer allocation sizes based on the Maximum MTU value
configured in the system.
The change for jumbo support is needed in the NetAP firmware, qca-nss-drv and the qca-nss-gmac driver. This commit handles
the needed changes in the NSS driver.
Signed-off-by: Radhakrishna Jiguru <rjiguru@codeaurora.org>
Change-Id: Icf4bca6b81aac21444917b52158924cb9404661b
Signed-off-by: Radhakrishna Jiguru <rjiguru@codeaurora.org>
diff --git a/nss_api_if.h b/nss_api_if.h
index ead7f2c..1dae90f 100755
--- a/nss_api_if.h
+++ b/nss_api_if.h
@@ -69,6 +69,9 @@
*/
#define IPV6_ADDR_TO_OCTAL(ipv6) ((uint16_t *)ipv6)[0], ((uint16_t *)ipv6)[1], ((uint16_t *)ipv6)[2], ((uint16_t *)ipv6)[3], ((uint16_t *)ipv6)[4], ((uint16_t *)ipv6)[5], ((uint16_t *)ipv6)[6], ((uint16_t *)ipv6)[7]
+#define NSS_ETH_NORMAL_FRAME_MTU 1500
+#define NSS_ETH_MINI_JUMBO_FRAME_MTU 1978
+#define NSS_ETH_FULL_JUMBO_FRAME_MTU 9600
/*
* Link aggregation enslave/release events
*/
diff --git a/nss_core.c b/nss_core.c
index d3dba69..a77883c 100755
--- a/nss_core.c
+++ b/nss_core.c
@@ -483,7 +483,7 @@
while (count) {
struct h2n_descriptor *desc = &desc_ring[hlos_index];
- nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
+ nbuf = dev_alloc_skb(nss_ctx->max_buf_size);
if (unlikely(!nbuf)) {
/*
* ERR:
@@ -497,7 +497,7 @@
desc->opaque = (uint32_t)nbuf;
desc->payload_offs = (uint16_t) (nbuf->data - nbuf->head);
- desc->buffer = dma_map_single(NULL, nbuf->head, (nbuf->end - nbuf->head), DMA_FROM_DEVICE);
+ desc->buffer = dma_map_single(NULL, nbuf->head, nss_ctx->max_buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(NULL, desc->buffer))) {
/*
* ERR:
@@ -506,7 +506,7 @@
nss_warning("%p: DMA mapping failed for empty buffer", nss_ctx);
break;
}
- desc->buffer_len = (uint16_t)(nbuf->end - nbuf->head);
+ desc->buffer_len = (uint16_t)(nss_ctx->max_buf_size);
desc->buffer_type = H2N_BUFFER_EMPTY;
hlos_index = (hlos_index + 1) & (mask);
count--;
@@ -560,12 +560,19 @@
*
* TODO: Modify the algorithm later with proper weights and Round Robin
*/
+
if (cause & NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS) {
*type = NSS_INTR_CAUSE_NON_QUEUE;
*weight = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT;
return NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS;
}
+ if (cause & NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE) {
+ *type = NSS_INTR_CAUSE_QUEUE;
+ *weight = NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT;
+ return NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE;
+ }
+
if (cause & NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED) {
*type = NSS_INTR_CAUSE_NON_QUEUE;
*weight = NSS_TX_UNBLOCKED_PROCESSING_WEIGHT;
@@ -578,11 +585,6 @@
return NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE;
}
- if (cause & NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE) {
- *type = NSS_INTR_CAUSE_QUEUE;
- *weight = NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT;
- return NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE;
- }
return 0;
}
@@ -747,7 +749,6 @@
struct nss_if_mem_map *if_map = (struct nss_if_mem_map *)nss_ctx->vmap;
uint32_t frag0phyaddr = 0;
-
nr_frags = skb_shinfo(nbuf)->nr_frags;
BUG_ON(nr_frags > MAX_SKB_FRAGS);
@@ -760,6 +761,7 @@
nss_warning("%p: DMA mapping failed for virtual address = %x", nss_ctx, desc->buffer);
return NSS_CORE_STATUS_FAILURE;
}
+
/*
* Take a lock for queue
*/
@@ -816,10 +818,11 @@
desc->buffer_len = (uint16_t)(nbuf->end - nbuf->head);
desc->buffer = frag0phyaddr;
- if (!NSS_IS_VIRTUAL_INTERFACE(if_num)) {
+ if (unlikely(!NSS_IS_VIRTUAL_INTERFACE(if_num))) {
if (likely(nbuf->destructor == NULL)) {
- if (likely(skb_recycle_check(nbuf, NSS_NBUF_PAYLOAD_SIZE))) {
+ if (likely(skb_recycle_check(nbuf, nss_ctx->max_buf_size))) {
desc->bit_flags |= H2N_BIT_BUFFER_REUSE;
+ desc->buffer_len = nss_ctx->max_buf_size + NET_SKB_PAD;
}
}
}
diff --git a/nss_core.h b/nss_core.h
index 052dd8c..351f39e 100755
--- a/nss_core.h
+++ b/nss_core.h
@@ -84,6 +84,8 @@
* Default payload size for NSS buffers
*/
#define NSS_NBUF_PAYLOAD_SIZE NSS_EMPTY_BUFFER_SIZE
+#define NSS_NBUF_PAD_EXTRA 256
+#define NSS_NBUF_ETH_EXTRA 192
/*
* N2H/H2N Queue IDs
@@ -450,6 +452,8 @@
void *queue_decongestion_ctx[NSS_MAX_CLIENTS];
/* Queue decongestion callback contexts */
spinlock_t decongest_cb_lock; /* Lock to protect queue decongestion cb table */
+ uint16_t phys_if_mtu[NSS_MAX_PHYSICAL_INTERFACES];
+ /* Current MTU value of physical interface */
uint32_t magic;
/* Magic protection */
};
diff --git a/nss_hlos_if.h b/nss_hlos_if.h
index a77a9ac..f843456 100755
--- a/nss_hlos_if.h
+++ b/nss_hlos_if.h
@@ -213,6 +213,14 @@
};
/*
+ * Interface mtu change
+ */
+struct nss_if_mtu_change {
+ uint32_t interface_num; /* Interface number */
+ uint16_t min_buf_size; /* Changed min buf size value */
+};
+
+/*
* Crypto open command
*/
struct nss_crypto_open {
@@ -305,6 +313,7 @@
NSS_TX_METADATA_TYPE_PROFILER_TX,
NSS_TX_METADATA_TYPE_GENERIC_IF_PARAMS,
NSS_TX_METADATA_TYPE_NSS_FREQ_CHANGE,
+ NSS_TX_METADATA_TYPE_INTERFACE_MTU_CHANGE,
};
/*
@@ -334,6 +343,7 @@
struct nss_profiler_tx profiler_tx;
struct nss_generic_if_params generic_if_params;
struct nss_freq_change freq_change;
+ struct nss_if_mtu_change if_mtu_change;
} sub;
};
diff --git a/nss_init.c b/nss_init.c
index 0590fc8..5df5383 100755
--- a/nss_init.c
+++ b/nss_init.c
@@ -377,6 +377,10 @@
nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
}
+ /*
+ * Initialize max buffer size for NSS core
+ */
+ nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE;
nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id);
goto err_init_0;
diff --git a/nss_tx_rx.c b/nss_tx_rx.c
index 93c2bc4..a933910 100755
--- a/nss_tx_rx.c
+++ b/nss_tx_rx.c
@@ -1532,8 +1532,10 @@
{
struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
struct sk_buff *nbuf;
- int32_t status;
+ int32_t status, i;
+ uint16_t max_mtu;
struct nss_tx_metadata_object *ntmo;
+ struct nss_if_mtu_change *nimc;
nss_info("%p: Phys If Change MTU, id:%d, mtu=%d\n", nss_ctx, if_num, mtu);
@@ -1552,7 +1554,11 @@
}
ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
- ntmo->type = NSS_TX_METADATA_TYPE_DESTROY_ALL_L3_RULES;
+ ntmo->type = NSS_TX_METADATA_TYPE_INTERFACE_MTU_CHANGE;
+
+ nimc = &ntmo->sub.if_mtu_change;
+ nimc->interface_num = if_num;
+ nimc->min_buf_size = (uint16_t)mtu + NSS_NBUF_ETH_EXTRA;
status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
if (status != NSS_CORE_STATUS_SUCCESS) {
@@ -1561,6 +1567,24 @@
return NSS_TX_FAILURE;
}
+ nss_ctx->phys_if_mtu[if_num] = (uint16_t)mtu;
+ max_mtu = nss_ctx->phys_if_mtu[0];
+ for (i = 1; i < NSS_MAX_PHYSICAL_INTERFACES; i++) {
+ if (max_mtu < nss_ctx->phys_if_mtu[i]) {
+ max_mtu = nss_ctx->phys_if_mtu[i];
+ }
+ }
+
+ if (max_mtu <= NSS_ETH_NORMAL_FRAME_MTU) {
+ max_mtu = NSS_ETH_NORMAL_FRAME_MTU;
+ } else if (max_mtu <= NSS_ETH_MINI_JUMBO_FRAME_MTU) {
+ max_mtu = NSS_ETH_MINI_JUMBO_FRAME_MTU;
+ } else if (max_mtu <= NSS_ETH_FULL_JUMBO_FRAME_MTU) {
+ max_mtu = NSS_ETH_FULL_JUMBO_FRAME_MTU;
+ }
+
+ nss_ctx->max_buf_size = ((max_mtu + ETH_HLEN + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1)) + NSS_NBUF_PAD_EXTRA;
+
nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
@@ -2127,6 +2151,7 @@
nss_phys_if_event_callback_t event_callback, struct net_device *if_ctx)
{
uint8_t id = nss_top_main.phys_if_handler_id[if_num];
+ struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[id];
nss_assert(if_num <= NSS_MAX_PHYSICAL_INTERFACES);
@@ -2134,7 +2159,8 @@
nss_top_main.if_rx_callback[if_num] = rx_callback;
nss_top_main.phys_if_event_callback[if_num] = event_callback;
- return (void *)&nss_top_main.nss[id];
+ nss_ctx->phys_if_mtu[if_num] = NSS_ETH_NORMAL_FRAME_MTU;
+ return (void *)nss_ctx;
}
/*
@@ -2147,6 +2173,8 @@
nss_top_main.if_rx_callback[if_num] = NULL;
nss_top_main.phys_if_event_callback[if_num] = NULL;
nss_top_main.if_ctx[if_num] = NULL;
+ nss_top_main.nss[0].phys_if_mtu[if_num] = 0;
+ nss_top_main.nss[1].phys_if_mtu[if_num] = 0;
}
/*