[ipq40xx][edma] Fix RX buffer allocation accounting
The current RX buffer allocation mechanism does not account for the
number of RX buffers that could not be allocated. Record that
shortfall and carry it into the next round of allocation; while
buffers are still pending, the NAPI poll handler now consumes the
full budget so that it keeps getting scheduled and can retry the
refill.
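
Illustrative sketch of the bookkeeping this change introduces: a
refill helper returns its shortfall, and the caller carries that
shortfall into the next refill round. The names here (rx_ring,
alloc_rx_buf, pending_fill, the buffer size) are simplified
stand-ins for the driver's structures, not the actual EDMA code:

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_ring {
        unsigned int count;        /* descriptors in the ring */
        unsigned int pending_fill; /* buffers owed from the last round */
    };

    /* Try to allocate 'wanted' buffers; return how many could NOT
     * be allocated so the caller can retry them later.
     */
    static unsigned int alloc_rx_buf(unsigned int wanted)
    {
        unsigned int done = 0;

        while (done < wanted) {
            void *buf = malloc(2048); /* stand-in for an RX buffer */
            if (!buf)
                break;                /* out of memory: stop early */
            free(buf);                /* a real driver would queue it */
            done++;
        }
        return wanted - done;         /* the shortfall */
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 128, .pending_fill = 0 };
        unsigned int cleaned = 16;    /* buffers consumed this pass */

        /* Carry over the previous shortfall, as the patch does with
         * erdr->pending_fill, then record the new shortfall.
         */
        cleaned += ring.pending_fill;
        if (cleaned > ring.count)
            cleaned = ring.count;
        ring.pending_fill = alloc_rx_buf(cleaned);

        printf("buffers still pending: %u\n", ring.pending_fill);
        return 0;
    }

Returning the shortfall, rather than only logging it, is what lets
the caller keep retrying until the allocator recovers.
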
Change-Id: Iac37944afce2c754499b36bdcef2c4d2e48e4509
Signed-off-by: Rakesh Nair <ranair@codeaurora.org>
diff --git a/edma.c b/edma.c
index f4541d7..0d93ac2 100644
--- a/edma.c
+++ b/edma.c
@@ -108,6 +108,9 @@
return -ENOMEM;
}
+ /* Initialize pending fill */
+ erxd->pending_fill = 0;
+
return 0;
}
@@ -282,6 +285,13 @@
reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
reg_data |= prod_idx;
edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
+
+ /* If we could not allocate all of the requested buffers,
+ * increment the allocation failure counter.
+ */
+ if (cleaned_count)
+ edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
+
return cleaned_count;
}
@@ -564,7 +574,7 @@
* edma_rx_complete()
* Main api called from the poll function to process rx packets.
*/
-static void edma_rx_complete(struct edma_common_info *edma_cinfo,
+static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
int *work_done, int work_to_do, int queue_id,
struct napi_struct *napi)
{
@@ -576,6 +586,7 @@
u16 count = erdr->count, rfd_avail;
u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
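+ /* Start this pass with any buffers we failed to allocate last time */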
+ cleaned_count = erdr->pending_fill;
sw_next_to_clean = erdr->sw_next_to_clean;
edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
@@ -695,6 +706,7 @@
edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
sw_next_to_clean);
cleaned_count = ret_count;
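+ /* Remember the shortfall so the next pass retries these buffers */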
+ erdr->pending_fill = ret_count;
}
continue;
}
@@ -775,6 +787,7 @@
edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
sw_next_to_clean);
cleaned_count = ret_count;
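+ /* As above, record the shortfall for the next pass */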
+ erdr->pending_fill = ret_count;
}
/* At this point skb should go to stack */
@@ -796,11 +809,17 @@
/* Refill here in case refill threshold wasn't reached */
if (likely(cleaned_count)) {
ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
- if (ret_count)
- dev_dbg(&pdev->dev, "Not all buffers was reallocated");
+ erdr->pending_fill = ret_count;
+ if (ret_count) {
+ if (net_ratelimit())
+ dev_dbg(&pdev->dev, "EDMA failed to get memory for RX buffers\n");
+ }
+
edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
erdr->sw_next_to_clean);
}
+
+ return erdr->pending_fill;
}
/* edma_delete_rfs_filter()
@@ -1925,6 +1944,7 @@
ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
if (ret_count)
dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
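+ /* Record how many buffers still need to be allocated */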
+ ring->pending_fill = ret_count;
j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
}
@@ -2213,6 +2233,7 @@
u32 shadow_rx_status, shadow_tx_status;
int queue_id;
int i, work_done = 0;
+ u16 rx_pending_fill;
/* Store the Rx/Tx status by ANDing it with
* appropriate CPU RX/TX mask
@@ -2246,11 +2267,17 @@
*/
while (edma_percpu_info->rx_status) {
queue_id = ffs(edma_percpu_info->rx_status) - 1;
- edma_rx_complete(edma_cinfo, &work_done,
+ rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
budget, queue_id, napi);
- if (likely(work_done < budget))
+ if (likely(work_done < budget)) {
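+ /* Buffer starvation on this queue: report the full budget so
+ * NAPI keeps polling and the refill can be retried.
+ */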
+ if (rx_pending_fill) {
+ work_done = budget;
+ break;
+ }
+
edma_percpu_info->rx_status &= ~(1 << queue_id);
+ }
else
break;
}