 drivers/net/airoha_eth.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index e18816b2943..477a132fb1f 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -392,13 +392,14 @@ static int airoha_fe_init(struct airoha_eth *eth)
         return 0;
 }

-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
-                                      uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
 {
         struct airoha_qdma_desc *desc;
+        uchar *rx_packet;
         u32 val;

         desc = &q->desc[index];
+        rx_packet = net_rx_packets[index];
         index = (index + 1) % q->ndesc;

         dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
@@ -420,7 +421,7 @@ static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
         int i;

         for (i = 0; i < q->ndesc; i++)
-                airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+                airoha_qdma_reset_rx_desc(q, i);
 }

 static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
@@ -444,10 +445,14 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
                         RX_RING_SIZE_MASK,
                         FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

+        /*
+         * See arht_eth_free_pkt() for the reasoning behind the value
+         * written to the REG_RX_CPU_IDX(qid) register.
+         */
         airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
                         FIELD_PREP(RX_RING_THR_MASK, 0));
         airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-                        FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
+                        FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 3));
         airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
                         FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
@@ -906,6 +911,7 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
         struct airoha_qdma *qdma = &eth->qdma[0];
         struct airoha_queue *q;
         int qid;
+        u16 prev, pprev;

         if (!packet)
                 return 0;
@@ -913,13 +919,24 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
         qid = 0;
         q = &qdma->q_rx[qid];

-        dma_map_single(packet, length, DMA_TO_DEVICE);
-
-        airoha_qdma_reset_rx_desc(q, q->head, packet);
+        /*
+         * Because descriptors share CPU cache lines, a writeback in
+         * airoha_qdma_reset_rx_desc() always touches two descriptors:
+         * - if the current descriptor index is even, the previous
+         *   descriptor and the one before it are touched (previous cache line)
+         * - if the current descriptor index is odd, the current and
+         *   previous descriptors are touched (current cache line)
+         *
+         * Thus, to avoid corrupting the RX queue, only (q->ndesc - 2)
+         * descriptors may be used for packet reception.
+         */
+        prev = (q->head + q->ndesc - 1) % q->ndesc;
+        pprev = (q->head + q->ndesc - 2) % q->ndesc;
+        q->head = (q->head + 1) % q->ndesc;

+        airoha_qdma_reset_rx_desc(q, prev);
         airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-                        FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-        q->head = (q->head + 1) % q->ndesc;
+                        FIELD_PREP(RX_RING_CPU_IDX_MASK, pprev));

         return 0;
 }
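
For reference, the "two descriptors per writeback" behaviour described in the comment above can be checked with a few lines of host-side C. The sketch below is purely illustrative: the 64-byte cache line, the 32-byte descriptor size, the even toy ring size, and the line_first()/line_last() helpers are assumptions made for this model, not values or names taken from the driver.

/*
 * Model of the descriptor/cache-line overlap described in
 * arht_eth_free_pkt(). Assumed sizes: 64-byte cache lines and
 * 32-byte QDMA descriptors, i.e. two descriptors per line, with
 * the ring aligned to a cache-line boundary.
 */
#include <stdio.h>

#define CACHELINE_SIZE  64      /* assumed */
#define DESC_SIZE       32      /* assumed sizeof(struct airoha_qdma_desc) */
#define DESC_PER_LINE   (CACHELINE_SIZE / DESC_SIZE)
#define NDESC           8       /* toy ring size, even like the real ring */

/* First/last ring index covered by the cache line holding desc[i]. */
static int line_first(int i) { return i - (i % DESC_PER_LINE); }
static int line_last(int i)  { return line_first(i) + DESC_PER_LINE - 1; }

int main(void)
{
        int head;

        for (head = 0; head < NDESC; head++) {
                /* Same index arithmetic as in arht_eth_free_pkt(). */
                int prev  = (head + NDESC - 1) % NDESC;
                int pprev = (head + NDESC - 2) % NDESC;

                /*
                 * Writing back desc[prev] flushes its whole cache line,
                 * i.e. desc[line_first(prev)..line_last(prev)]:
                 * - head even -> prev odd  -> line covers {pprev, prev}
                 * - head odd  -> prev even -> line covers {prev, head}
                 */
                printf("head=%d: reset of desc[%d] writes back desc[%d..%d], CPU idx <- %d\n",
                       head, prev, line_first(prev), line_last(prev), pprev);
        }

        return 0;
}

In every iteration the written-back line stays within {pprev, prev, head}, i.e. within the descriptors the CPU still holds back from the hardware, since the CPU index never advances past pprev. That is also why airoha_qdma_init_rx_queue() starts the CPU index at q->ndesc - 3 (with head == 0), leaving the same two descriptors of slack from the very first packet.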