author | Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu> | 2025-07-09 12:28:08 +0300
committer | Tom Rini <trini@konsulko.com> | 2025-07-15 09:56:01 -0600
commit | 5d49fa9e56bb0e0b085f5c794ba508349385f6b8 (patch)
tree | 06eab03d6ceb9c49ff562cd12ae9cd4c2314879d
parent | 189d0b4477077f0445772d7899d8bd0a17e1f67a (diff)
drivers/net/airoha_eth: fix packet transmission errors
The dma_map_single() function calls one of
* invalidate_dcache_range(),
* flush_dcache_range().
Both of them expect 'vaddr' to be aligned to the ARCH_DMA_MINALIGN boundary.
Unfortunately, the RX/TX descriptors are 32 bytes long, so they might not be
aligned to the ARCH_DMA_MINALIGN boundary. In that case the flush (or
invalidation) might do nothing at all (see the sketch below).
The same applies to the dma_unmap_single() function.
In the TX path this can prevent packet transmission, because the filled
TX descriptor is never flushed out to memory.
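For illustration only (not part of the patch): cache-maintenance helpers in
U-Boot typically refuse to operate on ranges that do not start and end on a
cache-line boundary, which is why an unaligned descriptor can be silently
skipped. The standalone sketch below mimics such a guard; EXAMPLE_CACHELINE,
example_check_range() and the example address are assumptions made up for the
demonstration, not U-Boot or driver code.

```c
/*
 * Hedged sketch, not the actual U-Boot code: a guard like this rejects
 * ranges that do not cover whole cache lines.  A 32-byte descriptor that
 * starts in the middle of a 64-byte cache line fails the check, so the
 * flush/invalidate is skipped and the hardware never sees the update.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CACHELINE	64	/* assumed ARCH_DMA_MINALIGN */

static int example_check_range(uintptr_t start, uintptr_t stop)
{
	if ((start | stop) & (EXAMPLE_CACHELINE - 1)) {
		printf("misaligned range [%#lx, %#lx), skipping\n",
		       (unsigned long)start, (unsigned long)stop);
		return 0;	/* caller skips the cache operation */
	}
	return 1;
}

int main(void)
{
	/* hypothetical 32-byte descriptor at offset 0x20 of a cache line */
	uintptr_t desc = 0x80001020;

	example_check_range(desc, desc + 32);		/* rejected */
	example_check_range(0x80001000, 0x80001040);	/* accepted */
	return 0;
}
```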
To fix the issue, special wrappers for
* dma_map_single(),
* dma_unmap_single()
were created; they round the mapped range out to whole ARCH_DMA_MINALIGN
units (illustrated after this paragraph). The patch fixes
flushing/invalidation for the RX path as well.
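The actual wrappers are in the diff below. As a rough standalone demonstration
of what they compute, the snippet below rounds an unaligned 32-byte descriptor
range out to whole alignment units; the 64-byte EXAMPLE_DMA_MINALIGN, the
EX_ALIGN_* macros and the address are made up for the example.

```c
/*
 * Illustration only (not driver code): round a descriptor range out to
 * whole ARCH_DMA_MINALIGN units before handing it to the mapping API.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_DMA_MINALIGN	64
#define EX_ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))
#define EX_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t desc = 0x80001020;	/* hypothetical descriptor address */
	size_t len = 32;		/* descriptor size */

	uintptr_t start = EX_ALIGN_DOWN(desc, EXAMPLE_DMA_MINALIGN);
	uintptr_t end = EX_ALIGN_UP(desc + len, EXAMPLE_DMA_MINALIGN);

	/* raw range [0x80001020, 0x80001040) starts mid cache line ... */
	printf("raw:     [%#lx, %#lx)\n", (unsigned long)desc,
	       (unsigned long)(desc + len));
	/* ... rounded range [0x80001000, 0x80001040) covers whole lines */
	printf("rounded: [%#lx, %#lx)\n", (unsigned long)start,
	       (unsigned long)end);
	return 0;
}
```

Rounding out touches a few extra bytes on either side of the descriptor; that
is acceptable here only because those bytes belong to the same descriptor ring
owned by the driver.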
The bug appears on the 32-bit Airoha platform, but should be present on
64-bit as well.
The code was tested on both 32-bit and 64-bit Airoha boards.
Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
-rw-r--r-- | drivers/net/airoha_eth.c | 33 |
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index 2aa42b3d727..e18816b2943 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -354,6 +354,27 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
 #define airoha_switch_wr(eth, offset, val) \
 	airoha_wr((eth)->switch_regs, (offset), (val))
 
+static inline dma_addr_t dma_map_unaligned(void *vaddr, size_t len,
+					   enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)vaddr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(vaddr + len), ARCH_DMA_MINALIGN);
+
+	return dma_map_single((void *)start, end - start, dir);
+}
+
+static inline void dma_unmap_unaligned(dma_addr_t addr, size_t len,
+				       enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)addr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(addr + len), ARCH_DMA_MINALIGN);
+	dma_unmap_single(start, end - start, dir);
+}
+
 static void airoha_fe_maccr_init(struct airoha_eth *eth)
 {
 	int p;
@@ -391,7 +412,7 @@ static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
 	val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
 	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 }
 
 static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
@@ -826,14 +847,14 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
 	WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
 	WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 
 	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
 			FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
 
 	for (i = 0; i < 100; i++) {
-		dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-				 DMA_FROM_DEVICE);
+		dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+				    DMA_FROM_DEVICE);
 		if (desc->ctrl & QDMA_DESC_DONE_MASK)
 			break;
 
@@ -864,8 +885,8 @@ static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
 	q = &qdma->q_rx[qid];
 	desc = &q->desc[q->head];
 
-	dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-			 DMA_FROM_DEVICE);
+	dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+			    DMA_FROM_DEVICE);
 
 	if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
 		return -EAGAIN;