| field | value | date |
|---|---|---|
| author | Jason Gunthorpe <jgg@nvidia.com> | 2021-06-22 14:42:52 -0300 |
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2021-06-22 14:43:51 -0300 |
| commit | fdcebbc2ac2cfd82a18857b0c85067fa7e8f5233 (patch) | |
| tree | e21b6d503468282cd9010d24b35327dc8d772980 /drivers/dma/mediatek/mtk-uart-apdma.c | |
| parent | 6d33cabf2baf304730d01a942095416b3a8329ab (diff) | |
| parent | 13311e74253fe64329390df80bed3f07314ddd61 (diff) | |
Merge tag 'v5.13-rc7' into rdma.git for-next
Linux 5.13-rc7
Needed for dependencies in following patches. Merge conflict in rxe_comp.c
resolved by combining both patches.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/dma/mediatek/mtk-uart-apdma.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | drivers/dma/mediatek/mtk-uart-apdma.c | 27 |

1 file changed, 14 insertions(+), 13 deletions(-)
```diff
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 27c07350971d..375e7e647df6 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
 
 static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
 {
-	struct dma_chan *chan = vd->tx.chan;
-	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
-	kfree(c->desc);
+	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
 }
 
 static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
 
 static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
 {
-	struct mtk_uart_apdma_desc *d = c->desc;
-
 	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
 	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
 	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
-	list_del(&d->vd.node);
-	vchan_cookie_complete(&d->vd);
 }
 
 static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
 	c->rx_status = d->avail_len - cnt;
 	mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
 
-	list_del(&d->vd.node);
-	vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+	struct mtk_uart_apdma_desc *d = c->desc;
+
+	if (d) {
+		list_del(&d->vd.node);
+		vchan_cookie_complete(&d->vd);
+		c->desc = NULL;
+	}
 }
 
 static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
 		mtk_uart_apdma_rx_handler(c);
 	else if (c->dir == DMA_MEM_TO_DEV)
 		mtk_uart_apdma_tx_handler(c);
+	mtk_uart_apdma_chan_complete_handler(c);
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
 	return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
 		return NULL;
 
 	/* Now allocate and setup the descriptor */
-	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
 	if (!d)
 		return NULL;
 
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
-	if (vchan_issue_pending(&c->vc)) {
+	if (vchan_issue_pending(&c->vc) && !c->desc) {
 		vd = vchan_next_desc(&c->vc);
 		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
```
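The descriptor-lifecycle rework pulled in by this merge hinges on the `container_of` idiom in the first hunk: the free callback recovers the enclosing `mtk_uart_apdma_desc` from the `virt_dma_desc` pointer it is handed, instead of freeing whatever `c->desc` happens to point at when the callback runs. For readers unfamiliar with the idiom, here is a minimal user-space sketch; the struct names mirror the driver for readability, the `container_of` macro is simplified (the kernel version adds type checking), and the program is illustrative only, not kernel code:

```c
/* User-space illustration of the container_of pattern used in
 * mtk_uart_apdma_desc_free() above: given a pointer to an embedded
 * member, recover the enclosing structure and free it. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified version of the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct virt_dma_desc {
	int cookie;			/* stand-in for the real fields */
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;	/* embedded member */
	size_t avail_len;
};

/* Same shape as the fixed desc_free callback: free exactly the
 * descriptor that embeds @vd, without touching channel state. */
static void desc_free(struct virt_dma_desc *vd)
{
	free(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

int main(void)
{
	struct mtk_uart_apdma_desc *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->avail_len = 128;
	/* The caller only holds &d->vd, yet the whole allocation is freed. */
	desc_free(&d->vd);
	puts("descriptor freed via container_of");
	return 0;
}
```

The remaining hunks follow the same theme: completion is funnelled through a single `mtk_uart_apdma_chan_complete_handler()` that clears `c->desc` under the channel lock, and `mtk_uart_apdma_issue_pending()` now refuses to start a new descriptor while one is still in flight (`!c->desc`), so the callback and the in-flight pointer can no longer disagree about which descriptor is current.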
