author | Vinod Koul <vinod.koul@intel.com> | 2018-01-31 13:51:26 +0530
---|---|---
committer | Vinod Koul <vinod.koul@intel.com> | 2018-01-31 13:51:26 +0530
commit | b8e1a963593551431ec0b3ac7bfce50f4a32ee35 (patch) |
tree | 0552e21b5b997e7f87080660d62e25b189f47c53 /drivers/dma |
parent | cea0dd4d96e548b720657a5e38ef291be6d4fff3 (diff) |
parent | 2c6929d2ea708fbdcab6e6721c9a57b54a7e5a6e (diff) |
Merge branch 'topic/virt-dma' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/amba-pl08x.c | 11 |
-rw-r--r-- | drivers/dma/bcm2835-dma.c | 10 |
-rw-r--r-- | drivers/dma/dma-jz4780.c | 10 |
-rw-r--r-- | drivers/dma/edma.c | 7 |
-rw-r--r-- | drivers/dma/img-mdc-dma.c | 17 |
-rw-r--r-- | drivers/dma/k3dma.c | 10 |
-rw-r--r-- | drivers/dma/omap-dma.c | 2 |
-rw-r--r-- | drivers/dma/s3c24xx-dma.c | 11 |
-rw-r--r-- | drivers/dma/virt-dma.c | 5 |
-rw-r--r-- | drivers/dma/virt-dma.h | 44 |
10 files changed, 107 insertions, 20 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
 	}
 	/* Dequeue jobs and free LLIs */
 	if (plchan->at) {
-		pl08x_desc_free(&plchan->at->vd);
+		vchan_terminate_vdesc(&plchan->at->vd);
 		plchan->at = NULL;
 	}
 	/* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+	vchan_synchronize(&plchan->vc);
+}
+
 static int pl08x_pause(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_pause = pl08x_pause;
 	pl08x->memcpy.device_resume = pl08x_resume;
 	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+	pl08x->memcpy.device_synchronize = pl08x_synchronize;
 	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
 	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
 	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_pause = pl08x_pause;
 	pl08x->slave.device_resume = pl08x_resume;
 	pl08x->slave.device_terminate_all = pl08x_terminate_all;
+	pl08x->slave.device_synchronize = pl08x_synchronize;
 	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
 	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
 	pl08x->slave.directions =
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
-		bcm2835_dma_desc_free(&c->desc->vd);
+		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
 
 		bcm2835_dma_abort(c->chan_base);
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	vchan_synchronize(&c->vc);
+}
+
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
 				 int irq, unsigned int irq_flags)
 {
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
 	od->ddev.device_config = bcm2835_dma_slave_config;
 	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+	od->ddev.device_synchronize = bcm2835_dma_synchronize;
 	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
 	/* Clear the DMA status and stop the transfer. */
 	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
 	if (jzchan->desc) {
-		jz4780_dma_desc_free(&jzchan->desc->vdesc);
+		vchan_terminate_vdesc(&jzchan->desc->vdesc);
 		jzchan->desc = NULL;
 	}
 
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+	vchan_synchronize(&jzchan->vchan);
+}
+
 static int jz4780_dma_config(struct dma_chan *chan,
 	struct dma_slave_config *config)
 {
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
 	dd->device_config = jz4780_dma_config;
 	dd->device_terminate_all = jz4780_dma_terminate_all;
+	dd->device_synchronize = jz4780_dma_synchronize;
 	dd->device_tx_status = jz4780_dma_tx_status;
 	dd->device_issue_pending = jz4780_dma_issue_pending;
 	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
 		/* Move the cyclic channel back to default queue */
 		if (!echan->tc && echan->edesc->cyclic)
 			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
-		/*
-		 * free the running request descriptor
-		 * since it is not in any of the vdesc lists
-		 */
-		edma_desc_free(&echan->edesc->vdesc);
+
+		vchan_terminate_vdesc(&echan->edesc->vdesc);
 		echan->edesc = NULL;
 	}
 
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
 static int mdc_terminate_all(struct dma_chan *chan)
 {
 	struct mdc_chan *mchan = to_mdc_chan(chan);
-	struct mdc_tx_desc *mdesc;
 	unsigned long flags;
 	LIST_HEAD(head);
 
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
 	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
 			MDC_CONTROL_AND_STATUS);
 
-	mdesc = mchan->desc;
-	mchan->desc = NULL;
+	if (mchan->desc) {
+		vchan_terminate_vdesc(&mchan->desc->vd);
+		mchan->desc = NULL;
+	}
 	vchan_get_all_descriptors(&mchan->vc, &head);
 
 	mdc_get_new_events(mchan);
 
 	spin_unlock_irqrestore(&mchan->vc.lock, flags);
 
-	if (mdesc)
-		mdc_desc_free(&mdesc->vd);
 	vchan_dma_desc_free_list(&mchan->vc, &head);
 
 	return 0;
 }
 
+static void mdc_synchronize(struct dma_chan *chan)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+
+	vchan_synchronize(&mchan->vc);
+}
+
 static int mdc_slave_config(struct dma_chan *chan,
 			    struct dma_slave_config *config)
 {
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
 	mdma->dma_dev.device_tx_status = mdc_tx_status;
 	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
 	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+	mdma->dma_dev.device_synchronize = mdc_synchronize;
 	mdma->dma_dev.device_config = mdc_slave_config;
 	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
 		c->phy = NULL;
 		p->vchan = NULL;
 		if (p->ds_run) {
-			k3_dma_free_desc(&p->ds_run->vd);
+			vchan_terminate_vdesc(&p->ds_run->vd);
 			p->ds_run = NULL;
 		}
 		p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+
+	vchan_synchronize(&c->vc);
+}
+
 static int k3_dma_transfer_pause(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_pause = k3_dma_transfer_pause;
 	d->slave.device_resume = k3_dma_transfer_resume;
 	d->slave.device_terminate_all = k3_dma_terminate_all;
+	d->slave.device_synchronize = k3_dma_synchronize;
 	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	/* init virtual channel */
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
-		omap_dma_desc_free(&c->desc->vd);
+		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
 		/* Avoid stopping the dma twice */
 		if (!c->paused)
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 
 	/* Dequeue current job */
 	if (s3cchan->at) {
-		s3c24xx_dma_desc_free(&s3cchan->at->vd);
+		vchan_terminate_vdesc(&s3cchan->at->vd);
 		s3cchan->at = NULL;
 	}
 
@@ -744,6 +744,13 @@ unlock:
 	return ret;
 }
 
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+	vchan_synchronize(&s3cchan->vc);
+}
+
 static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
 {
 	/* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
 	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
 	s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+	s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
 
 	/* Initialize slave engine for SoC internal dedicated peripherals */
 	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 	s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
 	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
 	s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+	s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
 	s3cdma->slave.filter.map = pdata->slave_map;
 	s3cdma->slave.filter.mapcnt = pdata->slavecnt;
 	s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
 		dmaengine_desc_get_callback(&vd->tx, &cb);
 
 		list_del(&vd->node);
-		if (dmaengine_desc_test_reuse(&vd->tx))
-			list_add(&vd->node, &vc->desc_allocated);
-		else
-			vc->desc_free(vd);
+		vchan_vdesc_fini(vd);
 
 		dmaengine_desc_callback_invoke(&cb, NULL);
 	}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
 	struct list_head desc_completed;
 
 	struct virt_dma_desc *cyclic;
+	struct virt_dma_desc *vd_terminated;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 }
 
 /**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+	if (dmaengine_desc_test_reuse(&vd->tx))
+		list_add(&vd->node, &vc->desc_allocated);
+	else
+		vc->desc_free(vd);
+}
+
+/**
  * vchan_cyclic_callback - report the completion of a period
  * @vd: virtual descriptor
  */
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
 }
 
 /**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+	/* free up stuck descriptor */
+	if (vc->vd_terminated)
+		vchan_vdesc_fini(vc->vd_terminated);
+
+	vc->vd_terminated = vd;
+	if (vc->cyclic == vd)
+		vc->cyclic = NULL;
+}
+
+/**
  * vchan_next_desc - peek at the next descriptor to be processed
  * @vc: virtual channel to obtain descriptor from
  *
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
  * Makes sure that all scheduled or active callbacks have finished running. For
  * proper operation the caller has to ensure that no new callbacks are scheduled
  * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
  */
 static inline void vchan_synchronize(struct virt_dma_chan *vc)
 {
+	unsigned long flags;
+
 	tasklet_kill(&vc->task);
+
+	spin_lock_irqsave(&vc->lock, flags);
+	if (vc->vd_terminated) {
+		vchan_vdesc_fini(vc->vd_terminated);
+		vc->vd_terminated = NULL;
+	}
+	spin_unlock_irqrestore(&vc->lock, flags);
 }
 
 #endif
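Every driver conversion in this merge follows the same pattern: device_terminate_all hands the in-flight descriptor to vchan_terminate_vdesc() under the channel lock instead of freeing it immediately, and a new device_synchronize callback calls vchan_synchronize(), which kills the completion tasklet and then releases the parked descriptor. The sketch below is a minimal illustration of that pattern for a hypothetical driver, not code from this commit: struct foo_chan, struct foo_desc, to_foo_chan() and the hardware-stop placeholder are invented names, and only the virt-dma helpers shown in the diff above are real API.

```c
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/* Hypothetical descriptor and channel; the channel embeds a virt_dma_chan
 * and remembers the descriptor currently programmed into the hardware,
 * which is not on any of the vc lists. */
struct foo_desc {
	struct virt_dma_desc vd;
	/* ... hardware-specific fields ... */
};

struct foo_chan {
	struct virt_dma_chan vc;
	struct foo_desc *desc;	/* in-flight descriptor */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
{
	return container_of(chan, struct foo_chan, vc.chan);
}

/* device_terminate_all: instead of freeing the in-flight descriptor directly
 * (which could race with a callback still scheduled on vc.task), park it in
 * vc->vd_terminated via vchan_terminate_vdesc(). */
static int foo_terminate_all(struct dma_chan *chan)
{
	struct foo_chan *fc = to_foo_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fc->vc.lock, flags);

	/* stop the hardware here (driver specific, omitted) */

	if (fc->desc) {
		vchan_terminate_vdesc(&fc->desc->vd);
		fc->desc = NULL;
	}

	vchan_get_all_descriptors(&fc->vc, &head);
	spin_unlock_irqrestore(&fc->vc.lock, flags);

	vchan_dma_desc_free_list(&fc->vc, &head);

	return 0;
}

/* device_synchronize: kills the completion tasklet and frees the descriptor
 * parked by the terminate path above. */
static void foo_synchronize(struct dma_chan *chan)
{
	struct foo_chan *fc = to_foo_chan(chan);

	vchan_synchronize(&fc->vc);
}
```

Note that the terminated descriptor is only freed once vchan_synchronize() runs (or when a later termination parks a new descriptor over it), which is why each converted driver also wires device_synchronize into its dma_device in probe, alongside device_terminate_all.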