author    Laxman Dewangan <ldewangan@nvidia.com>    2011-02-22 14:46:55 +0530
committer Dan Willemsen <dwillemsen@nvidia.com>    2011-11-30 21:41:57 -0800
commit    fa6102941fe0e7a6fe692e2a950390571ac6a7f9 (patch)
tree      b5ef07d34960d385ee77a94b2f63e8cbb8c90888 /arch/arm/mach-tegra/dma.c
parent    d575ebe72661625d86b25c8c7e49f809fa3ae064 (diff)
arm: tegra: dma: API for getting transfer count
Add an API for getting the amount of data transferred by the DMA.
Original-Change-Id: I348b8a2f0f855165fb1bf74f0d9013faa97056e7
Reviewed-on: http://git-master/r/20377
Tested-by: Sumit Bhattacharya <sumitb@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Rebase-Id: Raf91b00545ff2e61f14c7136972d807c6afb96ea
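A minimal caller-side sketch (not part of this patch; the function below and the surrounding request setup are hypothetical) of how a client driver might use the new API, either polling the in-flight byte count or stopping the channel to read the final count:

/* Hedged usage sketch: assumes "ch" and "req" were set up and the request
 * was enqueued with tegra_dma_enqueue_req() elsewhere in the driver. */
#include <linux/kernel.h>
#include <mach/dma.h>

static void show_dma_progress(struct tegra_dma_channel *ch,
			      struct tegra_dma_req *req)
{
	int count;

	/* Read the current transfer count without disturbing the DMA */
	count = tegra_dma_get_transfer_count(ch, req, false);
	pr_info("bytes transferred so far: %d\n", count);

	/* Stop the channel and read the final count; the request status
	 * becomes TEGRA_DMA_REQ_ERROR_ABORTED in this case. */
	count = tegra_dma_get_transfer_count(ch, req, true);
	pr_info("bytes transferred at stop: %d\n", count);
}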
Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--    arch/arm/mach-tegra/dma.c    83
1 file changed, 65 insertions(+), 18 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 95dd9159420d..eb1c7cab01db 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -191,6 +191,39 @@ int tegra_dma_cancel(struct tegra_dma_channel *ch)
return 0;
}
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
+{
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ unsigned int status;
+
+ if (is_stop_dma) {
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky.
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number
+ * of pending bytes to be transferred.
+ * - Stop the dma channel
+ * - Globally re-enable DMA to resume other transfers
+ */
+ spin_lock(&enable_lock);
+ writel(0, addr + APB_DMA_GEN);
+ udelay(20);
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock(&enable_lock);
+ if (status & STA_ISE_EOC) {
+ pr_err("Got Dma Int here clearing");
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+ }
+ req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+ } else {
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ }
+ return status;
+}
+
/* should be called with the channel lock held */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
struct tegra_dma_req *req, unsigned int status)
@@ -234,7 +267,6 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
unsigned int status;
unsigned long irq_flags;
int stop = 0;
- void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -256,22 +288,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
if (!stop)
goto skip_status;
- /* STOP the DMA and get the transfer count.
- * Getting the transfer count is tricky.
- * - Globally disable DMA on all channels
- * - Read the channel's status register to know the number of pending
- * bytes to be transferred.
- * - Stop the dma channel
- * - Globally re-enable DMA to resume other transfers
- */
- spin_lock(&enable_lock);
- writel(0, addr + APB_DMA_GEN);
- udelay(20);
- status = readl(ch->addr + APB_DMA_CHAN_STA);
- tegra_dma_stop(ch);
- writel(GEN_ENABLE, addr + APB_DMA_GEN);
- spin_unlock(&enable_lock);
-
+ status = get_channel_status(ch, req, true);
req->bytes_transferred = dma_active_count(ch, req, status);
if (!list_empty(&ch->list)) {
@@ -324,6 +341,36 @@ bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
+{
+ unsigned int status;
+ unsigned long irq_flags;
+ int bytes_transferred = 0;
+
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_debug("The dma request is not the head req\n");
+ return req->bytes_transferred;
+ }
+
+ if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_debug("The dma request is not running\n");
+ return req->bytes_transferred;
+ }
+
+ status = get_channel_status(ch, req, is_stop_dma);
+ bytes_transferred = dma_active_count(ch, req, status);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return bytes_transferred;
+}
+EXPORT_SYMBOL(tegra_dma_get_transfer_count);
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
@@ -681,7 +728,7 @@ static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
tegra_dma_update_hw_partial(ch, next_req);
}
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
- req->status = TEGRA_DMA_REQ_SUCCESS;
+ req->bytes_transferred = req->size >> 1;
/* DMA lock is NOT held when callback is called */
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (likely(req->threshold))