author    Jay Cheng <jacheng@nvidia.com>    2010-10-15 23:55:02 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>    2011-11-30 21:36:25 -0800
commit    2d6c6e57c764b86d7c073e93b8b87c73187ece0d (patch)
tree      1f607ff2a614f461191317cac949f1fa61869d65 /arch/arm/mach-tegra/dma.c
parent    3660374093b71f91a701e5a02cc6d767b5526cc8 (diff)
[ARM/tegra] dma: add transfer count query, check interrupt pending status, stop DMA after last transfer

Add an API to return the completed transfer count of a pending, active,
or finished DMA request.
originally fixed by Gary King <gking@nvidia.com>

It is observed that the DMA interrupt has lower priority than its
client's interrupt. When the client's ISR queries the transfer count,
the DMA status has not yet been updated because the DMA ISR has not
been serviced. So before reading the status, explicitly check the
interrupt pending status and handle it accordingly.

Another observed issue: if the DMA has transferred (requested - 4)
bytes and the channel is moved to the invalid requestor before being
stopped, the status register is reset and the transferred byte count
reads as 0. This appears to be APB DMA hardware behavior. The
workaround is:
- Disable the global enable bit.
- Read the status.
- Stop the DMA.
- Re-enable the global enable bit.
With this workaround added, the count reads back correctly.
originally fixed by Laxman Dewangan <ldewangan@nvidia.com>

In continuous mode, the DMA should stop after the last transfer
completes if no further request is pending. If a request is pending,
check whether the hardware has already picked it up for the next
transfer; if it has not started yet, stop the DMA and start the new
request immediately.
originally fixed by Laxman Dewangan <ldewangan@nvidia.com>

Change-Id: I49c97c96eacdf4060de6b21cec0e71d940d33f00
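For reference, the workaround reduces to the sequence below (a minimal
sketch distilled from the diff that follows; read_count_safely is a
hypothetical helper name, while APB_DMA_GEN, GEN_ENABLE,
APB_DMA_CHAN_STA, enable_lock, and the 20 us settle delay are exactly
what the patch uses):

    /* Hypothetical condensation of the patch's read-then-stop sequence.
     * Caller must hold the channel lock; enable_lock serializes the
     * global-disable window against other channels doing the same. */
    static unsigned int read_count_safely(struct tegra_dma_channel *ch)
    {
            void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
            unsigned int status;

            spin_lock(&enable_lock);
            writel(0, addr + APB_DMA_GEN);          /* pause all channels */
            udelay(20);                             /* let in-flight burst drain */
            status = readl(ch->addr + APB_DMA_CHAN_STA); /* count now stable */
            tegra_dma_stop(ch);                     /* stop only this channel */
            writel(GEN_ENABLE, addr + APB_DMA_GEN); /* resume the others */
            spin_unlock(&enable_lock);

            return status;
    }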
Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--  arch/arm/mach-tegra/dma.c | 123 +++++++++++++++++++++----------------
1 file changed, 74 insertions(+), 49 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 71b7ab17fdd6..57727712da87 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -51,7 +51,6 @@
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC
@@ -129,6 +128,7 @@ struct tegra_dma_channel {
static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -176,36 +176,68 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
- u32 csr;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
tegra_dma_stop(ch);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return 0;
}
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, unsigned int status)
+{
+ unsigned int to_transfer;
+ unsigned int req_transfer_count;
+
+ unsigned int bytes_transferred;
+
+ to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+ req_transfer_count = ch->req_transfer_count;
+ req_transfer_count += 1;
+ to_transfer += 1;
+
+ bytes_transferred = req_transfer_count;
+
+ if (status & STA_BUSY)
+ bytes_transferred -= to_transfer;
+
+ /* In continuous transfer mode, DMA only tracks the count of the
+ * half DMA buffer. So, if the DMA has already finished the first
+ * half of the buffer, add that half to the completed count.
+ */
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+ bytes_transferred += req_transfer_count;
+
+ if (status & STA_ISE_EOC)
+ bytes_transferred += req_transfer_count;
+
+ bytes_transferred *= 4;
+
+ return bytes_transferred;
+}
+
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *_req)
{
- unsigned int csr;
- unsigned int status;
struct tegra_dma_req *req = NULL;
int found = 0;
+ unsigned int status;
unsigned long irq_flags;
- int to_transfer;
- int req_transfer_count;
+ int stop = 0;
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+ stop = 1;
+
list_for_each_entry(req, &ch->list, node) {
if (req == _req) {
list_del(&req->node);
@@ -218,47 +250,27 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
return 0;
}
+ if (!stop)
+ goto skip_status;
+
/* STOP the DMA and get the transfer count.
* Getting the transfer count is tricky.
- * - Change the source selector to invalid to stop the DMA from
- * FIFO to memory.
- * - Read the status register to know the number of pending
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number of pending
* bytes to be transferred.
- * - Finally stop or program the DMA to the next buffer in the
- * list.
+ * - Stop the DMA channel
+ * - Globally re-enable DMA to resume other transfers
*/
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
- /* Get the transfer count */
+ spin_lock(&enable_lock);
+ writel(0, addr + APB_DMA_GEN);
+ udelay(20);
status = readl(ch->addr + APB_DMA_CHAN_STA);
- to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
- req_transfer_count = ch->req_transfer_count;
- req_transfer_count += 1;
- to_transfer += 1;
-
- req->bytes_transferred = req_transfer_count;
-
- if (status & STA_BUSY)
- req->bytes_transferred -= to_transfer;
-
- /* In continuous transfer mode, DMA only tracks the count of the
- * half DMA buffer. So, if the DMA already finished half the DMA
- * then add the half buffer to the completed count.
- *
- * FIXME: There can be a race here. What if the req to
- * dequeue happens at the same time as the DMA just moved to
- * the new buffer and SW hasn't yet received the interrupt?
- */
- if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
- if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
- req->bytes_transferred += req_transfer_count;
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock(&enable_lock);
- req->bytes_transferred *= 4;
+ req->bytes_transferred = dma_active_count(ch, req, status);
- tegra_dma_stop(ch);
if (!list_empty(&ch->list)) {
/* if the list is not empty, queue the next request */
struct tegra_dma_req *next_req;
@@ -266,6 +278,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
}
+skip_status:
req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -559,6 +572,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ struct tegra_dma_req *next_req;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -588,8 +602,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
tegra_dma_stop(ch);
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
@@ -605,8 +617,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
/* Load the next request into the hardware, if available
* */
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw_partial(ch, next_req);
@@ -632,6 +642,19 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
req->bytes_transferred = bytes_transferred;
req->status = TEGRA_DMA_REQ_SUCCESS;
+ if (list_is_last(&req->node, &ch->list))
+ tegra_dma_stop(ch);
+ else {
+ /* This req may have been queued after the
+ * half-buffer completion, so it needs to
+ * start immediately */
+ next_req = list_entry(req->node.next, typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ }
+ }
+
list_del(&req->node);
/* DMA lock is NOT held when the callback is called */
@@ -640,6 +663,8 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
return;
} else {
+ tegra_dma_stop(ch);
+ /* DMA should have been stopped much earlier */
BUG();
}
}
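To make the new accounting concrete, here is the dma_active_count()
arithmetic in isolation (a sketch with hypothetical example numbers;
STA_COUNT_MASK, STA_COUNT_SHIFT, STA_BUSY, STA_ISE_EOC, and
req_transfer_count are the driver's own symbols, while bytes_done and
the example values are illustrative):

    /* Sketch of the byte-count math, mirroring dma_active_count() above.
     * Counts in CSR/STA are in words minus one, hence the "+ 1"; each
     * word is 4 bytes, hence the "* 4". */
    static unsigned int bytes_done(unsigned int req_transfer_count,
                                   unsigned int status)
    {
            unsigned int req_words = req_transfer_count + 1;
            unsigned int left = ((status & STA_COUNT_MASK) >>
                                 STA_COUNT_SHIFT) + 1;
            unsigned int done = req_words;

            if (status & STA_BUSY)          /* still running: subtract remainder */
                    done -= left;
            if (status & STA_ISE_EOC)       /* EOC pending, not yet serviced: */
                    done += req_words;      /* one full buffer finished uncounted */

            /* e.g. req_words = 256, left = 56, BUSY set, no pending EOC:
             * (256 - 56) * 4 = 800 bytes transferred. */
            return done * 4;
    }

In continuous mode the real function additionally adds req_words once
more when the request has already passed the half-buffer mark, since
the hardware only counts per half buffer.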