author		Laxman Dewangan <ldewangan@nvidia.com>		2010-11-17 18:00:48 +0530
committer	Varun Colbert <vcolbert@nvidia.com>		2010-11-24 15:17:05 -0800
commit		f2b1e18bc44cd6bf8e76d00a5f0fcb0d2b2e0995 (patch)
tree		4dee470b214f0cd1dae1b8d1f42fa86f4d4d0756
parent		164d4221875009fe179dda6679709ed8f7f9537e (diff)
[arm/tegra] dma: protect the dma transfer count update
When a client asks for the DMA transfer count, the DMA status
register read and the count update are protected by the channel
spin lock, so the count cannot be updated incorrectly under a
concurrent interrupt.

If the client asks for the transfer count after the DMA interrupt
has fired but before the DMA ISR thread has executed, and repeat
mode is selected, the pending buffer completion is folded into the
transfer count at the time the count is calculated (a simplified
sketch of this pattern follows the trailers below).
bug 732197
bug 731403
Change-Id: Id112d5bea9c2f4a1fc4d329d5ce04eed477f1dfa
Reviewed-on: http://git-master/r/11019
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-by: Udaykumar Rameshchan Raval <uraval@nvidia.com>
Tested-by: Pradeep Goudagunta <pgoudagunta@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
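
In outline, the pattern the patch applies looks like the sketch below. This is a minimal, hedged illustration, not the driver code itself: the struct is trimmed to the fields involved, and read_status()/words_in_flight() are hypothetical stubs standing in for the readl() on APB_DMA_CHAN_STA and the word-count math in dma_active_count().

#include <linux/spinlock.h>
#include <linux/types.h>

struct chan {
	spinlock_t	lock;
	bool		is_int_service_due;	/* set by the ISR, cleared once served */
	unsigned int	words_per_buffer;
};

/* Placeholder stubs for the hardware status read and the CSR math. */
static unsigned int read_status(struct chan *ch) { return 0; }
static unsigned int words_in_flight(struct chan *ch, unsigned int status)
{
	return 0;
}

unsigned int transferred_bytes(struct chan *ch)
{
	unsigned long flags;
	unsigned int status, count;

	/* Take the channel lock around both the status read and the
	 * math derived from it, so a concurrent ISR cannot change the
	 * bookkeeping between the two steps. */
	spin_lock_irqsave(&ch->lock, flags);
	status = read_status(ch);
	count = words_in_flight(ch, status);
	if (ch->is_int_service_due) {
		/* EOC fired but the threaded handler has not run yet:
		 * credit the buffer that just completed, much as
		 * dma_active_count() does for repeat-mode requests. */
		count += ch->words_per_buffer;
		ch->is_int_service_due = false;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return count << 2;	/* APB DMA counts 32-bit words */
}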
-rw-r--r--	arch/arm/mach-tegra/dma.c | 77
1 file changed, 57 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 1cfe13912116..01d2c6145fee 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -121,6 +121,7 @@ struct tegra_dma_channel {
 	void __iomem		*addr;
 	int			mode;
 	int			irq;
+	bool			is_int_service_due;
 
 	/* Register shadow */
 	u32			csr;
@@ -204,7 +205,6 @@ void tegra_dma_flush(struct tegra_dma_channel *ch)
 }
 EXPORT_SYMBOL(tegra_dma_flush);
 
-
 /* should be called with the channel lock held */
 static unsigned int dma_active_count(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req, unsigned int status)
@@ -228,16 +228,35 @@ static unsigned int dma_active_count(struct tegra_dma_channel *ch,
 	 * half DMA buffer. So, if the DMA already finished half the DMA
 	 * then add the half buffer to the completed count.
 	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			bytes_transferred += req_transfer_count;
-	}
+	if (!(ch->mode & TEGRA_DMA_MODE_CONTINUOUS))
+		goto skip_handle_cont;
 
-	if (status & STA_ISE_EOC)
-		bytes_transferred += req_transfer_count;
+	if (!((status & STA_ISE_EOC) || (ch->is_int_service_due)))
+		goto skip_irq_handle;
 
+	pr_debug("Dma transfer adjusted out of isr is_int_service_due %d\n",
+		ch->is_int_service_due);
+	if (req->is_repeat_req) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+			pr_debug("Int hit when buffer status empty\n");
+			req->bytes_transferred = req_transfer_count << 2;
+			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+		} else if (req->buffer_status ==
+				TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+			pr_debug("Int hit when buffer status half full\n");
+			pr_debug("The curr byte transferred 0x%x status 0x%x\n",
+				bytes_transferred, status);
+			req->bytes_transferred = 0;
+			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
+		} else {
+			BUG();
+		}
+		ch->is_int_service_due = false;
+	}
+
+skip_irq_handle:
+	bytes_transferred += req->bytes_transferred >> 2;
+
+skip_handle_cont:
 	bytes_transferred *= 4;
-
 	return bytes_transferred;
 }
@@ -281,7 +300,7 @@ static unsigned int get_channel_status(struct tegra_dma_channel *ch,
 }
 
 unsigned int tegra_dma_transferred_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
+			struct tegra_dma_req *req)
 {
 	unsigned long irq_flags;
 	unsigned int bytes_transferred;
@@ -536,6 +555,7 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 
 	ch = &dma_channels[channel];
 	ch->mode = mode;
+	ch->is_int_service_due = false;
 	return ch;
 }
 EXPORT_SYMBOL(tegra_dma_allocate_channel);
@@ -770,12 +790,19 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 	struct tegra_dma_req *req;
 	struct tegra_dma_req *next_req;
 	unsigned long irq_flags;
+	int bytes_transferred;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	if (list_empty(&ch->list)) {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return;
 	}
+
+	if (!ch->is_int_service_due) {
+		pr_debug("This int already served, ignoring...\n");
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return;
+	}
+	ch->is_int_service_due = false;
 
 	req = list_entry(ch->list.next, typeof(*req), node);
 	if (req) {
@@ -787,10 +814,9 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 		is_dma_ping_complete = !is_dma_ping_complete;
 		/* Out of sync - Release current buffer */
 		if( !is_dma_ping_complete ) {
-			int bytes_transferred;
-
 			bytes_transferred =
-				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+				(ch->csr & CSR_WCOUNT_MASK) >>
+						CSR_WCOUNT_SHIFT;
 			bytes_transferred += 1;
 			bytes_transferred <<= 3;
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
@@ -815,6 +841,11 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 		}
 
 		/* Load the next request into the hardware, if available
 		 * */
+		bytes_transferred =
+			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+		bytes_transferred += 1;
+		bytes_transferred <<= 2;
+		req->bytes_transferred = bytes_transferred;
 		if (!req->is_repeat_req) {
 			if (!list_is_last(&req->node, &ch->list)) {
 				next_req = list_entry(req->node.next,
@@ -837,15 +868,14 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 				TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
 			/* Callback when the buffer is completely full (i.e on
 			 * the second interrupt */
-			int bytes_transferred;
 
 			bytes_transferred =
 				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
 			bytes_transferred += 1;
-			bytes_transferred <<= 3;
+			bytes_transferred <<= 2;
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-			req->bytes_transferred = bytes_transferred;
+			req->bytes_transferred += bytes_transferred;
 			req->status = TEGRA_DMA_REQ_SUCCESS;
 			if (!req->is_repeat_req) {
 				if (list_is_last(&req->node, &ch->list)) {
@@ -868,6 +898,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			req->complete(req);
 		} else {
+			req->bytes_transferred = 0;
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
 			req->status = TEGRA_DMA_REQ_INFLIGHT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -889,15 +920,21 @@ static irqreturn_t dma_isr(int irq, void *data)
 {
 	struct tegra_dma_channel *ch = data;
 	unsigned long status;
+	unsigned long irq_flags;
+	unsigned long ret;
 
+	spin_lock_irqsave(&ch->lock, irq_flags);
 	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
+	if (status & STA_ISE_EOC) {
 		writel(status, ch->addr + APB_DMA_CHAN_STA);
-	else {
+		ch->is_int_service_due = true;
+		ret = IRQ_WAKE_THREAD;
+	} else {
 		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
-		return IRQ_HANDLED;
+		ret = IRQ_HANDLED;
 	}
-	return IRQ_WAKE_THREAD;
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+	return ret;
 }
 
 static irqreturn_t dma_thread_fn(int irq, void *data)
@@ -924,7 +961,7 @@ int __init tegra_dma_init(void)
 	writel(GEN_ENABLE, addr + APB_DMA_GEN);
 	writel(0, addr + APB_DMA_CNTRL);
 	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
-	       addr + APB_DMA_IRQ_MASK_SET);
+			addr + APB_DMA_IRQ_MASK_SET);
 
 	memset(channel_usage, 0, sizeof(channel_usage));
 	memset(dma_channels, 0, sizeof(dma_channels));
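
For reference, a hypothetical caller of the now lock-protected query might look like the snippet below. tegra_dma_transferred_req() and tegra_dma_allocate_channel() appear in the hunks above; the include path and the request setup are assumptions, not shown in this diff.

#include <linux/kernel.h>
#include <mach/dma.h>

/* ch and req are assumed to be a channel and an in-flight request set
 * up elsewhere; only the progress query is shown. */
static void report_progress(struct tegra_dma_channel *ch,
			    struct tegra_dma_req *req)
{
	unsigned int bytes;

	/* Safe to call at any point relative to the DMA interrupt: the
	 * status read and any repeat-mode adjustment happen under the
	 * channel spin lock. */
	bytes = tegra_dma_transferred_req(ch, req);
	pr_info("dma req %p: %u bytes transferred\n", req, bytes);
}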