author    Laxman Dewangan <ldewangan@nvidia.com>    2012-02-04 21:39:21 +0530
committer Simone Willett <swillett@nvidia.com>     2012-02-14 10:05:22 -0800
commit    f0f11b070e50ec083b308c167cc53782f4f88749 (patch)
tree      7fcaa111a8e8b0f57744e88c190c8640b8b823f4
parent    acfb26304a9e091f7d72b3979366eb2f962e85bb (diff)
ARM: tegra: dma: code cleanups and run checkpatch
Fixed the checkpatch errors and did some code cleanups.

Change-Id: Ice966d80e4b7175b72ce218197f6ff5ebd7d8c67
Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-on: http://git-master/r/79404
Reviewed-by: Automatic_Commit_Validation_User
-rw-r--r--  arch/arm/mach-tegra/dma.c | 159
1 file changed, 85 insertions(+), 74 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index bb59df967a23..ce79e3aef8b7 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -108,13 +108,13 @@
(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
static struct clk *dma_clk;
-const unsigned int ahb_addr_wrap_table[8] = {
+static const unsigned int ahb_addr_wrap_table[8] = {
0, 32, 64, 128, 256, 512, 1024, 2048
};
-const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
+static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
-const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
+static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
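A note on the table changes above: checkpatch flags file-scope symbols that are not marked static, since they needlessly leak into the kernel's global namespace. Below is a minimal stand-alone sketch (user-space C, not the driver itself; the table values are copied from the patch) of the pattern being applied: file-local lookup tables get internal linkage (static) and are placed in read-only data (const).

#include <stdio.h>

/* File-local, read-only lookup table, as in the hunk above. */
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

int main(void)
{
	/* Index 3 selects a 128-byte AHB address wrap, per the table. */
	printf("wrap[3] = %u\n", ahb_addr_wrap_table[3]);
	return 0;
}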
@@ -142,7 +142,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
-static void tegra_dma_stop(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
@@ -238,7 +237,8 @@ static unsigned int dma_active_count(struct tegra_dma_channel *ch,
if (status & STA_BUSY)
bytes_transferred -= to_transfer;
- /* In continuous transfer mode, DMA only tracks the count of the
+ /*
+ * In continuous transfer mode, DMA only tracks the count of the
* half DMA buffer. So, if the DMA already finished half the DMA
* then add the half buffer to the completed count.
*/
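The comment reflowed in this hunk documents a hardware quirk: in continuous mode the transfer counter only spans half of the buffer. A hedged user-space model of that accounting follows (a hypothetical helper, not the driver's dma_active_count): once the first half has completed, that half must be added back to the running total.

#include <stdbool.h>
#include <stddef.h>

/* Model: the HW counter wraps per half buffer, so a completed first
 * half is added back to the total. Names are illustrative. */
static size_t model_completed_bytes(size_t buf_size, size_t hw_counted,
				    bool first_half_done)
{
	size_t done = hw_counted;

	if (first_half_done)
		done += buf_size / 2;
	return done;
}

int main(void)
{
	/* 4096-byte buffer, HW reports 100 bytes into the second half. */
	return model_completed_bytes(4096, 100, true) == 2148 ? 0 : 1;
}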
@@ -400,7 +400,8 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
if (start_dma)
tegra_dma_update_hw(ch, req);
- /* Check to see if this request needs to be pushed immediately.
+ /*
+ * Check to see if this request needs to be pushed immediately.
* For continuous single-buffer DMA:
* The first buffer is always in-flight. The 2nd buffer should
* also be in-flight. The 3rd buffer becomes in-flight when the
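The comment this hunk reflows states the queueing rule for continuous single-buffer DMA: at most two requests are in flight, and the third starts only when the first completes. A minimal sketch of that rule, with an illustrative name rather than the driver's:

#include <stdbool.h>

/* Queue positions 0 and 1 are pushed to hardware immediately;
 * later entries wait for an earlier buffer to complete. */
static bool push_immediately(unsigned int queue_position)
{
	return queue_position < 2;
}

int main(void)
{
	/* The 3rd request (position 2) must wait. */
	return push_immediately(2) ? 1 : 0;
}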
@@ -597,14 +598,16 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
ch->req_transfer_count = (req->size >> 2) - 1;
- /* One shot mode is always single buffered. Continuous mode could
+ /*
+ * One shot mode is always single buffered. Continuous mode could
* support either.
*/
if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
csr |= CSR_ONCE;
} else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) {
ahb_seq |= AHB_SEQ_DBL_BUF;
- /* We want an interrupt halfway through, then on the
+ /*
+ * We want an interrupt halfway through, then on the
* completion. The double buffer means 2 interrupts
* pass before the DMA HW latches a new AHB_PTR etc.
*/
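This hunk's comment explains the double-buffer interrupt scheme: one interrupt fires at the halfway point and another at completion, and only after both does the hardware latch a new AHB_PTR. A sketch of the flag selection follows; the bit positions are illustrative stand-ins, not Tegra's actual register layout.

#include <stdbool.h>

#define SKETCH_CSR_ONCE        (1u << 27) /* illustrative bit */
#define SKETCH_AHB_SEQ_DBL_BUF (1u << 19) /* illustrative bit */

/* One-shot transfers are single buffered and stop after one pass;
 * continuous double-buffer mode enables the double buffer so an
 * interrupt fires at the midpoint and again at completion. */
static void select_mode(bool oneshot, bool continuous_double,
			unsigned int *csr, unsigned int *ahb_seq)
{
	if (oneshot)
		*csr |= SKETCH_CSR_ONCE;
	else if (continuous_double)
		*ahb_seq |= SKETCH_AHB_SEQ_DBL_BUF;
}

int main(void)
{
	unsigned int csr = 0, ahb_seq = 0;

	select_mode(false, true, &csr, &ahb_seq);
	return ahb_seq == SKETCH_AHB_SEQ_DBL_BUF ? 0 : 1;
}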
@@ -729,72 +732,35 @@ static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
}
req = list_entry(ch->list.next, typeof(*req), node);
- if (req) {
- if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
- bool is_dma_ping_complete;
- is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
- & STA_PING_PONG) ? true : false;
- if (req->to_memory)
- is_dma_ping_complete = !is_dma_ping_complete;
- /* Out of sync - Release current buffer */
- if (!is_dma_ping_complete) {
- int bytes_transferred;
-
- bytes_transferred = ch->req_transfer_count;
- bytes_transferred += 1;
- bytes_transferred <<= 3;
- req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
- req->bytes_transferred = bytes_transferred;
- req->status = TEGRA_DMA_REQ_SUCCESS;
- tegra_dma_stop(ch);
-
- if (!list_is_last(&req->node, &ch->list)) {
- next_req = list_entry(req->node.next,
- typeof(*next_req), node);
- tegra_dma_update_hw(ch, next_req);
- }
+ if (!req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return;
+ }
- list_del(&req->node);
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+ bool is_dma_ping_complete;
+ unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
+ is_dma_ping_complete = (status & STA_PING_PONG) ? true : false;
+
+ /* Ping-pong status reads reversed when the transfer writes to memory */
+ if (req->to_memory)
+ is_dma_ping_complete = !is_dma_ping_complete;
+
+ /* Out of sync - Release current buffer */
+ if (!is_dma_ping_complete) {
+ int bytes_transferred;
+ bytes_transferred = ch->req_transfer_count;
+ bytes_transferred += 1;
+ bytes_transferred <<= 3;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->bytes_transferred = bytes_transferred;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ tegra_dma_stop(ch);
- /* DMA lock is NOT held when callbak is called */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- req->complete(req);
- return;
- }
- /* Load the next request into the hardware, if available
- * */
if (!list_is_last(&req->node, &ch->list)) {
next_req = list_entry(req->node.next,
- typeof(*next_req), node);
- tegra_dma_update_hw_partial(ch, next_req);
- }
- req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
- req->bytes_transferred = req->size >> 1;
- /* DMA lock is NOT held when callback is called */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- if (likely(req->threshold))
- req->threshold(req);
- return;
-
- } else if (req->buffer_status ==
- TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
- /* Callback when the buffer is completely full (i.e on
- * the second interrupt */
-
- req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
- req->bytes_transferred = req->size;
- req->status = TEGRA_DMA_REQ_SUCCESS;
- if (list_is_last(&req->node, &ch->list))
- tegra_dma_stop(ch);
- else {
- /* It may be possible that req came after
- * half dma complete so it need to start
- * immediately */
- next_req = list_entry(req->node.next, typeof(*next_req), node);
- if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
- tegra_dma_stop(ch);
- tegra_dma_update_hw(ch, next_req);
- }
+ typeof(*next_req), node);
+ tegra_dma_update_hw(ch, next_req);
}
list_del(&req->node);
@@ -803,13 +769,57 @@ static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
spin_unlock_irqrestore(&ch->lock, irq_flags);
req->complete(req);
return;
+ }
+ /* Load the next request into the hardware, if available */
+ if (!list_is_last(&req->node, &ch->list)) {
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw_partial(ch, next_req);
+ }
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+ req->bytes_transferred = req->size >> 1;
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ if (likely(req->threshold))
+ req->threshold(req);
+ return;
+ }
- } else {
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+ /*
+ * Callback when the buffer is completely full (i.e. on
+ * the second interrupt)
+ */
+
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->bytes_transferred = req->size;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ if (list_is_last(&req->node, &ch->list))
tegra_dma_stop(ch);
- /* Dma should be stop much earlier */
- BUG();
+ else {
+ /*
+ * It is possible that this req arrived after the half-DMA
+ * completion, so it needs to start immediately
+ */
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ }
}
+
+ list_del(&req->node);
+
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+ return;
}
+ tegra_dma_stop(ch);
+ /* DMA should have been stopped much earlier */
+ BUG();
+
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
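The bulk of this patch is the restructuring visible in the hunk above: the deeply nested "if (req) { if (...) ... else if (...) ... else ... }" body of handle_continuous_dbl_dma becomes a sequence of guard clauses with early returns, which is what brings the function within checkpatch's indentation limits. A stand-alone model of the resulting state machine (EMPTY -> HALF_FULL -> FULL, with the old BUG() fallthrough); the types and messages are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

enum buf_status { BUF_EMPTY, BUF_HALF_FULL, BUF_FULL };

struct model_req {
	enum buf_status buffer_status;
};

/* Guard-clause shape of the reworked interrupt handler. */
static void model_handle(struct model_req *req)
{
	if (!req)
		return; /* queue empty: nothing to do */

	if (req->buffer_status == BUF_EMPTY) {
		/* First interrupt: half of the buffer has transferred. */
		req->buffer_status = BUF_HALF_FULL;
		puts("threshold callback (half full)");
		return;
	}

	if (req->buffer_status == BUF_HALF_FULL) {
		/* Second interrupt: the request is complete. */
		req->buffer_status = BUF_FULL;
		puts("completion callback (full)");
		return;
	}

	/* Mirrors the driver's BUG(): DMA should have stopped earlier. */
	abort();
}

int main(void)
{
	struct model_req req = { BUF_EMPTY };

	model_handle(&req); /* halfway interrupt */
	model_handle(&req); /* completion interrupt */
	return 0;
}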
@@ -844,7 +854,8 @@ static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
pr_debug("%s: stop\n", __func__);
tegra_dma_stop(ch);
} else {
- /* The next entry should have already been queued and is now
+ /*
+ * The next entry should have already been queued and is now
* in the middle of xfer. We can then write the next->next one
* if it exists.
*/
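One last detail worth calling out from the large hunk above: the new comment notes that the STA_PING_PONG status bit reads inverted when the transfer direction is to memory, so the handler flips it before testing whether hardware and software agree on which half-buffer just completed. A sketch of that check, with an illustrative bit value rather than the real register definition:

#include <stdbool.h>

#define SKETCH_STA_PING_PONG (1u << 28) /* illustrative bit */

/* The ping-pong bit's sense is reversed for memory-write transfers. */
static bool ping_complete(unsigned long status, bool to_memory)
{
	bool done = (status & SKETCH_STA_PING_PONG) != 0;

	return to_memory ? !done : done;
}

int main(void)
{
	/* A memory write with the bit set reads as "not complete". */
	return ping_complete(SKETCH_STA_PING_PONG, true) ? 1 : 0;
}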