summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGary King <gking@nvidia.com>2009-12-16 15:53:01 -0800
committerGary King <gking@nvidia.com>2009-12-16 19:27:46 -0800
commit7f3aed6eb9e2040475df4afacedd5bc1ffca5f3b (patch)
tree2635b384b014656776a9f7ed0de38cfd2e169dd6
parent83c28f57eab1725bf4405d7f2002bbfbc9d65346 (diff)
tegra: enhance system DMA API with per-request dequeuing
remove some of the global status APIs from the public API set, add a per-request dequeue instead. move bytes_transferred out of the completion function parameters and into the request structure.
-rw-r--r--arch/arm/mach-tegra/dma.c197
-rw-r--r--arch/arm/mach-tegra/include/mach/dma.h19
-rw-r--r--arch/arm/mach-tegra/init_common.c13
3 files changed, 183 insertions, 46 deletions
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 6629f9cf45af..3d6ed3e3c56e 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -74,10 +74,12 @@ struct tegra_dma_channel {
int id;
spinlock_t lock;
char *name;
- void __iomem *addr;
+ volatile void __iomem *addr;
unsigned long phys_addr;
int mode;
+ int odd_interrupt;
+
/* Register shadow */
unsigned long csr;
unsigned long ahb_seq;
@@ -88,9 +90,9 @@ struct tegra_dma_channel {
#define NV_DMA_MAX_CHANNELS 32
-/* We are only allowed to use the channels in the following range, Others are
+/* We are only allowed to use the channels in the following range, others are
* used by different code base */
-#define NV_DMA_CHANNEL_MIN 11
+#define NV_DMA_CHANNEL_MIN 12
#define NV_DMA_CHANNEL_MAX 12
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
@@ -98,17 +100,33 @@ static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
+static void tegra_dma_stop(struct tegra_dma_channel *ch);
+
+void tegra_dma_flush(int channel)
+{
+
+}
+EXPORT_SYMBOL(tegra_dma_flush);
-void tegra_dma_stop(int channel)
+void tegra_dma_dequeue(int channel)
{
struct tegra_dma_channel *ch = &dma_channels[channel];
- unsigned int csr;
- unsigned int status;
+ struct tegra_dma_req *req;
+ req = list_entry(ch->list.next, typeof(*req), list);
- csr = readl(ch->addr + APBDMACHAN_CHANNEL_0_AHB_PTR_0);
+ tegra_dma_dequeue_req(channel, req);
+ return;
+}
- /* Disable interrupt, disable enable and the clear any interrupt */
+void tegra_dma_stop(struct tegra_dma_channel *ch)
+{
+ unsigned int csr;
+ unsigned int status;
+ csr = ch->csr;
csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, IE_EOC,
DISABLE, csr);
writel(csr, ch->addr + APBDMACHAN_CHANNEL_0_CSR_0);
@@ -120,7 +138,78 @@ void tegra_dma_stop(int channel)
if (status & NV_DRF_DEF(APBDMACHAN_CHANNEL_0, STA, ISE_EOC, INTR))
writel(status, ch->addr + APBDMACHAN_CHANNEL_0_STA_0);
}
-EXPORT_SYMBOL(tegra_dma_stop);
+
+int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *_req)
+{
+ struct tegra_dma_channel *ch = &dma_channels[channel];
+ unsigned int csr;
+ unsigned int status;
+ struct tegra_dma_req *req = NULL;
+ int found = 0;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ list_for_each_entry (req, &ch->list, list) {
+ if (req == _req) {
+ list_del(&req->list);
+ found = 1;
+ break;
+ }
+ }
+ BUG_ON(found==0);
+
+ if (found) {
+ int to_transfer;
+ int req_transfer_count;
+
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky.
+ * - Change the source selector to invalid to stop the DMA from
+ * FIFO to memory.
+	 * - Read the status register to know the number of pending
+	 *   bytes to be transferred.
+ * - Finally stop or program the DMA to the next buffer in the
+ * list.
+ */
+ csr = ch->csr;
+ csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+ REQ_SEL, NA31, csr);
+ /* Set the enable as that is not shadowed */
+ csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+ ENB, ENABLE, csr);
+ writel(csr, ch->addr + APBDMACHAN_CHANNEL_0_CSR_0);
+
+ /* Get the transfer count */
+ status = readl(ch->addr + APBDMACHAN_CHANNEL_0_STA_0);
+ to_transfer = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, STA,
+ COUNT, status);
+ req_transfer_count = NV_DRF_VAL(APBDMACHAN_CHANNEL_0,
+ CSR, WCOUNT, ch->csr);
+
+ req->bytes_transferred = req_transfer_count - to_transfer;
+ req->bytes_transferred *= 4;
+
+ tegra_dma_stop(ch);
+ if (!list_empty(&ch->list)) {
+ /* if the list is not empty, queue the next request */
+ struct tegra_dma_req *next_req;
+ next_req = list_entry(ch->list.next,
+ typeof(*next_req), list);
+ tegra_dma_update_hw(ch, next_req);
+ }
+ req->status = -TEGRA_DMA_REQ_ERROR_ABOTRED;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ req->complete(req, req->status);
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (found)
+ return 0;
+ else
+ return -ENOENT;
+}
+EXPORT_SYMBOL(tegra_dma_dequeue_req);
int tegra_dma_is_empty(int channel)
{
@@ -153,6 +242,8 @@ int tegra_dma_enqueue_req(int channel, struct tegra_dma_req *req)
spin_lock_irqsave(&ch->lock, irq_flags);
+ req->bytes_transferred = 0;
+ req->status = 0;
if (list_empty(&ch->list))
start_dma = 1;
@@ -167,12 +258,6 @@ int tegra_dma_enqueue_req(int channel, struct tegra_dma_req *req)
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
-void tegra_dma_flush(int channel)
-{
-
-}
-EXPORT_SYMBOL(tegra_dma_flush);
-
int tegra_dma_allocate_channel(int mode)
{
int channel;
@@ -199,7 +284,7 @@ void tegra_dma_free_channel(int channel)
{
struct tegra_dma_channel *ch;
- if (channel < NV_DMA_CHANNEL_MIN && channel > NV_DMA_CHANNEL_MAX)
+ if (channel < NV_DMA_CHANNEL_MIN && channel >= NV_DMA_CHANNEL_MAX)
return;
ch = &dma_channels[channel];
@@ -224,19 +309,34 @@ static int tegra_dma_set_name(int i, const char *fmt, ...)
return ret;
}
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ if (req->to_memory) {
+ ch->apb_ptr = req->source_addr;
+ ch->ahb_ptr = req->dest_addr;
+ } else {
+ ch->apb_ptr = req->dest_addr;
+ ch->ahb_ptr = req->source_addr;
+ }
+ writel(ch->apb_ptr, ch->addr + APBDMACHAN_CHANNEL_0_APB_PTR_0);
+ writel(ch->ahb_ptr, ch->addr + APBDMACHAN_CHANNEL_0_AHB_PTR_0);
+ return;
+}
+
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
{
- unsigned long csr;
int ahb_addr_wrap, apb_addr_wrap;
int index;
+ unsigned long csr;
switch (req->modid)
{
case NvRmModuleID_Uart:
- BUG_ON(req->instace >= 5);
+ BUG_ON(req->instance >= 5);
ch->csr = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0, CSR,
- REQ_SEL, uart_selector_values[req->instace], ch->csr);
+ REQ_SEL, uart_selector_values[req->instance], ch->csr);
ch->csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
CSR, FLOW, ENABLE, ch->csr);
ch->apb_seq = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
@@ -245,9 +345,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
AHB_SEQ, AHB_BURST, DMA_BURST_1WORDS, ch->ahb_seq);
break;
case NvRmModuleID_I2c:
- BUG_ON(req->instace >= 3);
+ BUG_ON(req->instance >= 3);
ch->csr = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0, CSR,
- REQ_SEL, i2c_selector_values[req->instace], ch->csr);
+ REQ_SEL, i2c_selector_values[req->instance], ch->csr);
ch->csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
CSR, FLOW, ENABLE, ch->csr);
ch->apb_seq = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
@@ -284,7 +384,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
AHB_SEQ, DBL_BUF, RELOAD_FOR_2X_BLOCKS, ch->ahb_seq);
/* In double buffered mode, we set the size to half the
- * requested size and interrupt every half the data
+ * requested size and interrupt when half the buffer
* is full */
ch->csr = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
((req->size>>3)-1), ch->csr);
@@ -334,6 +434,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
ch->ahb_seq = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
AHB_SEQ, WRAP, index, ch->ahb_seq);
+ ch->csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, IE_EOC,
+ ENABLE, ch->csr);
+
/* update hw registers with the shadow */
writel(ch->csr, ch->addr + APBDMACHAN_CHANNEL_0_CSR_0);
writel(ch->apb_seq, ch->addr + APBDMACHAN_CHANNEL_0_APB_SEQ_0);
@@ -371,16 +474,18 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
req = list_entry(ch->list.next, typeof(*req), list);
if (req) {
- int bytes_transfered;
+ int bytes_transferred;
- bytes_transfered = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
- ch->csr);
+ bytes_transferred = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR,
+ WCOUNT, ch->csr);
- bytes_transfered += 1;
- bytes_transfered <<= 2;
+ bytes_transferred += 1;
+ bytes_transferred <<= 2;
list_del(&req->list);
- req->complete(req, bytes_transfered, 0);
+ req->bytes_transferred = bytes_transferred;
+ req->status = 0;
+ req->complete(req, 0);
}
if (!list_empty(&ch->list)) {
@@ -399,14 +504,28 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
req = list_entry(ch->list.next, typeof(*req), list);
if (req) {
- int bytes_transfered;
-
- bytes_transfered = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
- ch->csr);
-
- bytes_transfered += 1;
- bytes_transfered <<= 2;
- req->complete(req, bytes_transfered, 0);
+ ch->odd_interrupt = (~ch->odd_interrupt & 0x1);
+ if (ch->odd_interrupt) {
+ struct tegra_dma_req *next_req;
+ /* Load the next request into the hardware */
+ next_req = list_first_entry(ch->list.next,
+ typeof(*next_req), list);
+ tegra_dma_update_hw_partial(ch, next_req);
+ } else {
+ /* Callback when the buffer is completely full (i.e on
+ * the second interrupt */
+ int bytes_transferred;
+
+ bytes_transferred = NV_DRF_VAL(APBDMACHAN_CHANNEL_0,
+ CSR, WCOUNT, ch->csr);
+ bytes_transferred += 1;
+ bytes_transferred <<= 3;
+
+ req->bytes_transferred = bytes_transferred;
+ req->status = 0;
+ list_del(&req->list);
+ req->complete(req, 0);
+ }
}
}
@@ -447,11 +566,11 @@ int __init tegra_dma_init(void)
/* Reserve all the channels we are not supposed to touch */
for (i=0; i< NV_DMA_MAX_CHANNELS; i++) {
- if ((i < NV_DMA_CHANNEL_MIN) || (i > NV_DMA_CHANNEL_MAX))
+ if ((i < NV_DMA_CHANNEL_MIN) || (i >= NV_DMA_CHANNEL_MAX))
__set_bit(i, channel_usage);
}
- for (i = NV_DMA_CHANNEL_MIN; i <= NV_DMA_CHANNEL_MAX; i++) {
+ for (i = NV_DMA_CHANNEL_MIN; i < NV_DMA_CHANNEL_MAX; i++) {
struct tegra_dma_channel *ch = &dma_channels[i];
ch->id = i;
@@ -472,7 +591,7 @@ int __init tegra_dma_init(void)
tegra_dma_init_hw(ch);
}
- for (i = NV_DMA_CHANNEL_MIN; i <= NV_DMA_CHANNEL_MAX; i++) {
+ for (i = NV_DMA_CHANNEL_MIN; i < NV_DMA_CHANNEL_MAX; i++) {
irq = NvRmGetIrqForLogicalInterrupt(s_hRmGlobal,
NvRmPrivModuleID_ApbDma, i);
printk("Irq value = %d\n", irq);
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index 63414e6d4f90..01fedd06e7f5 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -30,13 +30,16 @@ enum tegra_dma_mode {
TEGRA_DMA_MODE_ONESHOT = 4,
};
+enum tegra_dma_req_error {
+ TEGRA_DMA_REQ_ERROR_ABOTRED,
+};
+
struct tegra_dma_req {
struct list_head list;
unsigned int modid;
- int instace;
+ int instance;
- void (*complete)(struct tegra_dma_req *req, int bytes_trasferred,
- int err);
+ void (*complete)(struct tegra_dma_req *req, int err);
/* 1 to copy to memory.
* 0 to copy from the memory to device FIFO */
int to_memory;
@@ -48,15 +51,17 @@ struct tegra_dma_req {
unsigned int size;
+	/* Updated by the DMA driver on the completion of the request. */
+ int bytes_transferred;
+ int status;
+
/* Client specific data */
void *data;
};
-void tegra_dma_stop(int channel);
-void tegra_dma_is_enabled(int channel);
-void tegra_dma_start(int channel);
-
int tegra_dma_enqueue_req(int channel, struct tegra_dma_req *req);
+int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *req);
+void tegra_dma_dequeue(int channel);
/* Returns 1 if there are DMA is empty.
*/
diff --git a/arch/arm/mach-tegra/init_common.c b/arch/arm/mach-tegra/init_common.c
index a16a9896eef0..1da5f0f1de08 100644
--- a/arch/arm/mach-tegra/init_common.c
+++ b/arch/arm/mach-tegra/init_common.c
@@ -380,6 +380,9 @@ static void __init tegra_register_i2c(void)
#if !defined(CONFIG_SERIAL_TEGRA_DDK)
#define tegra_register_uart() do {} while (0)
#else
+
+static u64 tegra_uart_dma_mask = DMA_32BIT_MASK;
+
void __init tegra_register_uart(void)
{
struct platform_device *pDev = NULL;
@@ -409,6 +412,9 @@ void __init tegra_register_uart(void)
goto fail;
if (platform_device_add(pDev))
goto fail;
+
+ pDev->dev.coherent_dma_mask = ~0;
+ pDev->dev.dma_mask = &tegra_uart_dma_mask;
}
fail:
if (pDev)
@@ -780,12 +786,19 @@ static void __init tegra_init_cpu(void)
#error "Unrecognized Tegra SoC family"
#endif
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+extern int __init tegra_dma_init(void);
+#else
+#define tegra_dma_init() do {} while (0)
+#endif
+
void __init tegra_common_init(void)
{
NV_ASSERT_SUCCESS(NvRmOpen(&s_hRmGlobal,0));
NV_ASSERT_SUCCESS(NvRmGpioOpen(s_hRmGlobal, &s_hGpioGlobal));
tegra_init_cpu();
+ tegra_dma_init();
tegra_register_i2c();
tegra_register_spi();
tegra_register_uart();