author | Venkata(Muni) Anda <muni@nvidia.com> | 2010-01-15 01:35:56 -0800
---|---|---
committer | Venkata(Muni) Anda <muni@nvidia.com> | 2010-01-20 21:48:03 -0800
commit | de047f921798c652d90fa6bebe7b94b5047f42a2 (patch) |
tree | 2ff3d755ed482bd1ae26f7bc2c8aa82e9cbed50f /arch/arm |
parent | 5252a652cc1581f3ea1603a6f27ec6a6abf65eb4 (diff) |
tegra: DMA driver and tegra serial driver updates.
DMA changes:
- The channel lock must be taken in the ISR to make the DMA driver SMP safe.
- In continuous DMA mode, WCOUNT is the count of the half buffer, not the full
buffer. So, when updating the bytes-received member of the req structure, the
received count must be adjusted accordingly.
- Implemented the tegra_dma_flush() API. This API stops the DMA, resets the
transfer status of the req at the head of the queue, and reloads that req into
the hardware if it is still pending.
- Added "threshold" callback. This is called when the buffer reaches the
threshold. This is now aviable only for continous mode.
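
For illustration, a minimal sketch of how a client might use the new
threshold callback for a continuous-mode receive. Only
tegra_dma_enqueue_req(), the complete/threshold/to_memory/bytes_transferred
members, and the callback signatures come from this patch; the channel id and
the uart_push_*() helpers are hypothetical.

```c
/* Hedged sketch: continuous-mode RX with the new threshold callback.
 * rx_dma_channel and the uart_push_*() helpers are hypothetical
 * stand-ins for client code; the rest is from this patch's API.
 */
static int rx_dma_channel;      /* assumed already allocated elsewhere */

static void rx_threshold(struct tegra_dma_req *req, int err)
{
        /* First half of the buffer is full; drain it while the DMA
         * keeps filling the second half. */
        uart_push_half(req->data);                      /* hypothetical */
}

static void rx_complete(struct tegra_dma_req *req, int err)
{
        /* Whole buffer is full; the req is no longer queued, so drain
         * it and queue it again to keep the transfer running. */
        uart_push_full(req->data, req->bytes_transferred); /* hypothetical */
        tegra_dma_enqueue_req(rx_dma_channel, req);
}

static void rx_start(struct tegra_dma_req *req)
{
        req->to_memory = 1;             /* device FIFO -> memory */
        req->complete = rx_complete;
        req->threshold = rx_threshold;
        tegra_dma_enqueue_req(rx_dma_channel, req);
}
```

Per the dma.c changes below, both callbacks run in ISR context with the
channel lock released, so they may requeue requests but must not sleep.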
Serial driver updates:
- Simplified the ISR handling in DMA mode.
- Use RTS to enable flow control when dequeuing and enqueuing the DMA
buffers. This makes sure that the sender is flow-controlled. This is still
not working properly.
- Added a flush-buffer callback, called by the serial core when the tx buffer
is reset (see the sketch after this list).
- Fixed the locking in a couple of places, such as the ISR and the shutdown
callback.
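
The serial driver itself is outside this arch/arm diffstat, so the following
flush-buffer hook is only a hedged sketch of the pattern described above;
everything except tegra_dma_dequeue_req() is an assumed name.

```c
/* Hedged sketch of the flush-buffer callback: the serial core calls it
 * after resetting the tx circular buffer, so any queued tx DMA request
 * must be pulled back off the channel. Only tegra_dma_dequeue_req() is
 * part of this patch; the port type and field names are assumptions.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
        struct tegra_uart_port *t =
                container_of(u, struct tegra_uart_port, uport);

        if (t->use_tx_dma)
                tegra_dma_dequeue_req(t->tx_dma_channel, &t->tx_dma_req);
}
```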
With these changes, I was sometimes able to use Bluetooth (BT), but I
sometimes got parity errors.
When I switched the Tx path to PIO mode, BT worked fine. This can be done by
setting the tx_force_pio global variable to 1.
Change-Id: I4f738faac2fdb14622257878ef08db5397eebf8a
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/mach-tegra/dma.c | 164
-rw-r--r-- | arch/arm/mach-tegra/include/mach/dma.h | 37
2 files changed, 136 insertions, 65 deletions
```diff
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index fa4186513ce5..4f337d06c5a7 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -78,8 +78,6 @@ struct tegra_dma_channel {
         unsigned long   phys_addr;
         int             mode;
 
-        int             odd_interrupt;
-
         /* Register shadow */
         unsigned long   csr;
         unsigned long   ahb_seq;
@@ -102,7 +100,6 @@ static void tegra_dma_stop(struct tegra_dma_channel *ch);
 
 void tegra_dma_flush(int channel)
 {
-
 }
 EXPORT_SYMBOL(tegra_dma_flush);
 
@@ -142,6 +139,8 @@ int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *_req)
         struct tegra_dma_req *req = NULL;
         int found = 0;
         unsigned long irq_flags;
+        int to_transfer;
+        int req_transfer_count;
 
         spin_lock_irqsave(&ch->lock, irq_flags);
         list_for_each_entry (req, &ch->list, list) {
@@ -151,60 +150,65 @@ int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *_req)
                         break;
                 }
         }
-        BUG_ON(found==0);
-
-        if (found) {
-                int to_transfer;
-                int req_transfer_count;
-
-                /* STOP the DMA and get the transfer count.
-                 * Getting the transfer count is tricky.
-                 *  - Change the source selector to invalid to stop the DMA from
-                 *    FIFO to memory.
-                 *  - Read the status register to knoe the number of pending
-                 *    bytes to be transfered.
-                 *  - Finally stop or program the DMA to the next buffer in the
-                 *    list.
-                 */
-                csr = ch->csr;
-                csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
-                        REQ_SEL, NA31, csr);
-                /* Set the enable as that is not shadowed */
-                csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
-                        ENB, ENABLE, csr);
-                writel(csr, ch->addr + APBDMACHAN_CHANNEL_0_CSR_0);
-
-                /* Get the transfer count */
-                status = readl(ch->addr + APBDMACHAN_CHANNEL_0_STA_0);
-                to_transfer = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, STA,
-                        COUNT, status);
-                req_transfer_count = NV_DRF_VAL(APBDMACHAN_CHANNEL_0,
-                        CSR, WCOUNT, ch->csr);
-
-                req->bytes_transferred = req_transfer_count - to_transfer;
-                req->bytes_transferred *= 4;
-
-                tegra_dma_stop(ch);
-                if (!list_empty(&ch->list)) {
-                        /* if the list is not empty, queue the next request */
-                        struct tegra_dma_req *next_req;
-                        next_req = list_entry(ch->list.next,
-                                typeof(*next_req), list);
-                        tegra_dma_update_hw(ch, next_req);
-                }
-                req->status = -TEGRA_DMA_REQ_ERROR_ABOTRED;
-
+        if (found==0) {
                 spin_unlock_irqrestore(&ch->lock, irq_flags);
-                req->complete(req, req->status);
-                spin_lock_irqsave(&ch->lock, irq_flags);
+                return 0;
+        }
+        /* STOP the DMA and get the transfer count.
+         * Getting the transfer count is tricky.
+         *  - Change the source selector to invalid to stop the DMA from
+         *    FIFO to memory.
+         *  - Read the status register to know the number of pending
+         *    bytes to be transfered.
+         *  - Finally stop or program the DMA to the next buffer in the
+         *    list.
+         */
+        csr = ch->csr;
+        csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+                REQ_SEL, NA31, csr);
+        /* Set the enable as that is not shadowed */
+        csr = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+                ENB, ENABLE, csr);
+        writel(csr, ch->addr + APBDMACHAN_CHANNEL_0_CSR_0);
+
+        /* Get the transfer count */
+        status = readl(ch->addr + APBDMACHAN_CHANNEL_0_STA_0);
+        to_transfer = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, STA,
+                COUNT, status);
+        req_transfer_count = NV_DRF_VAL(APBDMACHAN_CHANNEL_0,
+                CSR, WCOUNT, ch->csr);
+
+        req->bytes_transferred = req_transfer_count - to_transfer;
+        req->bytes_transferred *= 4;
+        /* In continous transfer mode, DMA only tracks the count of the
+         * half DMA buffer. So, if the DMA already finished half the DMA
+         * then add the half buffer to the completed count.
+         *
+         * FIXME: There can be a race here. What if the req to
+         * dequue happens at the same time as the DMA just moved to
+         * the new buffer and SW didn't yet received the interrupt?
+         */
+        if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
+                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+                        req->bytes_transferred += 4 * req_transfer_count;
+                }
+
+        tegra_dma_stop(ch);
+        if (!list_empty(&ch->list)) {
+                /* if the list is not empty, queue the next request */
+                struct tegra_dma_req *next_req;
+                next_req = list_entry(ch->list.next,
+                        typeof(*next_req), list);
+                tegra_dma_update_hw(ch, next_req);
         }
+        req->status = -TEGRA_DMA_REQ_ERROR_ABOTRED;
+
         spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-        if (found)
-                return 0;
-        else
-                return -ENOENT;
+        /* Callback should be called without any lock */
+        req->complete(req, req->status);
+        return 0;
 }
 EXPORT_SYMBOL(tegra_dma_dequeue_req);
@@ -241,6 +245,7 @@ int tegra_dma_enqueue_req(int channel, struct tegra_dma_req *req)
 
         req->bytes_transferred = 0;
         req->status = 0;
+        req->buffer_status = 0;
 
         if (list_empty(&ch->list))
                 start_dma = 1;
@@ -466,9 +471,11 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 {
         static struct tegra_dma_req *req;
 
-        if (list_empty(&ch->list))
-                /* Why did we got an interrupt? */
+        spin_lock(&ch->lock);
+        if (list_empty(&ch->list)) {
+                spin_unlock(&ch->lock);
                 return;
+        }
 
         req = list_entry(ch->list.next, typeof(*req), list);
         if (req) {
@@ -483,33 +490,51 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
                 list_del(&req->list);
                 req->bytes_transferred = bytes_transferred;
                 req->status = 0;
+
+                spin_unlock(&ch->lock);
+                /* Callback should be called without any lock */
                 req->complete(req, 0);
+                spin_lock(&ch->lock);
         }
 
         if (!list_empty(&ch->list)) {
                 req = list_entry(ch->list.next, typeof(*req), list);
                 tegra_dma_update_hw(ch, req);
         }
+        spin_unlock(&ch->lock);
 }
 
 static void handle_continuous_dma(struct tegra_dma_channel *ch)
 {
         static struct tegra_dma_req *req;
 
-        if (list_empty(&ch->list))
-                /* Why did we got an interrupt? */
+        spin_lock(&ch->lock);
+        if (list_empty(&ch->list)) {
+                spin_unlock(&ch->lock);
                 return;
+        }
 
         req = list_entry(ch->list.next, typeof(*req), list);
         if (req) {
-                ch->odd_interrupt = (~ch->odd_interrupt & 0x1);
-                if (ch->odd_interrupt) {
-                        struct tegra_dma_req *next_req;
-                        /* Load the next request into the hardware */
-                        next_req = list_first_entry(ch->list.next,
-                                typeof(*next_req), list);
-                        tegra_dma_update_hw_partial(ch, next_req);
-                } else {
+                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+                        /* Load the next request into the hardware, if available
+                         * */
+                        if (!list_is_last(&req->list, &ch->list)) {
+                                struct tegra_dma_req *next_req;
+
+                                printk("Queue the next request\n");
+                                next_req = list_entry(req->list.next,
+                                        typeof(*next_req), list);
+                                tegra_dma_update_hw_partial(ch, next_req);
+                        }
+                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+                        /* DMA lock is NOT held when callbak is called */
+                        spin_unlock(&ch->lock);
+                        req->threshold(req, 0);
+                        return;
+
+                } else if (req->buffer_status ==
+                                TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
                         /* Callback when the buffer is completely full (i.e on
                          * the second interrupt */
                         int bytes_transferred;
@@ -519,12 +544,21 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
                         bytes_transferred += 1;
                         bytes_transferred <<= 3;
 
+                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                         req->bytes_transferred = bytes_transferred;
                         req->status = 0;
                         list_del(&req->list);
+
+                        /* DMA lock is NOT held when callbak is called */
+                        spin_unlock(&ch->lock);
                         req->complete(req, 0);
+                        return;
+
+                } else {
+                        BUG();
                 }
         }
+        spin_unlock(&ch->lock);
 }
 
 static irqreturn_t dma_isr(int irq, void *id)
@@ -546,6 +580,7 @@ static irqreturn_t dma_isr(int irq, void *id)
         else
                 handle_continuous_dma(ch);
+
         return IRQ_HANDLED;
 }
@@ -608,4 +643,3 @@ fail:
         /* FIXME cleanup */
         return ret;
 }
-
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index 964af099a392..c39be865bafc 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -34,12 +34,46 @@ enum tegra_dma_req_error {
         TEGRA_DMA_REQ_ERROR_ABOTRED,
 };
 
+enum tegra_dma_req_buff_status {
+        TEGRA_DMA_REQ_BUF_STATUS_EMPTY,
+        TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
+        TEGRA_DMA_REQ_BUF_STATUS_FULL,
+};
+
 struct tegra_dma_req {
         struct list_head list;
         unsigned int modid;
         int instance;
 
+        /* Called when the req is complete and from the DMA ISR context.
+         * When this is called the req structure is no longer queued by
+         * the DMA channel.
+         *
+         * State of the DMA depends on the number of req it has. If there are
+         * no DMA requests queued up, then it will STOP the DMA. It there are
+         * more requests in the DMA, then it will queue the next request.
+         */
         void (*complete)(struct tegra_dma_req *req, int err);
+
+        /* This is a called from the DMA ISR context when the DMA is still in
+         * progress and is actively filling same buffer.
+         *
+         * In case of continous mode receive, this threshold is 1/2 the buffer
+         * size. In other cases, this will not even be called as there is no
+         * hardware support for it.
+         *
+         * In the case of continous mode receive, if there is next req already
+         * queued, DMA programs the HW to use that req when this req is
+         * completed. If there is no "next req" queued, then DMA ISR doesn't do
+         * anything before calling this callback.
+         *
+         * This is mainly used by the cases, where the clients has queued
+         * only one req and want to get some sort of DMA threshold
+         * callback to program the next buffer.
+         *
+         */
+        void (*threshold)(struct tegra_dma_req *req, int err);
+
         /* 1 to copy to memory.
          * 0 to copy from the memory to device FIFO */
         int to_memory;
@@ -57,6 +91,9 @@ struct tegra_dma_req {
         int bytes_transferred;
         int status;
 
+        /* DMA completion tracking information */
+        int buffer_status;
+
         /* Client specific data */
         void *data;
 };
```
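
To make the dequeue accounting in dma.c above concrete, here is a small
sketch that restates the byte computation with illustrative numbers; the
helper function and the 4096-byte buffer size are inventions for the example,
not part of the patch.

```c
/* Restates the tegra_dma_dequeue_req() accounting from this patch.
 * In continuous mode, WCOUNT holds the HALF-buffer length in 32-bit
 * words, so a completed first half must be added back in. */
static int dequeued_bytes(int req_transfer_count, int to_transfer,
                          int first_half_done)
{
        int bytes = (req_transfer_count - to_transfer) * 4;

        if (first_half_done)    /* buffer_status == HALF_FULL */
                bytes += 4 * req_transfer_count;
        return bytes;
}

/* Example with an assumed 4096-byte buffer:
 *   WCOUNT = 4096 / 2 / 4 = 512 words per half.
 * Dequeueing with 100 words still pending in the second half:
 *   dequeued_bytes(512, 100, 1) == 412 * 4 + 2048 == 3696 bytes.
 */
```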