 drivers/tty/serial/imx.c | 69
 1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 353281e7094b..81b91ee98ae4 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -211,6 +211,7 @@ struct imx_port {
 	struct work_struct	tsk_dma_rx, tsk_dma_tx;
 	unsigned int		dma_tx_nents;
 	bool			dma_is_rxing;
+	wait_queue_head_t	dma_wait;
 };
 
 #ifdef CONFIG_IRDA
@@ -326,6 +327,13 @@ static void imx_stop_rx(struct uart_port *port)
 	struct imx_port *sport = (struct imx_port *)port;
 	unsigned long temp;
 
+	/*
+	 * We are in SMP now, so if the DMA RX thread is running,
+	 * we have to wait for it to finish.
+	 */
+	if (sport->enable_dma && sport->dma_is_rxing)
+		return;
+
 	temp = readl(sport->port.membase + UCR2);
 	writel(temp &~ UCR2_RXEN, sport->port.membase + UCR2);
 }
@@ -746,10 +754,28 @@ static void dma_rx_work(struct work_struct *w)
 		start_rx_dma(sport);
 }
 
+static void imx_finish_dma(struct imx_port *sport)
+{
+	unsigned long temp;
+
+	/* Enable the interrupt when the RXFIFO is not empty. */
+	temp = readl(sport->port.membase + UCR1);
+	temp |= UCR1_RRDYEN;
+	writel(temp, sport->port.membase + UCR1);
+
+	sport->dma_is_rxing = false;
+	if (waitqueue_active(&sport->dma_wait))
+		wake_up(&sport->dma_wait);
+}
+
 /*
- * There are two kinds RX DMA interrupts:
+ * There are three kinds of RX DMA interrupts:
  *   [1] the RX DMA buffer is full.
- *   [2] the Aging timer reached its final value(enabled the UCR4_IDDMAEN).
+ *   [2] the Aging timer expires (waits for 8 bytes).
+ *   [3] the Idle Condition Detect (enabled by UCR4_IDDMAEN).
+ *
+ * [2] and [3] are similar, but [3] is better:
+ * it can wait for 32 bytes, so we do not use [2].
  */
 static void dma_rx_callback(void *data)
 {
@@ -767,21 +793,20 @@ static void dma_rx_callback(void *data)
 	/* unmap it first */
 	dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
 
+	/* If we have finished reading, we will not accept any more data. */
+	if (tty->closing) {
+		imx_finish_dma(sport);
+		return;
+	}
+
 	status = chan->device->device_tx_status(chan, (dma_cookie_t)NULL, &state);
 	count = RX_BUF_SIZE - state.residue;
 
 	if (count) {
 		sport->rx_bytes = count;
 		schedule_work(&sport->tsk_dma_rx);
-	} else {
-		unsigned long temp;
-
-		/* Enable the interrupt when the RXFIFO is not empty. */
-		temp = readl(sport->port.membase + UCR1);
-		temp |= UCR1_RRDYEN;
-		writel(temp, sport->port.membase + UCR1);
-		sport->dma_is_rxing = false;
-	}
+	} else
+		imx_finish_dma(sport);
 }
 
 static int start_rx_dma(struct imx_port *sport)
@@ -976,6 +1001,7 @@ static int imx_startup(struct uart_port *port)
 		sport->port.flags |= UPF_LOW_LATENCY;
 		INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
 		INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
+		init_waitqueue_head(&sport->dma_wait);
 	}
 
 	spin_lock_irqsave(&sport->port.lock, flags);
@@ -987,7 +1013,7 @@ static int imx_startup(struct uart_port *port)
 	temp = readl(sport->port.membase + UCR1);
 	temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
 	if (sport->enable_dma) {
-		temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
+		temp |= UCR1_RDMAEN | UCR1_TDMAEN;
 		/* ICD, wait for more than 32 frames, but it still to short. */
 		temp |= UCR1_ICD_REG(3);
 	}
@@ -1076,6 +1102,13 @@ static void imx_shutdown(struct uart_port *port)
 	unsigned long temp;
 	unsigned long flags;
 
+	if (sport->enable_dma) {
+		/* We have to wait for the DMA to finish. */
+		wait_event(sport->dma_wait, !sport->dma_is_rxing);
+		imx_stop_rx(port);
+		imx_uart_dma_exit(sport);
+	}
+
 	spin_lock_irqsave(&sport->port.lock, flags);
 	temp = readl(sport->port.membase + UCR2);
 	temp &= ~(UCR2_TXEN);
@@ -1114,12 +1147,16 @@ static void imx_shutdown(struct uart_port *port)
 	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
 	if (USE_IRDA(sport))
 		temp &= ~(UCR1_IREN);
-
+	if (sport->enable_dma)
+		temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN);
 	writel(temp, sport->port.membase + UCR1);
-	spin_unlock_irqrestore(&sport->port.lock, flags);
 
-	if (sport->enable_dma)
-		imx_uart_dma_exit(sport);
+	if (sport->enable_dma) {
+		temp = readl(sport->port.membase + UCR4);
+		temp &= ~UCR4_IDDMAEN;
+		writel(temp, sport->port.membase + UCR4);
+	}
+	spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
 static void
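
The core of the change is the wait-queue handshake between the DMA RX completion path and imx_shutdown(): imx_finish_dma() clears dma_is_rxing and wakes any sleeper, while shutdown blocks on dma_wait until RX DMA has really stopped before releasing the DMA channel. The sketch below is only an illustration of that pattern, not part of the patch; the demo_port/demo_* names are hypothetical stand-ins for struct imx_port and the driver functions they mirror.

/*
 * Illustrative sketch only: the wait_queue_head_t handshake that the
 * patch adds to struct imx_port, reduced to its bare bones.
 */
#include <linux/wait.h>
#include <linux/sched.h>

struct demo_port {				/* hypothetical stand-in for struct imx_port */
	wait_queue_head_t dma_wait;		/* added by the patch as sport->dma_wait */
	bool dma_is_rxing;			/* true while an RX DMA transfer is in flight */
};

static void demo_init(struct demo_port *p)
{
	init_waitqueue_head(&p->dma_wait);	/* as done in imx_startup() */
	p->dma_is_rxing = false;
}

/* Mirrors imx_finish_dma(): runs from the DMA RX completion callback. */
static void demo_finish_dma(struct demo_port *p)
{
	p->dma_is_rxing = false;
	if (waitqueue_active(&p->dma_wait))	/* skip the wakeup if nobody is sleeping */
		wake_up(&p->dma_wait);
}

/* Mirrors the new code in imx_shutdown(): sleep until RX DMA is done. */
static void demo_shutdown(struct demo_port *p)
{
	wait_event(p->dma_wait, !p->dma_is_rxing);
	/* only now is it safe to stop RX and release the DMA channel */
}

Ordering the teardown this way means the shutdown path never unmaps the RX buffer or frees the channel while the completion callback may still be scheduled to touch them, which is the race the patch is closing.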