author      Andy Duan <fugang.duan@nxp.com>             2017-03-29 15:23:27 +0800
committer   Leonard Crestez <leonard.crestez@nxp.com>   2018-08-24 12:41:33 +0300
commit      74d44e427be659e9b7e9bd661d4e7880358ee503 (patch)
tree        ad760ef4b73fb8b867a5d40dea3e57c56b7c85c9 /drivers/tty/serial/fsl_lpuart.c
parent      c27cc34d180509a508f44dea24903b606203000f (diff)
MLK-14543 tty: serial: fsl_lpuart: add lpuart32 dma support
The current driver does not support lpuart32 DMA mode.
This patch adds lpuart32 TX/RX DMA support; there are two main changes:
- lpuart32 TX DMA reuses the lpuart TX DMA path to reduce code duplication.
- lpuart32 RX DMA still uses prep_sg mode, since imx7ulp does not support
  eeop mode; this also keeps it aligned with 4.1.y.
If DMA mode is not wanted, remove the DMA channel properties from the dts file.
Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
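
The last note above is the key configuration switch: the driver only enables DMA when it can actually obtain the "tx"/"rx" channels named in the device tree, and otherwise logs a notice and stays in CPU (interrupt-driven) mode. Below is a minimal sketch of that probe-time decision, condensed from the dma_request_slave_channel() calls this patch adds to lpuart_probe(); the helper name lpuart_request_dma_chans() is illustrative only and error handling is trimmed.

/*
 * Sketch of the probe-time DMA channel lookup added by this patch.
 * If the dts node has no "dmas"/"dma-names" entries for "tx" or "rx",
 * dma_request_slave_channel() returns NULL and the port keeps running
 * in CPU (interrupt) mode.
 */
static void lpuart_request_dma_chans(struct lpuart_port *sport)
{
	sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
	if (!sport->dma_tx_chan)
		dev_info(sport->port.dev, "NO DMA tx channel, run at cpu mode\n");

	sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
	if (!sport->dma_rx_chan)
		dev_info(sport->port.dev, "NO DMA rx channel, run at cpu mode\n");
}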
Diffstat (limited to 'drivers/tty/serial/fsl_lpuart.c')
-rw-r--r--   drivers/tty/serial/fsl_lpuart.c   876
1 file changed, 572 insertions, 304 deletions
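
For orientation before reading the diff itself: the new lpuart32 RX DMA path maps a small bounce buffer once, then repeatedly queues a single slave transfer (sized to the RX FIFO watermark on lpuart32); the completion callback, or the idle timer for partial transfers, copies the received bytes to the tty layer and either re-arms DMA or drops back to interrupt mode. The following is a minimal sketch of the kick-off step, condensed from lpuart_dma_rx() in the diff and assuming the dmaengine API the driver already uses; the _sketch suffix marks it as illustrative.

/*
 * Condensed form of lpuart_dma_rx() from this patch: hand the bounce
 * buffer back to the device, queue one DEV_TO_MEM transfer and let the
 * completion callback push the data to the tty layer.
 */
static int lpuart_dma_rx_sketch(struct lpuart_port *sport)
{
	struct dma_async_tx_descriptor *desc;

	dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
				   sport->rxdma_len, DMA_FROM_DEVICE);

	desc = dmaengine_prep_slave_single(sport->dma_rx_chan,
					   sport->dma_rx_buf_bus,
					   sport->rxdma_len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->callback = lpuart_dma_rx_complete;	/* copies to tty, re-arms */
	desc->callback_param = sport;
	sport->dma_rx_in_progress = true;

	sport->dma_rx_cookie = dmaengine_submit(desc);
	dma_async_issue_pending(sport->dma_rx_chan);

	return 0;
}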
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 920ba407db93..d6a18ec7e6c2 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -25,10 +25,10 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/reset.h> #include <linux/serial_core.h> #include <linux/slab.h> #include <linux/tty_flip.h> -#include <linux/of_irq.h> /* All registers are 8-bit width */ #define UARTBDH 0x00 @@ -134,6 +134,7 @@ #define UARTBAUD_M10 0x20000000 #define UARTBAUD_TDMAE 0x00800000 #define UARTBAUD_RDMAE 0x00200000 +#define UARTBAUD_RIDMAE 0x00100000 #define UARTBAUD_MATCFG 0x00400000 #define UARTBAUD_BOTHEDGE 0x00020000 #define UARTBAUD_RESYNCDIS 0x00010000 @@ -182,7 +183,7 @@ #define UARTCTRL_SBK 0x00010000 #define UARTCTRL_MA1IE 0x00008000 #define UARTCTRL_MA2IE 0x00004000 -#define UARTCTRL_IDLECFG 0x00000100 +#define UARTCTRL_IDLECFG_OFF 0x8 #define UARTCTRL_LOOPS 0x00000080 #define UARTCTRL_DOZEEN 0x00000040 #define UARTCTRL_RSRC 0x00000020 @@ -197,9 +198,12 @@ #define UARTDATA_FRETSC 0x00002000 #define UARTDATA_RXEMPT 0x00001000 #define UARTDATA_IDLINE 0x00000800 +#define UARTDATA_INVALID 0x0000F000 #define UARTDATA_MASK 0x3ff #define UARTMODIR_IREN 0x00020000 +#define UARTMODIR_RTSWATER_S 0x8 +#define UARTMODIR_RTSWATER_M 0x0000ff00 #define UARTMODIR_TXCTSSRC 0x00000020 #define UARTMODIR_TXCTSC 0x00000010 #define UARTMODIR_RXRTSE 0x00000008 @@ -213,6 +217,8 @@ #define UARTFIFO_RXUF 0x00010000 #define UARTFIFO_TXFLUSH 0x00008000 #define UARTFIFO_RXFLUSH 0x00004000 +#define UARTFIFO_RXIDEN_MASK 0x7 +#define UARTFIFO_RXIDEN_OFF 10 #define UARTFIFO_TXOFE 0x00000200 #define UARTFIFO_RXUFE 0x00000100 #define UARTFIFO_TXFE 0x00000080 @@ -228,8 +234,9 @@ #define UARTWATER_TXWATER_OFF 0 #define UARTWATER_RXWATER_OFF 16 -/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */ -#define DMA_RX_TIMEOUT (10) +#define UARTFIFO_RXIDEN_RDRF 0x3 +#define UARTCTRL_IDLECFG 0x7 +#define FSL_UART_RX_DMA_BUFFER_SIZE 64 #define DRIVER_NAME "fsl-lpuart" #define DEV_NAME "ttyLP" @@ -241,6 +248,8 @@ struct lpuart_port { struct clk *per_clk; unsigned int txfifo_size; unsigned int rxfifo_size; + unsigned int txfifo_watermark; + unsigned int rxfifo_watermark; bool lpuart32; bool lpuart_dma_tx_use; @@ -249,16 +258,18 @@ struct lpuart_port { struct dma_chan *dma_rx_chan; struct dma_async_tx_descriptor *dma_tx_desc; struct dma_async_tx_descriptor *dma_rx_desc; + dma_addr_t dma_rx_buf_bus; dma_cookie_t dma_tx_cookie; dma_cookie_t dma_rx_cookie; + unsigned char *dma_rx_buf_virt; unsigned int dma_tx_bytes; unsigned int dma_rx_bytes; + size_t rxdma_len; bool dma_tx_in_progress; + bool dma_rx_in_progress; unsigned int dma_rx_timeout; struct timer_list lpuart_timer; struct scatterlist rx_sgl, tx_sgl[2]; - struct circ_buf rx_ring; - int rx_dma_rng_buf_len; unsigned int dma_tx_nents; wait_queue_head_t dma_wait; }; @@ -281,7 +292,9 @@ static const struct of_device_id lpuart_dt_ids[] = { MODULE_DEVICE_TABLE(of, lpuart_dt_ids); /* Forward declare this for the dma callbacks*/ +static int lpuart_dma_rx(struct lpuart_port *sport); static void lpuart_dma_tx_complete(void *arg); +static inline void lpuart_prepare_rx(struct lpuart_port *sport); static u32 lpuart32_read(void __iomem *addr) { @@ -412,15 +425,18 @@ static int lpuart_dma_tx_request(struct uart_port *port) struct dma_slave_config dma_tx_sconfig = {}; int ret; - dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR; + if (sport->lpuart32) + dma_tx_sconfig.dst_addr = 
sport->port.mapbase + UARTDATA; + else + dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR; dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_tx_sconfig.dst_maxburst = 1; dma_tx_sconfig.direction = DMA_MEM_TO_DEV; ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig); - if (ret) { + if (ret < 0) { dev_err(sport->port.dev, - "DMA slave config failed, err = %d\n", ret); + "Dma slave config failed, err = %d\n", ret); return ret; } @@ -430,7 +446,6 @@ static int lpuart_dma_tx_request(struct uart_port *port) static void lpuart_flush_buffer(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - if (sport->lpuart_dma_tx_use) { if (sport->dma_tx_in_progress) { dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0], @@ -490,13 +505,13 @@ static void lpuart_start_tx(struct uart_port *port) struct circ_buf *xmit = &sport->port.state->xmit; unsigned char temp; - temp = readb(port->membase + UARTCR2); - writeb(temp | UARTCR2_TIE, port->membase + UARTCR2); - if (sport->lpuart_dma_tx_use) { if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) lpuart_dma_tx(sport); } else { + temp = readb(port->membase + UARTCR2); + writeb(temp | UARTCR2_TIE, port->membase + UARTCR2); + if (readb(port->membase + UARTSR1) & UARTSR1_TDRE) lpuart_transmit_buffer(sport); } @@ -505,13 +520,19 @@ static void lpuart_start_tx(struct uart_port *port) static void lpuart32_start_tx(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + struct circ_buf *xmit = &sport->port.state->xmit; unsigned long temp; - temp = lpuart32_read(port->membase + UARTCTRL); - lpuart32_write(temp | UARTCTRL_TIE, port->membase + UARTCTRL); - - if (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TDRE) - lpuart32_transmit_buffer(sport); + if (sport->lpuart_dma_tx_use) { + if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) + lpuart_dma_tx(sport); + } else { + temp = lpuart32_read(port->membase + UARTCTRL); + lpuart32_write(temp | UARTCTRL_TIE, port->membase + UARTCTRL); + if (lpuart32_read(sport->port.membase + UARTSTAT) & + UARTSTAT_TDRE) + lpuart32_transmit_buffer(sport); + } } /* return TIOCSER_TEMT when transmitter is not busy */ @@ -533,8 +554,18 @@ static unsigned int lpuart_tx_empty(struct uart_port *port) static unsigned int lpuart32_tx_empty(struct uart_port *port) { - return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ? 
- TIOCSER_TEMT : 0; + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned int sr1 = lpuart32_read(port->membase + UARTSTAT); + unsigned int sfifo = lpuart32_read(sport->port.membase + UARTFIFO); + + if (sport->dma_tx_in_progress) + return 0; + + if (sr1 & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) + return TIOCSER_TEMT; + + return 0; } static irqreturn_t lpuart_txint(int irq, void *dev_id) @@ -649,6 +680,7 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id) while (!(lpuart32_read(sport->port.membase + UARTFIFO) & UARTFIFO_RXEMPT)) { flg = TTY_NORMAL; sport->port.icount.rx++; + /* * to clear the FE, OR, NF, FE, PE flags, * read STAT then read DATA reg @@ -662,7 +694,6 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id) continue; } - rx &= UARTDATA_MASK; if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) continue; @@ -696,6 +727,10 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id) #endif } + if (rx & UARTDATA_INVALID) + continue; + + rx &= UARTDATA_MASK; tty_insert_flip_char(port, rx, flg); } @@ -709,14 +744,19 @@ out: static irqreturn_t lpuart_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned char sts; + unsigned char sts, crdma; sts = readb(sport->port.membase + UARTSR1); + crdma = readb(sport->port.membase + UARTCR5); - if (sts & UARTSR1_RDRF) - lpuart_rxint(irq, dev_id); + if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) { + if (sport->lpuart_dma_rx_use) + lpuart_prepare_rx(sport); + else + lpuart_rxint(irq, dev_id); + } - if (sts & UARTSR1_TDRE) + if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use) lpuart_txint(irq, dev_id); return IRQ_HANDLED; @@ -725,195 +765,143 @@ static irqreturn_t lpuart_int(int irq, void *dev_id) static irqreturn_t lpuart32_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned long sts, rxcount; + unsigned long sts, rxcount, crdma; sts = lpuart32_read(sport->port.membase + UARTSTAT); rxcount = lpuart32_read(sport->port.membase + UARTWATER); rxcount = rxcount >> UARTWATER_RXCNT_OFF; + crdma = lpuart32_read(sport->port.membase + UARTBAUD); + + if (!sts) + return IRQ_NONE; - if (sts & UARTSTAT_RDRF || rxcount > 0) - lpuart32_rxint(irq, dev_id); + if (!(crdma & UARTBAUD_RDMAE) && rxcount > 0) { + if (!sport->lpuart_dma_rx_use || + (sts & (UARTSTAT_PE | UARTSTAT_NF | UARTSTAT_FE))) + lpuart32_rxint(irq, dev_id); + else + lpuart_prepare_rx(sport); + } - if ((sts & UARTSTAT_TDRE) && - !(lpuart32_read(sport->port.membase + UARTBAUD) & UARTBAUD_TDMAE)) + if (sts & UARTSTAT_TDRE && !sport->lpuart_dma_tx_use) lpuart_txint(irq, dev_id); lpuart32_write(sts, sport->port.membase + UARTSTAT); return IRQ_HANDLED; } -static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) +static void lpuart_copy_rx_to_tty(struct lpuart_port *sport, + struct tty_port *tty, int count) { - struct tty_port *port = &sport->port.state->port; - struct dma_tx_state state; - enum dma_status dmastat; - struct circ_buf *ring = &sport->rx_ring; - unsigned long flags; - int count = 0; - unsigned char sr; - - sr = readb(sport->port.membase + UARTSR1); + int copied; - if (sr & (UARTSR1_PE | UARTSR1_FE)) { - /* Read DR to clear the error flags */ - readb(sport->port.membase + UARTDR); - - if (sr & UARTSR1_PE) - sport->port.icount.parity++; - else if (sr & UARTSR1_FE) - sport->port.icount.frame++; - } - - async_tx_ack(sport->dma_rx_desc); + sport->port.icount.rx += count; - spin_lock_irqsave(&sport->port.lock, flags); - - dmastat = dmaengine_tx_status(sport->dma_rx_chan, - 
sport->dma_rx_cookie, - &state); - - if (dmastat == DMA_ERROR) { - dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); - spin_unlock_irqrestore(&sport->port.lock, flags); + if (!tty) { + dev_err(sport->port.dev, "No tty port\n"); return; } - /* CPU claims ownership of RX DMA buffer */ - dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); - - /* - * ring->head points to the end of data already written by the DMA. - * ring->tail points to the beginning of data to be read by the - * framework. - * The current transfer size should not be larger than the dma buffer - * length. - */ - ring->head = sport->rx_sgl.length - state.residue; - BUG_ON(ring->head > sport->rx_sgl.length); - /* - * At this point ring->head may point to the first byte right after the - * last byte of the dma buffer: - * 0 <= ring->head <= sport->rx_sgl.length - * - * However ring->tail must always points inside the dma buffer: - * 0 <= ring->tail <= sport->rx_sgl.length - 1 - * - * Since we use a ring buffer, we have to handle the case - * where head is lower than tail. In such a case, we first read from - * tail to the end of the buffer then reset tail. - */ - if (ring->head < ring->tail) { - count = sport->rx_sgl.length - ring->tail; - - tty_insert_flip_string(port, ring->buf + ring->tail, count); - ring->tail = 0; - sport->port.icount.rx += count; - } + dma_sync_single_for_cpu(sport->port.dev, sport->dma_rx_buf_bus, + sport->rxdma_len, DMA_FROM_DEVICE); + copied = tty_insert_flip_string(tty, + ((unsigned char *)(sport->dma_rx_buf_virt)), count); - /* Finally we read data from tail to head */ - if (ring->tail < ring->head) { - count = ring->head - ring->tail; - tty_insert_flip_string(port, ring->buf + ring->tail, count); - /* Wrap ring->head if needed */ - if (ring->head >= sport->rx_sgl.length) - ring->head = 0; - ring->tail = ring->head; - sport->port.icount.rx += count; + if (copied != count) { + WARN_ON(1); + dev_err(sport->port.dev, "RxData copy to tty layer failed\n"); } +} - dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1, - DMA_FROM_DEVICE); - - spin_unlock_irqrestore(&sport->port.lock, flags); +static void lpuart_dma_stop(struct lpuart_port *sport) +{ + unsigned int temp; + unsigned int crdma; - tty_flip_buffer_push(port); - mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); + if (sport->lpuart32) { + lpuart32_write(UARTSTAT_IDLE, sport->port.membase + UARTSTAT); + crdma = lpuart32_read(sport->port.membase + UARTBAUD); + lpuart32_write(crdma & ~(UARTBAUD_RDMAE), + sport->port.membase + UARTBAUD); + temp = lpuart32_read(sport->port.membase + UARTCTRL); + temp |= (UARTCTRL_RIE | UARTCTRL_ILIE); + lpuart32_write(temp, sport->port.membase + UARTCTRL); + } else { + temp = readb(sport->port.membase + UARTCR5); + writeb(temp & ~UARTCR5_RDMAS, sport->port.membase + UARTCR5); + } } static void lpuart_dma_rx_complete(void *arg) { struct lpuart_port *sport = arg; + struct tty_port *port = &sport->port.state->port; + unsigned long flags; + struct dma_tx_state state; + int count; - lpuart_copy_rx_to_tty(sport); -} + async_tx_ack(sport->dma_rx_desc); + mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); -static void lpuart_timer_func(unsigned long data) -{ - struct lpuart_port *sport = (struct lpuart_port *)data; + spin_lock_irqsave(&sport->port.lock, flags); + sport->dma_rx_in_progress = false; + dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state); + count = sport->rxfifo_watermark - state.residue; + lpuart_copy_rx_to_tty(sport, port, count); + 
tty_flip_buffer_push(port); + + if (count < sport->rxfifo_watermark) + lpuart_dma_stop(sport); + else + lpuart_dma_rx(sport); - lpuart_copy_rx_to_tty(sport); + spin_unlock_irqrestore(&sport->port.lock, flags); } -static inline int lpuart_start_rx_dma(struct lpuart_port *sport) +static void lpuart_timer_func(unsigned long data) { - struct dma_slave_config dma_rx_sconfig = {}; - struct circ_buf *ring = &sport->rx_ring; - int ret, nent; - int bits, baud; - struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); - struct ktermios *termios = &tty->termios; - - baud = tty_get_baud_rate(tty); - - bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10; - if (termios->c_cflag & PARENB) - bits++; + struct lpuart_port *sport = (struct lpuart_port *)data; + struct tty_port *port = &sport->port.state->port; + struct dma_tx_state state; + unsigned long flags; + int count; - /* - * Calculate length of one DMA buffer size to keep latency below - * 10ms at any baud rate. - */ - sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2; - sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1)); - if (sport->rx_dma_rng_buf_len < 16) - sport->rx_dma_rng_buf_len = 16; - - ring->buf = kmalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC); - if (!ring->buf) { - dev_err(sport->port.dev, "Ring buf alloc failed\n"); - return -ENOMEM; - } + del_timer(&sport->lpuart_timer); + dmaengine_pause(sport->dma_rx_chan); + dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state); + dmaengine_terminate_all(sport->dma_rx_chan); + count = sport->rxdma_len - state.residue; + async_tx_ack(sport->dma_rx_desc); - sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); - sg_set_buf(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); - nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); + spin_lock_irqsave(&sport->port.lock, flags); - if (!nent) { - dev_err(sport->port.dev, "DMA Rx mapping error\n"); - return -EINVAL; - } + sport->dma_rx_in_progress = false; + lpuart_copy_rx_to_tty(sport, port, count); + tty_flip_buffer_push(port); + lpuart_dma_stop(sport); - dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR; - dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - dma_rx_sconfig.src_maxburst = 1; - dma_rx_sconfig.direction = DMA_DEV_TO_MEM; - ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig); + spin_unlock_irqrestore(&sport->port.lock, flags); +} - if (ret < 0) { - dev_err(sport->port.dev, - "DMA Rx slave config failed, err = %d\n", ret); - return ret; - } +static int lpuart_dma_rx(struct lpuart_port *sport) +{ + dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus, + sport->rxdma_len, DMA_FROM_DEVICE); + sport->dma_rx_desc = dmaengine_prep_slave_single(sport->dma_rx_chan, + sport->dma_rx_buf_bus, sport->rxdma_len, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); - sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan, - sg_dma_address(&sport->rx_sgl), - sport->rx_sgl.length, - sport->rx_sgl.length / 2, - DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT); if (!sport->dma_rx_desc) { - dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); - return -EFAULT; + dev_err(sport->port.dev, "Not able to get desc for rx\n"); + return -EIO; } sport->dma_rx_desc->callback = lpuart_dma_rx_complete; sport->dma_rx_desc->callback_param = sport; + sport->dma_rx_in_progress = true; sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); dma_async_issue_pending(sport->dma_rx_chan); - writeb(readb(sport->port.membase + UARTCR5) | 
UARTCR5_RDMAS, - sport->port.membase + UARTCR5); - return 0; } @@ -922,15 +910,38 @@ static void lpuart_dma_rx_free(struct uart_port *port) struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - if (sport->dma_rx_chan) - dmaengine_terminate_all(sport->dma_rx_chan); + dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus, + sport->rxdma_len, DMA_FROM_DEVICE); - dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); - kfree(sport->rx_ring.buf); - sport->rx_ring.tail = 0; - sport->rx_ring.head = 0; - sport->dma_rx_desc = NULL; - sport->dma_rx_cookie = -EINVAL; + sport->dma_rx_buf_bus = 0; + sport->dma_rx_buf_virt = NULL; +} + +static inline void lpuart_prepare_rx(struct lpuart_port *sport) +{ + unsigned long flags; + unsigned int temp; + unsigned int crdma; + + spin_lock_irqsave(&sport->port.lock, flags); + + sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; + add_timer(&sport->lpuart_timer); + + lpuart_dma_rx(sport); + if (sport->lpuart32) { + temp = lpuart32_read(sport->port.membase + UARTCTRL); + temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE); + lpuart32_write(temp, sport->port.membase + UARTCTRL); + crdma = lpuart32_read(sport->port.membase + UARTBAUD); + lpuart32_write(crdma | UARTBAUD_RDMAE, + sport->port.membase + UARTBAUD); + } else { + temp = readb(sport->port.membase + UARTCR5); + writeb(temp | UARTCR5_RDMAS, sport->port.membase + UARTCR5); + } + + spin_unlock_irqrestore(&sport->port.lock, flags); } static int lpuart_config_rs485(struct uart_port *port, @@ -1006,11 +1017,15 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port) if (reg & UARTMODIR_RXRTSE) temp |= TIOCM_RTS; + if (lpuart32_read(port->membase + UARTCTRL) & UARTCTRL_LOOPS) + temp |= TIOCM_LOOP; + return temp; } static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl) { + /* No flow control for user handle */ unsigned char temp; struct lpuart_port *sport = container_of(port, struct lpuart_port, port); @@ -1034,14 +1049,10 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned long temp; - temp = lpuart32_read(port->membase + UARTMODIR) & - ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE); - - if (mctrl & TIOCM_RTS) - temp |= UARTMODIR_RXRTSE; - - if (mctrl & TIOCM_CTS) - temp |= UARTMODIR_TXCTSE; + temp = lpuart32_read(port->membase + UARTCTRL); + if (mctrl & TIOCM_LOOP) + temp |= UARTCTRL_LOOPS; + lpuart32_write(temp, port->membase + UARTCTRL); } static void lpuart_break_ctl(struct uart_port *port, int break_state) @@ -1115,22 +1126,74 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport) val = lpuart32_read(sport->port.membase + UARTFIFO); val |= UARTFIFO_TXFE | UARTFIFO_RXFE; val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; + val &= ~(UARTFIFO_RXIDEN_MASK << UARTFIFO_RXIDEN_OFF); + val |= ((UARTFIFO_RXIDEN_RDRF & UARTFIFO_RXIDEN_MASK) << + UARTFIFO_RXIDEN_OFF); lpuart32_write(val, sport->port.membase + UARTFIFO); /* set the watermark */ - val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF); + if (uart_console(&sport->port)) { + val = (0x1 << UARTWATER_RXWATER_OFF) | + (0x0 << UARTWATER_TXWATER_OFF); + } else { + val = lpuart32_read(sport->port.membase + UARTMODIR); + val = sport->rxfifo_watermark << UARTMODIR_RTSWATER_S; + lpuart32_write(val, sport->port.membase + UARTMODIR); + val = (0x1 << UARTWATER_RXWATER_OFF) | + (sport->txfifo_watermark << UARTWATER_TXWATER_OFF); + } lpuart32_write(val, sport->port.membase + UARTWATER); /* Restore cr2 */ lpuart32_write(ctrl_saved, sport->port.membase + 
UARTCTRL); } -static void rx_dma_timer_init(struct lpuart_port *sport) +static int lpuart_dma_rx_request(struct uart_port *port) { - setup_timer(&sport->lpuart_timer, lpuart_timer_func, - (unsigned long)sport); - sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; - add_timer(&sport->lpuart_timer); + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + struct dma_slave_config dma_rx_sconfig; + dma_addr_t dma_bus; + unsigned char *dma_buf; + int ret; + + dma_buf = devm_kzalloc(sport->port.dev, + FSL_UART_RX_DMA_BUFFER_SIZE, GFP_KERNEL); + + if (!dma_buf) { + dev_err(sport->port.dev, "Dma rx alloc failed\n"); + return -ENOMEM; + } + + dma_bus = dma_map_single(sport->port.dev, dma_buf, + FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + + if (dma_mapping_error(sport->port.dev, dma_bus)) { + dev_err(sport->port.dev, "dma_map_single rx failed\n"); + return -ENOMEM; + } + + if (sport->lpuart32) + dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDATA; + else + dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR; + + dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_rx_sconfig.src_maxburst = 1; + dma_rx_sconfig.direction = DMA_DEV_TO_MEM; + ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig); + + if (ret < 0) { + dev_err(sport->port.dev, + "Dma slave config failed, err = %d\n", ret); + return ret; + } + + sport->dma_rx_buf_virt = dma_buf; + sport->dma_rx_buf_bus = dma_bus; + sport->dma_rx_in_progress = false; + + return 0; } static int lpuart_startup(struct uart_port *port) @@ -1159,6 +1222,24 @@ static int lpuart_startup(struct uart_port *port) sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK) + 1); + sport->rxdma_len = FSL_UART_RX_DMA_BUFFER_SIZE; + + if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) { + sport->lpuart_dma_rx_use = true; + setup_timer(&sport->lpuart_timer, lpuart_timer_func, + (unsigned long)sport); + } else + sport->lpuart_dma_rx_use = false; + + + if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) { + init_waitqueue_head(&sport->dma_wait); + sport->lpuart_dma_tx_use = true; + temp = readb(port->membase + UARTCR5); + temp &= ~UARTCR5_RDMAS; + writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5); + } else + sport->lpuart_dma_tx_use = false; ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0, DRIVER_NAME, sport); @@ -1170,42 +1251,28 @@ static int lpuart_startup(struct uart_port *port) lpuart_setup_watermark(sport); temp = readb(sport->port.membase + UARTCR2); - temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE); + temp |= (UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE); writeb(temp, sport->port.membase + UARTCR2); - if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) { - /* set Rx DMA timeout */ - sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT); - if (!sport->dma_rx_timeout) - sport->dma_rx_timeout = 1; - - sport->lpuart_dma_rx_use = true; - rx_dma_timer_init(sport); - } else { - sport->lpuart_dma_rx_use = false; - } - - if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) { - init_waitqueue_head(&sport->dma_wait); - sport->lpuart_dma_tx_use = true; - temp = readb(port->membase + UARTCR5); - writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5); - } else { - sport->lpuart_dma_tx_use = false; - } - spin_unlock_irqrestore(&sport->port.lock, flags); - return 0; } static int lpuart32_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + struct tty_port *tty_port = 
&sport->port.state->port; int ret; unsigned long flags; unsigned long temp; + /* some modem may need reset */ + if (!tty_port_suspended(tty_port)) { + ret = device_reset(sport->port.dev); + if (ret && ret != -ENOENT) + return ret; + } + ret = clk_prepare_enable(sport->ipg_clk); if (ret) return ret; @@ -1219,10 +1286,33 @@ static int lpuart32_startup(struct uart_port *port) temp = lpuart32_read(sport->port.membase + UARTFIFO); sport->txfifo_size = 0x1 << (((temp >> UARTFIFO_TXSIZE_OFF) & - UARTFIFO_FIFOSIZE_MASK) - 1); + UARTFIFO_FIFOSIZE_MASK) + 1); + + sport->port.fifosize = sport->txfifo_size; sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & - UARTFIFO_FIFOSIZE_MASK) - 1); + UARTFIFO_FIFOSIZE_MASK) + 1); + + sport->txfifo_watermark = sport->txfifo_size >> 1; + sport->rxfifo_watermark = sport->rxfifo_size >> 1; + sport->rxdma_len = sport->rxfifo_watermark; + + if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) { + sport->lpuart_dma_rx_use = true; + setup_timer(&sport->lpuart_timer, lpuart_timer_func, + (unsigned long)sport); + } else + sport->lpuart_dma_rx_use = false; + + + if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) { + init_waitqueue_head(&sport->dma_wait); + sport->lpuart_dma_tx_use = true; + temp = lpuart32_read(sport->port.membase + UARTBAUD); + temp |= UARTBAUD_TDMAE; + lpuart32_write(temp, sport->port.membase + UARTBAUD); + } else + sport->lpuart_dma_tx_use = false; ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0, DRIVER_NAME, sport); @@ -1230,9 +1320,13 @@ static int lpuart32_startup(struct uart_port *port) return ret; spin_lock_irqsave(&sport->port.lock, flags); + + lpuart32_setup_watermark(sport); + temp = lpuart32_read(sport->port.membase + UARTCTRL); - temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | UARTCTRL_TE); + temp |= (UARTCTRL_RIE | UARTCTRL_RE | UARTCTRL_TE); temp |= UARTCTRL_ILIE; + temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF; lpuart32_write(temp, sport->port.membase + UARTCTRL); spin_unlock_irqrestore(&sport->port.lock, flags); @@ -1244,6 +1338,7 @@ static void lpuart_shutdown(struct uart_port *port) struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned char temp; unsigned long flags; + int ret; spin_lock_irqsave(&port->lock, flags); @@ -1258,18 +1353,23 @@ static void lpuart_shutdown(struct uart_port *port) devm_free_irq(port->dev, port->irq, sport); if (sport->lpuart_dma_rx_use) { - del_timer_sync(&sport->lpuart_timer); + ret = wait_event_interruptible_timeout(sport->dma_wait, + !sport->dma_rx_in_progress, msecs_to_jiffies(1)); + if (ret <= 0) { + sport->dma_rx_in_progress = false; + dmaengine_terminate_all(sport->dma_rx_chan); + } lpuart_dma_rx_free(&sport->port); + del_timer_sync(&sport->lpuart_timer); } if (sport->lpuart_dma_tx_use) { - if (wait_event_interruptible(sport->dma_wait, - !sport->dma_tx_in_progress) != false) { + ret = wait_event_interruptible_timeout(sport->dma_wait, + !sport->dma_tx_in_progress, msecs_to_jiffies(1)); + if (ret <= 0) { sport->dma_tx_in_progress = false; dmaengine_terminate_all(sport->dma_tx_chan); } - - lpuart_stop_tx(port); } clk_disable_unprepare(sport->per_clk); clk_disable_unprepare(sport->ipg_clk); @@ -1280,19 +1380,46 @@ static void lpuart32_shutdown(struct uart_port *port) struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long temp; unsigned long flags; + int ret; spin_lock_irqsave(&port->lock, flags); + /* disable Rx/Tx DMA */ + temp = lpuart32_read(sport->port.membase + UARTBAUD); + temp &= ~(UARTBAUD_TDMAE | 
UARTBAUD_RDMAE); + lpuart32_write(temp, sport->port.membase + UARTBAUD); + /* disable Rx/Tx and interrupts */ temp = lpuart32_read(port->membase + UARTCTRL); - temp &= ~(UARTCTRL_TE | UARTCTRL_RE | - UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE); + temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_TIE | + UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_ILIE | + UARTCTRL_LOOPS); lpuart32_write(temp, port->membase + UARTCTRL); + lpuart32_write(0, sport->port.membase + UARTMODIR); spin_unlock_irqrestore(&port->lock, flags); devm_free_irq(port->dev, port->irq, sport); + if (sport->lpuart_dma_rx_use) { + ret = wait_event_interruptible_timeout(sport->dma_wait, + !sport->dma_rx_in_progress, msecs_to_jiffies(1)); + if (ret <= 0) { + sport->dma_rx_in_progress = false; + dmaengine_terminate_all(sport->dma_rx_chan); + } + lpuart_dma_rx_free(&sport->port); + del_timer_sync(&sport->lpuart_timer); + } + + if (sport->lpuart_dma_tx_use) { + ret = wait_event_interruptible_timeout(sport->dma_wait, + !sport->dma_tx_in_progress, msecs_to_jiffies(1)); + if (ret <= 0) { + sport->dma_tx_in_progress = false; + dmaengine_terminate_all(sport->dma_tx_chan); + } + } clk_disable_unprepare(sport->per_clk); clk_disable_unprepare(sport->ipg_clk); } @@ -1408,6 +1535,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); + if (sport->lpuart_dma_rx_use) { + /* Calculate delay for 1.5 DMA buffers */ + sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) * + FSL_UART_RX_DMA_BUFFER_SIZE * 3 / + sport->rxfifo_size / 2; + dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n", + sport->dma_rx_timeout * 1000 / HZ, sport->port.timeout); + if (sport->dma_rx_timeout < msecs_to_jiffies(20)) + sport->dma_rx_timeout = msecs_to_jiffies(20); + } + /* wait transmit engin complete */ while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC)) barrier(); @@ -1432,25 +1570,60 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, /* restore control register */ writeb(old_cr2, sport->port.membase + UARTCR2); - /* - * If new baud rate is set, we will also need to update the Ring buffer - * length according to the selected baud rate and restart Rx DMA path. 
- */ - if (old) { - if (sport->lpuart_dma_rx_use) { - del_timer_sync(&sport->lpuart_timer); - lpuart_dma_rx_free(&sport->port); + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static void +lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate) +{ + u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp; + u32 clk = sport->port.uartclk; + + baud_diff = baudrate; + osr = 0; + sbr = 0; + for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) { + tmp_sbr = (clk / (baudrate * tmp_osr)); + if (tmp_sbr == 0) + tmp_sbr = 1; + + /*calculate difference in actual buad w/ current values */ + tmp_diff = (clk / (tmp_osr * tmp_sbr)); + tmp_diff = tmp_diff - baudrate; + + /* select best values between sbr and sbr+1 */ + if (tmp_diff > (baudrate - (clk / (tmp_osr * (tmp_sbr + 1))))) { + tmp_diff = baudrate - (clk / (tmp_osr * (tmp_sbr + 1))); + tmp_sbr++; } - if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) { - sport->lpuart_dma_rx_use = true; - rx_dma_timer_init(sport); - } else { - sport->lpuart_dma_rx_use = false; + if (tmp_diff <= baud_diff) { + baud_diff = tmp_diff; + osr = tmp_osr; + sbr = tmp_sbr; } } - spin_unlock_irqrestore(&sport->port.lock, flags); + /*TODO handle buadrate outside acceptable rate + * if (baudDiff > ((config->baudRate_Bps / 100) * 3)) + * { + * Unacceptable baud rate difference of more than 3% + * return kStatus_LPUART_BaudrateNotSupport; + * } + * + */ + tmp = lpuart32_read(sport->port.membase + UARTBAUD); + + if ((osr > 3) && (osr < 8)) + tmp |= UARTBAUD_BOTHEDGE; + + tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT); + tmp |= (((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT); + + tmp &= ~UARTBAUD_SBR_MASK; + tmp |= sbr & UARTBAUD_SBR_MASK; + + lpuart32_write(tmp, sport->port.membase + UARTBAUD); } static void @@ -1462,7 +1635,6 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, unsigned long ctrl, old_ctrl, bd, modem; unsigned int baud; unsigned int old_csize = old ? 
old->c_cflag & CSIZE : CS8; - unsigned int sbr, osr; ctrl = old_ctrl = lpuart32_read(sport->port.membase + UARTCTRL); bd = lpuart32_read(sport->port.membase + UARTBAUD); @@ -1502,7 +1674,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, } if (termios->c_cflag & CSTOPB) - termios->c_cflag &= ~CSTOPB; + bd |= UARTBAUD_SBNS; + else + bd &= ~UARTBAUD_SBNS; /* parity must be enabled when CS7 to match 8-bits format */ if ((termios->c_cflag & CSIZE) == CS7) @@ -1524,7 +1698,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, } /* ask the core to calculate the divisor */ - baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); spin_lock_irqsave(&sport->port.lock, flags); @@ -1551,7 +1725,19 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); - /* wait transmit engin complete */ + if (sport->lpuart_dma_rx_use) { + /* Calculate delay for 1.5 DMA buffers */ + sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) * + sport->rxdma_len * 3 / + sport->rxfifo_size / 2; + dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n", + sport->dma_rx_timeout * 1000 / HZ, sport->port.timeout); + if (sport->dma_rx_timeout < msecs_to_jiffies(20)) + sport->dma_rx_timeout = msecs_to_jiffies(20); + } + + /* wait transmit engin complete, there disable flow control */ + lpuart32_write(0, sport->port.membase + UARTMODIR); while (!(lpuart32_read(sport->port.membase + UARTSTAT) & UARTSTAT_TC)) barrier(); @@ -1559,18 +1745,10 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, lpuart32_write(old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), sport->port.membase + UARTCTRL); - /* For i.MX8QM LPUART */ - osr = (bd >> UARTBAUD_OSR_SHIFT) & UARTBAUD_OSR_MASK; - sbr = sport->port.uartclk / (baud * (osr + 1)); - - bd &= ~UARTBAUD_SBR_MASK; - bd |= sbr & UARTBAUD_SBR_MASK; - bd |= UARTBAUD_BOTHEDGE; - bd &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE); lpuart32_write(bd, sport->port.membase + UARTBAUD); + lpuart32_serial_setbrg(sport, baud); lpuart32_write(modem, sport->port.membase + UARTMODIR); lpuart32_write(ctrl, sport->port.membase + UARTCTRL); - /* restore control register */ spin_unlock_irqrestore(&sport->port.lock, flags); } @@ -1705,6 +1883,7 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count) cr |= (UARTCTRL_TE | UARTCTRL_RE); cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE); lpuart32_write(cr, sport->port.membase + UARTCTRL); + uart_console_write(&sport->port, s, count, lpuart32_console_putchar); /* wait for transmitter finish complete and restore CR2 */ @@ -1775,7 +1954,7 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud, int *parity, int *bits) { unsigned long cr, bd; - unsigned int sbr, uartclk, baud_raw; + unsigned int sbr, osr, uartclk, baud_raw; cr = lpuart32_read(sport->port.membase + UARTCTRL); cr &= UARTCTRL_TE | UARTCTRL_RE; @@ -1802,16 +1981,13 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud, bd = lpuart32_read(sport->port.membase + UARTBAUD); bd &= UARTBAUD_SBR_MASK; sbr = bd; + osr = (bd >> UARTBAUD_OSR_SHIFT) & UARTBAUD_OSR_MASK; if (sport->per_clk) uartclk = clk_get_rate(sport->per_clk); else uartclk = clk_get_rate(sport->ipg_clk); - /* - * baud = mod_clk/(16*(sbr[13]+(brfa)/32) - */ - baud_raw = uartclk / (16 * sbr); - + baud_raw = uartclk / ((osr + 1) * sbr); if (*baud != baud_raw) 
printk(KERN_INFO "Serial: Console lpuart rounded baud rate" "from %d to %d\n", baud_raw, *baud); @@ -1860,8 +2036,7 @@ static int __init lpuart_console_setup(struct console *co, char *options) else lpuart_setup_watermark(sport); - uart_set_options(&sport->port, co, baud, parity, bits, flow); - return 0; + return uart_set_options(&sport->port, co, baud, parity, bits, flow); } static struct uart_driver lpuart_reg; @@ -1995,7 +2170,8 @@ static int lpuart_probe(struct platform_device *pdev) sport->port.ops = &lpuart_pops; sport->port.flags = UPF_BOOT_AUTOCONF; - sport->port.rs485_config = lpuart_config_rs485; + if (!sport->lpuart32) + sport->port.rs485_config = lpuart_config_rs485; sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(sport->ipg_clk)) { @@ -2039,7 +2215,16 @@ static int lpuart_probe(struct platform_device *pdev) return ret; } - if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) { + sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); + if (!sport->dma_tx_chan) + dev_info(sport->port.dev, "NO DMA tx channel, run at cpu mode\n"); + + sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx"); + if (!sport->dma_rx_chan) + dev_info(sport->port.dev, "NO DMA rx channel, run at cpu mode\n"); + + if (!sport->lpuart32 && + of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) { sport->port.rs485.flags |= SER_RS485_ENABLED; sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND; writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM); @@ -2069,6 +2254,7 @@ static int lpuart_remove(struct platform_device *pdev) static int lpuart_suspend(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); + struct tty_port *port = &sport->port.state->port; unsigned long temp; int ret; @@ -2076,87 +2262,169 @@ static int lpuart_suspend(struct device *dev) if (ret) return ret; + uart_suspend_port(&lpuart_reg, &sport->port); + if (sport->lpuart32) { /* disable Rx/Tx and interrupts */ temp = lpuart32_read(sport->port.membase + UARTCTRL); temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); + if (!console_suspend_enabled && uart_console(&sport->port) && + !sport->port.irq_wake) + temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE); lpuart32_write(temp, sport->port.membase + UARTCTRL); } else { /* disable Rx/Tx and interrupts */ temp = readb(sport->port.membase + UARTCR2); temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE); + if (!console_suspend_enabled && uart_console(&sport->port) && + !sport->port.irq_wake) + temp &= ~UARTCR2_RIE; writeb(temp, sport->port.membase + UARTCR2); } - uart_suspend_port(&lpuart_reg, &sport->port); - - if (sport->lpuart_dma_rx_use) { - /* - * EDMA driver during suspend will forcefully release any - * non-idle DMA channels. If port wakeup is enabled or if port - * is console port or 'no_console_suspend' is set the Rx DMA - * cannot resume as as expected, hence gracefully release the - * Rx DMA path before suspend and start Rx DMA path on resume. 
- */ - if (sport->port.irq_wake) { - del_timer_sync(&sport->lpuart_timer); - lpuart_dma_rx_free(&sport->port); - } - - /* Disable Rx DMA to use UART port as wakeup source */ - writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_RDMAS, - sport->port.membase + UARTCR5); + if (sport->lpuart_dma_rx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + /* it needs uart interrupt as wakeup source in low power mode */ + lpuart_dma_stop(sport); + dmaengine_terminate_all(sport->dma_rx_chan); + del_timer_sync(&sport->lpuart_timer); + lpuart_dma_rx_free(&sport->port); } - if (sport->lpuart_dma_tx_use) { + if (sport->lpuart_dma_tx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + if (sport->lpuart32) { + temp = lpuart32_read(sport->port.membase + UARTBAUD); + temp &= ~UARTBAUD_TDMAE; + lpuart32_write(temp, sport->port.membase + UARTBAUD); + } else { + temp = readb(sport->port.membase + UARTCR5); + temp &= ~UARTCR5_TDMAS; + writeb(temp, sport->port.membase + UARTCR5); + } sport->dma_tx_in_progress = false; dmaengine_terminate_all(sport->dma_tx_chan); } clk_disable_unprepare(sport->ipg_clk); + pinctrl_pm_select_sleep_state(dev); return 0; } -static int lpuart_resume(struct device *dev) +static inline void lpuart32_resume_init(struct lpuart_port *sport) { - struct lpuart_port *sport = dev_get_drvdata(dev); + struct tty_port *port = &sport->port.state->port; unsigned long temp; - clk_prepare_enable(sport->ipg_clk); + lpuart32_setup_watermark(sport); + + temp = lpuart32_read(sport->port.membase + UARTCTRL); + temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | + UARTCTRL_TE | UARTCTRL_ILIE); + + if (sport->dma_rx_chan) + temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE); + + if (sport->dma_tx_chan) + temp &= ~(UARTCTRL_TIE | UARTCTRL_TE); + + lpuart32_write(temp, sport->port.membase + UARTCTRL); + + if (sport->lpuart_dma_rx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + if (!lpuart_dma_rx_request(&sport->port)) { + sport->lpuart_dma_rx_use = true; + setup_timer(&sport->lpuart_timer, lpuart_timer_func, + (unsigned long)sport); + } else { + sport->lpuart_dma_rx_use = false; + } - if (sport->lpuart32) { - lpuart32_setup_watermark(sport); temp = lpuart32_read(sport->port.membase + UARTCTRL); - temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | - UARTCTRL_TE | UARTCTRL_ILIE); + temp |= (UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE); + temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF; lpuart32_write(temp, sport->port.membase + UARTCTRL); - } else { - lpuart_setup_watermark(sport); - temp = readb(sport->port.membase + UARTCR2); - temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE); - writeb(temp, sport->port.membase + UARTCR2); } - if (sport->lpuart_dma_rx_use) { - if (sport->port.irq_wake) { - if (!lpuart_start_rx_dma(sport)) { - sport->lpuart_dma_rx_use = true; - rx_dma_timer_init(sport); - } else { - sport->lpuart_dma_rx_use = false; - } + if (sport->lpuart_dma_tx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + if (!lpuart_dma_tx_request(&sport->port)) { + init_waitqueue_head(&sport->dma_wait); + temp = lpuart32_read(sport->port.membase + UARTBAUD); + temp |= UARTBAUD_TDMAE; + lpuart32_write(temp, sport->port.membase + UARTBAUD); + } else { + sport->lpuart_dma_tx_use = false; + } + + temp = lpuart32_read(sport->port.membase + UARTCTRL); + temp |= UARTCTRL_TE; + lpuart32_write(temp, sport->port.membase + UARTCTRL); + } +} + +static inline void lpuart_resume_init(struct lpuart_port *sport) +{ + struct tty_port *port = 
&sport->port.state->port; + unsigned char temp; + + lpuart_setup_watermark(sport); + temp = readb(sport->port.membase + UARTCR2); + temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE); + + if (sport->dma_rx_chan) + temp &= ~(UARTCR2_RIE | UARTCR2_RE); + + if (sport->dma_tx_chan) + temp &= ~(UARTCR2_TIE | UARTCR2_TE); + + writeb(temp, sport->port.membase + UARTCR2); + + if (sport->lpuart_dma_rx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + if (!lpuart_dma_rx_request(&sport->port)) { + sport->lpuart_dma_rx_use = true; + setup_timer(&sport->lpuart_timer, + lpuart_timer_func, + (unsigned long)sport); + } else { + sport->lpuart_dma_rx_use = false; } + + temp = readb(sport->port.membase + UARTCR2); + temp |= (UARTCR2_RIE | UARTCR2_RE); + writeb(temp, sport->port.membase + UARTCR2); } - if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) { + if (sport->lpuart_dma_tx_use && sport->port.irq_wake && + tty_port_initialized(port)) { + if (!lpuart_dma_tx_request(&sport->port)) { init_waitqueue_head(&sport->dma_wait); - sport->lpuart_dma_tx_use = true; - writeb(readb(sport->port.membase + UARTCR5) | - UARTCR5_TDMAS, sport->port.membase + UARTCR5); - } else { - sport->lpuart_dma_tx_use = false; + temp = readb(sport->port.membase + UARTCR5); + temp |= UARTCR5_TDMAS; + writeb(temp, sport->port.membase + UARTCR5); + } else { + sport->lpuart_dma_tx_use = false; + } + + temp = readb(sport->port.membase + UARTCR2); + temp |= UARTCR2_TE; + writeb(temp, sport->port.membase + UARTCR2); } +} + +static int lpuart_resume(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + + pinctrl_pm_select_default_state(dev); + clk_prepare_enable(sport->ipg_clk); + + if (sport->lpuart32) + lpuart32_resume_init(sport); + else + lpuart_resume_init(sport); uart_resume_port(&lpuart_reg, &sport->port); |