diff options
Diffstat (limited to 'drivers/mtd/nand/lba')
-rw-r--r-- | drivers/mtd/nand/lba/Makefile | 2
-rw-r--r-- | drivers/mtd/nand/lba/gpmi-transport.c | 832
-rw-r--r-- | drivers/mtd/nand/lba/gpmi.h | 103
-rw-r--r-- | drivers/mtd/nand/lba/lba-blk.c | 345
-rw-r--r-- | drivers/mtd/nand/lba/lba-core.c | 619
-rw-r--r-- | drivers/mtd/nand/lba/lba.h | 140
6 files changed, 2041 insertions, 0 deletions
diff --git a/drivers/mtd/nand/lba/Makefile b/drivers/mtd/nand/lba/Makefile new file mode 100644 index 000000000000..0e576bfa856d --- /dev/null +++ b/drivers/mtd/nand/lba/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MTD_NAND_GPMI_LBA) += gpmi_lba.o +gpmi_lba-objs += gpmi-transport.o lba-core.o lba-blk.o diff --git a/drivers/mtd/nand/lba/gpmi-transport.c b/drivers/mtd/nand/lba/gpmi-transport.c new file mode 100644 index 000000000000..0073842db6b9 --- /dev/null +++ b/drivers/mtd/nand/lba/gpmi-transport.c @@ -0,0 +1,832 @@ +/* + * Freescale STMP37XX/STMP378X GPMI transport layer for LBA driver + * + * Author: Dmitrij Frasenyak <sed@embeddedalley.com> + * Clock settings and hw init comes from + * gpmi driver by Dmitry Pervushin. + * + * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2009 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/ctype.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <asm/div64.h> +#include <mach/platform.h> +#include <mach/regs-apbh.h> +#include <mach/regs-gpmi.h> +#include <mach/stmp3xxx.h> +#include <mach/dma.h> +#include "gpmi.h" +#include "lba.h" + +struct lba_data *g_data; +static int max_chips = 1; +static long clk = -1; + +struct gpmi_nand_timing gpmi_safe_timing = { + .address_setup = 25, + .data_setup = 80, + .data_hold = 60, + .dsample_time = 6, +}; + 
+/****************************************************************************** + * HW init part + ******************************************************************************/ + +/** + * gpmi_irq - IRQ handler + * + * @irq: irq no + * @context: IRQ context, pointer to gpmi_nand_data + */ +static irqreturn_t gpmi_irq(int irq, void *context) +{ + struct lba_data *data = context; + int i; + + for (i = 0; i < max_chips; i++) { + if (stmp3xxx_dma_is_interrupt(data->nand[i].dma_ch)) { + stmp3xxx_dma_clear_interrupt(data->nand[i].dma_ch); + complete(&data->nand[i].done); + } + + } + stmp3xxx_clearl(BM_GPMI_CTRL1_DEV_IRQ | BM_GPMI_CTRL1_TIMEOUT_IRQ, + REGS_GPMI_BASE + HW_GPMI_CTRL1); + return IRQ_HANDLED; +} + +static inline u32 gpmi_cycles_ceil(u32 ntime, u32 period) +{ + int k; + + k = (ntime + period - 1) / period; + if (k == 0) + k++; + return k ; +} + + +/** + * gpmi_set_timings - set GPMI timings + * @pdev: pointer to GPMI platform device + * @tm: pointer to structure &gpmi_nand_timing with new timings + * + * During initialization, GPMI uses safe sub-optimal timings, which + * can be changed after reading boot control blocks + */ +void gpmi_set_timings(struct lba_data *data, struct gpmi_nand_timing *tm) +{ + u32 period_ns = 1000000 / clk_get_rate(data->clk) + 1; + u32 address_cycles, data_setup_cycles; + u32 data_hold_cycles, data_sample_cycles; + u32 busy_timeout; + u32 t0, reg; + + address_cycles = gpmi_cycles_ceil(tm->address_setup, period_ns); + data_setup_cycles = gpmi_cycles_ceil(tm->data_setup, period_ns); + data_hold_cycles = gpmi_cycles_ceil(tm->data_hold, period_ns); + data_sample_cycles = gpmi_cycles_ceil(tm->dsample_time + period_ns / 4, + period_ns / 2); + busy_timeout = gpmi_cycles_ceil(10000000 / 4096, period_ns); + + t0 = address_cycles << BP_GPMI_TIMING0_ADDRESS_SETUP; + t0 |= data_setup_cycles << BP_GPMI_TIMING0_DATA_SETUP; + t0 |= data_hold_cycles << BP_GPMI_TIMING0_DATA_HOLD; + __raw_writel(t0, REGS_GPMI_BASE + HW_GPMI_TIMING0); + + 
__raw_writel(busy_timeout, REGS_GPMI_BASE + HW_GPMI_TIMING1); + + reg = __raw_readl(REGS_GPMI_BASE + HW_GPMI_CTRL1); +#ifdef CONFIG_ARCH_STMP378X + reg &= ~BM_GPMI_CTRL1_RDN_DELAY; + reg |= data_sample_cycles << BP_GPMI_CTRL1_RDN_DELAY; +#else + reg &= ~BM_GPMI_CTRL1_DSAMPLE_TIME; + reg |= data_sample_cycles << BP_GPMI_CTRL1_DSAMPLE_TIME; +#endif + __raw_writel(reg, REGS_GPMI_BASE + HW_GPMI_CTRL1); +} + +void queue_plug(struct lba_data *data) +{ + u32 ctrl1; + clk_enable(data->clk); + if (clk <= 0) + clk = 24000; /* safe setting, some chips do not work on + speeds >= 24kHz */ + clk_set_rate(data->clk, clk); + + clk = clk_get_rate(data->clk); + + + stmp3xxx_reset_block(HW_GPMI_CTRL0 + REGS_GPMI_BASE, 1); + + ctrl1 = __raw_readl(REGS_GPMI_BASE + HW_GPMI_CTRL1); + + /* write protection OFF */ + ctrl1 |= BM_GPMI_CTRL1_DEV_RESET; + + /* IRQ polarity */ + ctrl1 |= BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY; + + /* ...and ECC module */ + /*HW_GPMI_CTRL1_SET(bch_mode());*/ + + /* choose NAND mode (1 means ATA, 0 - NAND */ + ctrl1 &= ~BM_GPMI_CTRL1_GPMI_MODE; + + __raw_writel(ctrl1, REGS_GPMI_BASE + HW_GPMI_CTRL1); + + gpmi_set_timings(data, &gpmi_safe_timing); +} + +void queue_release(struct lba_data *data) +{ + stmp3xxx_setl(BM_GPMI_CTRL0_SFTRST, REGS_GPMI_BASE + HW_GPMI_CTRL0); + + clk_disable(data->clk); +} + + +/** + * gpmi_init_hw - initialize the hardware + * @pdev: pointer to platform device + * + * Initialize GPMI hardware and set default (safe) timings for NAND access. 
+ * Returns error code or 0 on success + */ +static int gpmi_init_hw(struct platform_device *pdev, int request_pins) +{ + struct lba_data *data = platform_get_drvdata(pdev); + struct gpmi_platform_data *gpd = + (struct gpmi_platform_data *)pdev->dev.platform_data; + int err = 0; + + data->clk = clk_get(NULL, "gpmi"); + if (IS_ERR(data->clk)) { + err = PTR_ERR(data->clk); + dev_err(&pdev->dev, "cannot set failsafe clockrate\n"); + goto out; + } + if (request_pins) + gpd->pinmux(1); + + queue_plug(data); + +out: + return err; +} +/** + * gpmi_release_hw - free the hardware + * @pdev: pointer to platform device + * + * In opposite to gpmi_init_hw, release all acquired resources + */ +static void gpmi_release_hw(struct platform_device *pdev) +{ + struct gpmi_platform_data *gpd = + (struct gpmi_platform_data *)pdev->dev.platform_data; + struct lba_data *data = platform_get_drvdata(pdev); + + queue_release(data); + clk_put(data->clk); + gpd->pinmux(0); +} + + +/** + * gpmi_alloc_buffers - allocate DMA buffers for one chip + * + * @pdev: GPMI platform device + * @g: pointer to structure associated with NAND chip + * + * Allocate buffer using dma_alloc_coherent + */ +static int gpmi_alloc_buffers(struct platform_device *pdev, + struct gpmi_perchip_data *g) +{ + g->cmd_buffer = dma_alloc_coherent(&pdev->dev, + g->cmd_buffer_size, + &g->cmd_buffer_handle, GFP_DMA); + if (!g->cmd_buffer) + goto out1; + + g->write_buffer = dma_alloc_coherent(&pdev->dev, + g->write_buffer_size, + &g->write_buffer_handle, GFP_DMA); + if (!g->write_buffer) + goto out2; + + g->data_buffer = dma_alloc_coherent(&pdev->dev, + g->data_buffer_size, + &g->data_buffer_handle, GFP_DMA); + if (!g->data_buffer) + goto out3; + + g->oob_buffer = dma_alloc_coherent(&pdev->dev, + g->oob_buffer_size, + &g->oob_buffer_handle, GFP_DMA); + if (!g->oob_buffer) + goto out4; + + g->cmdtail_buffer = dma_alloc_coherent(&pdev->dev, + g->cmdtail_buffer_size, + &g->cmdtail_buffer_handle, GFP_DMA); + if (!g->oob_buffer) + 
goto out5; + + return 0; + +out5: + dma_free_coherent(&pdev->dev, g->oob_buffer_size, + g->oob_buffer, g->oob_buffer_handle); + +out4: + dma_free_coherent(&pdev->dev, g->data_buffer_size, + g->data_buffer, g->data_buffer_handle); +out3: + dma_free_coherent(&pdev->dev, g->write_buffer_size, + g->write_buffer, g->write_buffer_handle); +out2: + dma_free_coherent(&pdev->dev, g->cmd_buffer_size, + g->cmd_buffer, g->cmd_buffer_handle); +out1: + return -ENOMEM; +} + +/** + * gpmi_free_buffers - free buffers allocated by gpmi_alloc_buffers + * + * @pdev: platform device + * @g: pointer to structure associated with NAND chip + * + * Deallocate buffers on exit + */ +static void gpmi_free_buffers(struct platform_device *pdev, + struct gpmi_perchip_data *g) +{ + dma_free_coherent(&pdev->dev, g->oob_buffer_size, + g->oob_buffer, g->oob_buffer_handle); + dma_free_coherent(&pdev->dev, g->write_buffer_size, + g->write_buffer, g->write_buffer_handle); + dma_free_coherent(&pdev->dev, g->cmd_buffer_size, + g->cmd_buffer, g->cmd_buffer_handle); + dma_free_coherent(&pdev->dev, g->cmdtail_buffer_size, + g->cmdtail_buffer, g->cmdtail_buffer_handle); + dma_free_coherent(&pdev->dev, g->data_buffer_size, + g->data_buffer, g->data_buffer_handle); +} + + +/****************************************************************************** + * Arch specific chain_* callbaks + ******************************************************************************/ + +/** + * chain_w4r - Initialize descriptor to perform W4R operation + * + * @chain: Descriptor to use + * @cs: CS for this operation + * + * Due to HW bug we have to put W4R into separate desc. 
+ */ +static void chain_w4r(struct stmp3xxx_dma_descriptor *chain, int cs) +{ + chain->command->cmd = + (4 << BP_APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_NANDWAIT4READY | + BM_APBH_CHn_CMD_NANDLOCK | + BM_APBH_CHn_CMD_CHAIN | + (BV_APBH_CHn_CMD_COMMAND__NO_DMA_XFER << BP_APBH_CHn_CMD_COMMAND); + chain->command->pio_words[0] = + (BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY << BP_GPMI_CTRL0_COMMAND_MODE) | + BM_GPMI_CTRL0_WORD_LENGTH | + (BV_GPMI_CTRL0_ADDRESS__NAND_DATA << BP_GPMI_CTRL0_ADDRESS) | + (cs << BP_GPMI_CTRL0_CS); + chain->command->pio_words[1] = 0; + chain->command->pio_words[2] = 0; + chain->command->pio_words[3] = 0; + chain->command->buf_ptr = 0; +} + +/** + * chain_cmd - Initialize descriptor to push CMD to the bus + * + * @chain: Descriptor to use + * @cmd_handle: dma_addr_t pointer that holds the command + * @lba_cmd: flags and lenghth of this command. + * @cs: CS for this operation + * + * CLE || CLE+ALE + */ +static void chain_cmd(struct stmp3xxx_dma_descriptor *chain, + dma_addr_t cmd_handle, + struct lba_cmd *lba_cmd, + int cs) +{ + /* output command */ + chain->command->cmd = + (lba_cmd->len << BP_APBH_CHn_CMD_XFER_COUNT) | + (3 << BP_APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_NANDLOCK | + (BV_APBH_CHn_CMD_COMMAND__DMA_READ << BP_APBH_CHn_CMD_COMMAND); + chain->command->cmd |= BM_APBH_CHn_CMD_CHAIN; + chain->command->pio_words[0] = + (BV_GPMI_CTRL0_COMMAND_MODE__WRITE << BP_GPMI_CTRL0_COMMAND_MODE) | + BM_GPMI_CTRL0_WORD_LENGTH | + BM_GPMI_CTRL0_LOCK_CS | + (cs << BP_GPMI_CTRL0_CS) | + (BV_GPMI_CTRL0_ADDRESS__NAND_CLE << BP_GPMI_CTRL0_ADDRESS) | + (lba_cmd->len << BP_GPMI_CTRL0_XFER_COUNT); + chain->command->pio_words[1] = 0; + chain->command->pio_words[2] = 0; + chain->command->buf_ptr = cmd_handle; + + if (lba_cmd->flag & FE_CMD_INC) + chain->command->pio_words[0] |= BM_GPMI_CTRL0_ADDRESS_INCREMENT; +/*BUG if (lba_cmd->flag & FE_W4R) */ +/* chain->command->cmd |= 
BM_APBH_CHn_CMD_NANDWAIT4READY; */ +} + +/** + * chain_cmd - Initialize descriptor to read data from the bus + * + * @chain: Descriptor to use + * @data_handle: dma_addr_t pointer to buffer to store data + * @data_len: the size of the data buffer to read + * @cmd_handle: dma_addr_t pointer that holds the command + * @lba_cmd: flags and lenghth of this command. + * @cs: CS for this operation + */ +static void chain_read_data(struct stmp3xxx_dma_descriptor *chain, + dma_addr_t data_handle, + dma_addr_t data_len, + struct lba_cmd *lba_cmd, + int cs) +{ + chain->command->cmd = + (data_len << BP_APBH_CHn_CMD_XFER_COUNT) | + (4 << BP_APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_NANDLOCK | + BM_APBH_CHn_CMD_CHAIN | + (BV_APBH_CHn_CMD_COMMAND__DMA_WRITE << BP_APBH_CHn_CMD_COMMAND); + chain->command->pio_words[0] = + (BV_GPMI_CTRL0_COMMAND_MODE__READ << BP_GPMI_CTRL0_COMMAND_MODE) | + BM_GPMI_CTRL0_WORD_LENGTH | + BM_GPMI_CTRL0_LOCK_CS | + (cs << BP_GPMI_CTRL0_CS) | + (BV_GPMI_CTRL0_ADDRESS__NAND_DATA << BP_GPMI_CTRL0_ADDRESS) | + (data_len << BP_GPMI_CTRL0_XFER_COUNT); + + chain->command->pio_words[1] = 0; + chain->command->pio_words[2] = 0; + chain->command->pio_words[3] = 0; + chain->command->buf_ptr = data_handle; + + if (lba_cmd->flag & FE_CMD_INC) + chain->command->pio_words[0] |= BM_GPMI_CTRL0_ADDRESS_INCREMENT; +/*BUG if (lba_cmd->flag & FE_W4R) */ +/* chain->command->cmd |= BM_APBH_CHn_CMD_NANDWAIT4READY; */ + +} + +/** + * chain_cmd - Initialize descriptor to read data from the bus + * + * @chain: Descriptor to use + * @data_handle: dma_addr_t pointer to buffer to read data from + * @data_len: the size of the data buffer to write + * @cmd_handle: dma_addr_t pointer that holds the command + * @lba_cmd: flags and lenghth of this command. 
+ * @cs: CS for this operation + */ +static void chain_write_data(struct stmp3xxx_dma_descriptor *chain, + dma_addr_t data_handle, + int data_len, + struct lba_cmd *lba_cmd, + int cs) +{ + + chain->command->cmd = + (data_len << BP_APBH_CHn_CMD_XFER_COUNT) | + (4 << BP_APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_NANDLOCK | + BM_APBH_CHn_CMD_CHAIN | + (BV_APBH_CHn_CMD_COMMAND__DMA_READ << BP_APBH_CHn_CMD_COMMAND); + chain->command->pio_words[0] = + (BV_GPMI_CTRL0_COMMAND_MODE__WRITE << BP_GPMI_CTRL0_COMMAND_MODE) | + BM_GPMI_CTRL0_WORD_LENGTH | + BM_GPMI_CTRL0_LOCK_CS | + (cs << BP_GPMI_CTRL0_CS) | + (BV_GPMI_CTRL0_ADDRESS__NAND_DATA << BP_GPMI_CTRL0_ADDRESS) | + (data_len << BP_GPMI_CTRL0_XFER_COUNT); + + chain->command->pio_words[1] = 0; + chain->command->pio_words[2] = 0; + chain->command->pio_words[3] = 0; + chain->command->buf_ptr = data_handle; + + if (lba_cmd->flag & FE_CMD_INC) + chain->command->pio_words[0] |= BM_GPMI_CTRL0_ADDRESS_INCREMENT; +/*BUG if (lba_cmd->flag & FE_W4R) */ +/* chain->command->cmd |= BM_APBH_CHn_CMD_NANDWAIT4READY; */ + +} + + +/****************************************************************************** + * Interface to arch independent part + ******************************************************************************/ +/** + * queue_cmd - Setup a chain of descriptors + * + * @priv: private data passed + * @cmd_buf: pointer to command buffer (to be removed) + * @cmd_handle: dma_addr_t pointer that holds the command + * @cmd_len: the size of the command buffer (to be removed) + * @data_handle: dma_addr_t pointer to a data buffer + * @data_len: the size of the data buffer + * @cmd_flags: commands flags + */ +int queue_cmd(void *priv, + uint8_t *cmd_buf, dma_addr_t cmd_handle, int cmd_len, + dma_addr_t data, int data_len, + struct lba_cmd *cmd_flags) +{ + + struct gpmi_perchip_data *g = priv; + unsigned long flags; + int idx; + int ret = 0; + struct stmp3xxx_dma_descriptor *chain ; + int i; + + if (!g 
|| !(cmd_buf || cmd_handle)) + BUG(); + + spin_lock_irqsave(&g->lock, flags); + + /* Keep it for debug purpose */ + chain = g->d; + for (i = g->d_tail; i < GPMI_DMA_MAX_CHAIN; i++) { + chain[i].command->cmd = 0; + chain[i].command->buf_ptr = 0; + } + /* End */ + + if (!cmd_handle) { + if (!cmd_buf) + BUG(); + memcpy(g->cmd_buffer, cmd_buf, cmd_len); + cmd_handle = g->cmd_buffer_handle; + } + + idx = g->d_tail; + chain = &g->d[idx]; + + do { + if (!cmd_flags) + BUG(); + + if (cmd_flags->flag & FE_W4R) { + /* there seems to be HW BUG with W4R flag. + * IRQ controller hangs forever when it's combined + * with real operation. + */ + chain_w4r(chain, g->cs); + chain++; idx++; + } + + + switch (cmd_flags->flag & F_MASK) { + + case F_CMD: + chain_cmd(chain, cmd_handle, cmd_flags, g->cs); + break; + case F_DATA_READ: + chain_read_data(chain, data, data_len, + cmd_flags, g->cs); + break; + case F_DATA_WRITE: + chain_write_data(chain, data, data_len, + cmd_flags, g->cs); + break; + default:{ + if (cmd_flags->flag & FE_END) + goto out; + else{ + printk(KERN_ERR "uknown cmd\n"); + BUG(); + } + } + } + + + chain++; idx++; + cmd_handle += cmd_flags->len; + + if (idx >= GPMI_DMA_MAX_CHAIN) { + printk(KERN_ERR "to many chains; idx is 0x%x\n", idx); + BUG(); + } + + } while (!((cmd_flags++)->flag & FE_END)); + +out: + if (idx < GPMI_DMA_MAX_CHAIN) { + ret = idx; + g->d_tail = idx; + } + spin_unlock_irqrestore(g->lock, flags); + + return ret; + +} + +dma_addr_t queue_get_cmd_handle(void *priv) +{ + struct gpmi_perchip_data *g = priv; + return g->cmd_buffer_handle; +} + +uint8_t *queue_get_cmd_ptr(void *priv) +{ + struct gpmi_perchip_data *g = priv; + return g->cmd_buffer; +} + +dma_addr_t queue_get_data_handle(void *priv) +{ + struct gpmi_perchip_data *g = priv; + return g->data_buffer_handle; +} + +uint8_t *queue_get_data_ptr(void *priv) +{ + struct gpmi_perchip_data *g = priv; + return g->data_buffer; +} + + +/** + * queue_run - run the chain + * + * @priv: private data. 
+ */ +int queue_run(void *priv) +{ + struct gpmi_perchip_data *g = priv; + + if (!g->d_tail) + return 0; + stmp3xxx_dma_reset_channel(g->dma_ch); + stmp3xxx_dma_clear_interrupt(g->dma_ch); + stmp3xxx_dma_enable_interrupt(g->dma_ch); + + g->d[g->d_tail-1].command->cmd &= ~(BM_APBH_CHn_CMD_NANDLOCK | + BM_APBH_CHn_CMD_CHAIN); + g->d[g->d_tail-1].command->cmd |= BM_APBH_CHn_CMD_IRQONCMPLT ; + + g->d[g->d_tail-1].command->pio_words[0] &= ~BM_GPMI_CTRL0_LOCK_CS; + +#ifdef DEBUG + /*stmp37cc_dma_print_chain(&g->chain);*/ +#endif + + init_completion(&g->done); + stmp3xxx_dma_go(g->dma_ch, g->d, 1); + wait_for_completion(&g->done); + + g->d_tail = 0; + + return 0; + +} + +/****************************************************************************** + * Platform specific part / chard driver and misc functions + ******************************************************************************/ + + + +static int __init lba_probe(struct platform_device *pdev) +{ + struct lba_data *data; + struct resource *r; + struct gpmi_perchip_data *g; + int err; + + /* Allocate memory for the device structure (and zero it) */ + data = kzalloc(sizeof(*data) + sizeof(struct gpmi_perchip_data), + GFP_KERNEL); + if (!data) { + dev_err(&pdev->dev, "failed to allocate gpmi_nand_data\n"); + err = -ENOMEM; + goto out1; + } + g_data = data; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "failed to get resource\n"); + err = -ENXIO; + goto out2; + } + data->io_base = ioremap(r->start, r->end - r->start + 1); + if (!data->io_base) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto out2; + } + + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!r) { + err = -EIO; + dev_err(&pdev->dev, "can't get IRQ resource\n"); + goto out3; + } + data->irq = r->start; + + platform_set_drvdata(pdev, data); + err = gpmi_init_hw(pdev, 1); + if (err) + goto out3; + + + err = request_irq(data->irq, + gpmi_irq, 0, dev_name(&pdev->dev), data); + if (err) { + 
dev_err(&pdev->dev, "can't request GPMI IRQ\n"); + goto out4; + } + + g = data->nand; + + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) { + dev_err(&pdev->dev, "can't get DMA resource\n"); + goto out_res; + } + g->cs = 0; + g->dma_ch = r->start; + + err = stmp3xxx_dma_request(g->dma_ch, NULL, dev_name(&pdev->dev)); + if (err) { + dev_err(&pdev->dev, "can't request DMA channel 0x%x\n", + g->dma_ch); + goto out_res; + } + + err = stmp3xxx_dma_make_chain(g->dma_ch, &g->chain, + g->d, ARRAY_SIZE(g->d)); + if (err) { + dev_err(&pdev->dev, "can't setup DMA chain\n"); + stmp3xxx_dma_release(g->dma_ch); + goto out_res; + } + + g->cmd_buffer_size = GPMI_CMD_BUF_SZ; + g->cmdtail_buffer_size = GPMI_CMD_BUF_SZ; + g->write_buffer_size = GPMI_WRITE_BUF_SZ; + g->data_buffer_size = GPMI_DATA_BUF_SZ; + g->oob_buffer_size = GPMI_OOB_BUF_SZ; + + err = gpmi_alloc_buffers(pdev, g); + if (err) { + dev_err(&pdev->dev, "can't setup buffers\n"); + stmp3xxx_dma_free_chain(&g->chain); + stmp3xxx_dma_release(g->dma_ch); + goto out_res; + } + + g->dev = pdev; + g->chip.priv = g; + g->index = 0; + g->timing = gpmi_safe_timing; + + g->cmd_buffer_sz = + g->write_buffer_sz = + g->data_buffer_sz = + 0; + g->valid = !0; /* mark the data as valid */ + + + lba_core_init(data); + + return 0; + +out_res: + free_irq(data->irq, data); +out4: + gpmi_release_hw(pdev); +out3: + platform_set_drvdata(pdev, NULL); + iounmap(data->io_base); +out2: + kfree(data); +out1: + return err; +} + +static int gpmi_suspend(struct platform_device *pdev, pm_message_t pm) +{ + struct lba_data *data = platform_get_drvdata(pdev); + int err; + + printk(KERN_INFO "%s: %d\n", __func__, __LINE__); + err = lba_core_suspend(pdev, data); + if (!err) + gpmi_release_hw(pdev); + + return err; +} + +static int gpmi_resume(struct platform_device *pdev) +{ + struct lba_data *data = platform_get_drvdata(pdev); + int r; + + printk(KERN_INFO "%s: %d\n", __func__, __LINE__); + r = gpmi_init_hw(pdev, 1); + lba_core_resume(pdev, 
data); + return r; +} + +/** + * gpmi_nand_remove - remove a GPMI device + * + */ +static int __devexit lba_remove(struct platform_device *pdev) +{ + struct lba_data *data = platform_get_drvdata(pdev); + int i; + + lba_core_remove(data); + gpmi_release_hw(pdev); + free_irq(data->irq, data); + + for (i = 0; i < max_chips; i++) { + if (!data->nand[i].valid) + continue; + gpmi_free_buffers(pdev, &data->nand[i]); + stmp3xxx_dma_free_chain(&data->nand[i].chain); + stmp3xxx_dma_release(data->nand[i].dma_ch); + } + iounmap(data->io_base); + kfree(data); + + return 0; +} + +static struct platform_driver lba_driver = { + .probe = lba_probe, + .remove = __devexit_p(lba_remove), + .driver = { + .name = "gpmi", + .owner = THIS_MODULE, + }, + .suspend = gpmi_suspend, + .resume = gpmi_resume, + +}; + + +static int __init lba_init(void) +{ + return platform_driver_register(&lba_driver); +} + +static void lba_exit(void) +{ + platform_driver_unregister(&lba_driver); +} + +module_init(lba_init); +module_exit(lba_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/lba/gpmi.h b/drivers/mtd/nand/lba/gpmi.h new file mode 100644 index 000000000000..a647c948040f --- /dev/null +++ b/drivers/mtd/nand/lba/gpmi.h @@ -0,0 +1,103 @@ +/* + * Freescale STMP37XX/STMP378X GPMI (General-Purpose-Media-Interface) + * + * Author: dmitry pervushin <dimka@embeddedalley.com> + * + * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ +#ifndef __DRIVERS_GPMI_H +#define __DRIVERS_GPMI_H + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <mach/stmp3xxx.h> +#include <mach/dma.h> + +#include <mach/gpmi.h> +#include <mach/regs-gpmi.h> +#include <mach/regs-apbh.h> +#ifdef CONFIG_MTD_NAND_GPMI_BCH +#include <mach/regs-bch.h> +#endif + + +struct gpmi_nand_timing { + u8 data_setup; + u8 data_hold; + u8 address_setup; + u8 dsample_time; +}; + +#define GPMI_DMA_MAX_CHAIN 20 /* max DMA commands in chain */ + +#define GPMI_CMD_BUF_SZ 10 +#define GPMI_DATA_BUF_SZ 4096 +#define GPMI_WRITE_BUF_SZ 4096 +#define GPMI_OOB_BUF_SZ 218 + + +struct gpmi_perchip_data { + int valid; + struct nand_chip chip; + struct platform_device *dev; + int index; + + spinlock_t lock; /* protect chain operations */ + struct stmp37xx_circ_dma_chain chain; + struct stmp3xxx_dma_descriptor d[GPMI_DMA_MAX_CHAIN]; + int d_tail; + + struct completion done; + + u8 *cmd_buffer; + dma_addr_t cmd_buffer_handle; + int cmd_buffer_size, cmd_buffer_sz; + + u8 *cmdtail_buffer; + dma_addr_t cmdtail_buffer_handle; + int cmdtail_buffer_size, cmdtail_buffer_sz; + + u8 *write_buffer; + dma_addr_t write_buffer_handle; + int write_buffer_size, write_buffer_sz; + + u8 *data_buffer; + dma_addr_t data_buffer_handle; + u8 *data_buffer_cptr; + int data_buffer_size, data_buffer_sz, bytes2read; + + u8 *oob_buffer; + dma_addr_t oob_buffer_handle; + int oob_buffer_size; + + int cs; + unsigned dma_ch; + + int ecc_oob_bytes, oob_free; + + struct gpmi_nand_timing timing; + + void *p2w, *oob2w, *p2r, *oob2r; + size_t p2w_size, oob2w_size, p2r_size, oob2r_size; + dma_addr_t p2w_dma, oob2w_dma, p2r_dma, oob2r_dma; + unsigned read_memcpy:1, write_memcpy:1, + read_oob_memcpy:1, 
write_oob_memcpy:1; +}; + + +extern struct gpmi_nand_timing gpmi_safe_timing; + + +#endif diff --git a/drivers/mtd/nand/lba/lba-blk.c b/drivers/mtd/nand/lba/lba-blk.c new file mode 100644 index 000000000000..228aa9d27760 --- /dev/null +++ b/drivers/mtd/nand/lba/lba-blk.c @@ -0,0 +1,345 @@ +/* + * Freescale STMP37XX/STMP378X LBA/block driver + * + * Author: Dmitrij Frasenyak <sed@embeddedalley.com> + * + * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2009 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> + +#include <linux/sched.h> +#include <linux/kernel.h> /* printk() */ +#include <linux/slab.h> /* kmalloc() */ +#include <linux/fs.h> /* everything... */ +#include <linux/errno.h> /* error codes */ +#include <linux/kthread.h> +#include <linux/timer.h> +#include <linux/types.h> /* size_t */ +#include <linux/fcntl.h> /* O_ACCMODE */ +#include <linux/hdreg.h> /* HDIO_GETGEO */ +#include <linux/kdev_t.h> +#include <linux/vmalloc.h> +#include <linux/genhd.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> /* invalidate_bdev */ +#include <linux/bio.h> +#include <linux/dma-mapping.h> +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include "lba.h" + +static int lba_major; + +#define LBA_NAME "lba" + +#if 0 +#define TAG() printk(KERNE_ERR "%s: %d\n", __func__, __LINE__) +#else +#define TAG() +#endif + +/* + * The internal representation of our device. 
+ */ +struct lba_blk_dev { + int size; /* Device size in sectors */ + spinlock_t lock; /* For mutual exclusion */ + int users; + struct request_queue *queue; /* The device request queue */ + struct gendisk *gd; /* The gendisk structure */ + struct lba_data *data; /* pointer from lba core */ + + struct task_struct *thread; + struct bio *bio_head; + struct bio *bio_tail; + wait_queue_head_t wait_q; + struct semaphore busy; + +}; + +static struct lba_blk_dev *g_lba_blk; + +static void blk_add_bio(struct lba_blk_dev *dev, struct bio *bio); + + +/* + * Transfer a single BIO. + */ +static int lba_blk_xfer_bio(struct lba_blk_dev *dev, struct bio *bio) +{ + int i; + struct bio_vec *bvec; + sector_t sector = bio->bi_sector; + enum dma_data_direction dir; + int status = 0; + int (*lba_xfer)(void *priv, + unsigned int sector, + unsigned int count, + void *buffer, + dma_addr_t handle); + + if (bio_data_dir(bio) == WRITE) { + lba_xfer = lba_write_sectors; + dir = DMA_TO_DEVICE; + } else { + lba_xfer = lba_read_sectors; + dir = DMA_FROM_DEVICE; + } + + /* Fixme: merge segments */ + bio_for_each_segment(bvec, bio, i) { + void *buffer = page_address(bvec->bv_page); + dma_addr_t handle ; + if (!buffer) + BUG(); + buffer += bvec->bv_offset; + handle = dma_map_single(&dev->data->nand->dev->dev, + buffer, + bvec->bv_len, + dir); + status = lba_xfer(dev->data->nand, sector, + bvec->bv_len >> 9, + buffer, + handle); + + dma_unmap_single(&dev->data->nand->dev->dev, + handle, + bvec->bv_len, + dir); + if (status) + break; + + sector += bio_cur_bytes(bio) >> 9; + } + + return status; +} + + +/* + * The direct make request version. + */ +static int lba_make_request(struct request_queue *q, struct bio *bio) +{ + struct lba_blk_dev *dev = q->queuedata; + + blk_add_bio(dev, bio); + return 0; +} + +/* + * Open and close. 
+ */ + +static int lba_blk_open(struct block_device *bdev, fmode_t mode) +{ + struct lba_blk_dev *dev = bdev->bd_disk->private_data; + + TAG(); + + spin_lock_irq(&dev->lock); + dev->users++; + spin_unlock_irq(&dev->lock); + return 0; +} + + +static int lba_blk_release(struct gendisk *gd, fmode_t mode) +{ + struct lba_blk_dev *dev = gd->private_data; + + spin_lock(&dev->lock); + dev->users--; + spin_unlock(&dev->lock); + + return 0; +} + +static int lba_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + /* + * get geometry: we have to fake one... trim the size to a + * multiple of 2048 (1M): tell we have 32 sectors, 64 heads, + * whatever cylinders. + */ + geo->heads = 1 << 6; + geo->sectors = 1 << 5; + geo->cylinders = get_capacity(bdev->bd_disk) >> 11; + return 0; +} + +/* + * Add bio to back of pending list + */ +static void blk_add_bio(struct lba_blk_dev *dev, struct bio *bio) +{ + unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); + if (dev->bio_tail) { + dev->bio_tail->bi_next = bio; + dev->bio_tail = bio; + } else + dev->bio_head = dev->bio_tail = bio; + wake_up(&dev->wait_q); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* + * Grab first pending buffer + */ +static struct bio *blk_get_bio(struct lba_blk_dev *dev) +{ + struct bio *bio; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + bio = dev->bio_head; + if (bio) { + if (bio == dev->bio_tail) { + dev->bio_tail = NULL; + dev->bio_head = NULL; + } + dev->bio_head = bio->bi_next; + bio->bi_next = NULL; + } + spin_unlock_irqrestore(&dev->lock, flags); + + return bio; +} + +static int lba_thread(void *data) +{ + struct lba_blk_dev *dev = data; + struct bio *bio; + int status; + + set_user_nice(current, -20); + + while (!kthread_should_stop() || dev->bio_head) { + + wait_event_interruptible(dev->wait_q, + dev->bio_head || kthread_should_stop()); + + if (!dev->bio_head) + continue; + + if (lba_core_lock_mode(dev->data, LBA_MODE_MDP)) + continue; + + bio = 
blk_get_bio(dev); + status = lba_blk_xfer_bio(dev, bio); + bio_endio(bio, status); + + lba_core_unlock_mode(dev->data); + } + + return 0; +} + + + +/* + * The device operations structure. + */ +static struct block_device_operations lba_blk_ops = { + .owner = THIS_MODULE, + .open = lba_blk_open, + .release = lba_blk_release, + .getgeo = lba_getgeo, +}; + + +int lba_blk_init(struct lba_data *data) +{ + + struct lba_blk_dev *dev; + int err; + if (!data) + BUG(); + + printk(KERN_INFO "LBA block driver v0.1\n"); + lba_major = LBA_MAJOR; + dev = g_lba_blk = kzalloc(sizeof(struct lba_blk_dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->data = data; + register_blkdev(lba_major, "lba"); + + spin_lock_init(&dev->lock); + init_waitqueue_head(&dev->wait_q); + sema_init(&dev->busy, 1); + + dev->queue = blk_alloc_queue(GFP_KERNEL); + if (!dev->queue) + goto out2; + blk_queue_make_request(dev->queue, lba_make_request); + /*dev->queue->unplug_fn = lba_unplug_device;*/ + blk_queue_logical_block_size(dev->queue, 512); + + dev->queue->queuedata = dev; + dev->gd = alloc_disk(32); + if (!dev->gd) { + printk(KERN_ERR "failed to alloc disk\n"); + goto out3; + } + dev->size = data->mdp_size ; + printk(KERN_INFO "%s: set capacity of the device to 0x%x\n", + __func__, dev->size); + dev->gd->major = lba_major; + dev->gd->first_minor = 0; + dev->gd->fops = &lba_blk_ops; + dev->gd->queue = dev->queue; + dev->gd->private_data = dev; + snprintf(dev->gd->disk_name, 8, LBA_NAME); + set_capacity(dev->gd, dev->size); + + dev->thread = kthread_create(lba_thread, dev, "lba-%d", 1); + if (IS_ERR(dev->thread)) { + err = PTR_ERR(dev->thread); + goto out3; + } + wake_up_process(dev->thread); + + add_disk(dev->gd); + + + TAG(); + + return 0; +out3: +out2: + unregister_blkdev(lba_major, "lba"); + return -ENOMEM; +} + +int lba_blk_remove(struct lba_data *data) +{ + + struct lba_blk_dev *dev = g_lba_blk; + + del_gendisk(dev->gd); + kthread_stop(dev->thread); + blk_cleanup_queue(dev->queue); + 
put_disk(dev->gd); + + unregister_blkdev(lba_major, LBA_NAME); + kfree(dev); + return 0; +} + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(254); diff --git a/drivers/mtd/nand/lba/lba-core.c b/drivers/mtd/nand/lba/lba-core.c new file mode 100644 index 000000000000..cdf28ca42b2a --- /dev/null +++ b/drivers/mtd/nand/lba/lba-core.c @@ -0,0 +1,619 @@ +/* + * Freescale STMP37XX/STMP378X LBA/core driver + * + * Author: Dmitrij Frasenyak <sed@embeddedalley.com> + * + * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2009 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/dma-mapping.h> +#include <linux/ctype.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <mach/stmp3xxx.h> +#include <mach/dma.h> +#include "gpmi.h" +#include "lba.h" + +#define LBA_SELFPM_TIMEOUT 2000 /* msecs */ +dma_addr_t g_cmd_handle; +dma_addr_t g_data_handle; +uint8_t *g_data_buffer; +uint8_t *g_cmd_buffer; + +uint8_t lba_get_status1(void *priv) +{ + uint8_t cmd_buf[] = { 0x70 } ; + struct lba_cmd lba_flags[] = { + {1 , F_CMD | FE_W4R}, + {0, F_DATA_READ | FE_END}, + }; + *g_data_buffer = 0; + queue_cmd(priv, cmd_buf, 0, 1, g_data_handle, 1, lba_flags); + queue_run(priv); + return *g_data_buffer; +} + + +int lba_wait_for_ready(void *priv) +{ + int stat; + unsigned long j_start = jiffies; + + stat = lba_get_status1(priv); + if ((stat & 0x60) != 0x60) { + 
while (((stat & 0x60) != 0x60) && + (jiffies - j_start < msecs_to_jiffies(2000))) { + schedule(); + stat = lba_get_status1(priv); + } + } + if (stat != 0x60) + return stat; + + return 0; +} + +int lba_write_sectors(void *priv, unsigned int sector, unsigned int count, + void *buffer, dma_addr_t handle) +{ + uint8_t cmd_buf[] = { + 0x80, + count & 0xff, (count >> 8) & 0xff, /* Count */ + (sector & 0xff), (sector >> 8) & 0xff, /* Address */ + (sector >> 16) & 0xff, (sector >> 24) & 0xff, /* Addres */ + /* Data goes here */ + 0x10 + }; + + struct lba_cmd flags_t1[] = { /* Transmition mode 1/A */ + {7 , F_CMD | FE_CMD_INC | FE_W4R}, + {0, F_DATA_WRITE}, + {1 , F_CMD | FE_END} + }; + + if (count > 8) + return -EINVAL; + + if (lba_wait_for_ready(priv)) + return -EIO; + + while (count) { + int cnt = (count < 8) ? count : 8; + int data_len = cnt * 512; + + queue_cmd(priv, cmd_buf, 0, 8, + handle, data_len, flags_t1); + + handle += data_len; + count -= cnt; + + } + + queue_run(priv); + + return count; + +} + +int lba_read_sectors(void *priv, unsigned int sector, unsigned int count, + void *buffer, dma_addr_t handle) +{ + + int data_len; + int cnt; + uint8_t cmd_buf[] = { + 0x00, + count & 0xff, (count >> 8) & 0xff, /* Count */ + (sector & 0xff), (sector >> 8) & 0xff, /* Addr */ + (sector >> 16) & 0xff, (sector >> 24) & 0xff, /* Addr */ + 0x30 + /* Data goes here <data> */ + + }; + + struct lba_cmd flags_r3[] = { /* Read mode 3/A */ + {7 , F_CMD | FE_CMD_INC | FE_W4R}, + {1 , F_CMD }, + {0 , F_DATA_READ | FE_W4R | FE_END }, + }; + struct lba_cmd flags_r3c[] = { /* Read mode 3/A */ + {0 , F_DATA_READ | FE_W4R | FE_END }, + }; + struct lba_cmd *flags = flags_r3; + int flags_len = 8; + + if (count > 8) + return -EINVAL; + + if (lba_wait_for_ready(priv)) + return -EIO; + + while (count) { + cnt = (count < 8) ? 
count : 8; + data_len = cnt * 512; + queue_cmd(priv, cmd_buf, 0, flags_len, handle, data_len, flags); + handle += data_len; + count -= cnt; + flags = flags_r3c; + flags_len = 0; + } + + queue_run(priv); + + return count; + +} + + +uint8_t lba_get_id1(void *priv, uint8_t *ret_buffer) +{ + uint8_t cmd_buf[] = { 0x90 , 0x00, /* Data read 5bytes*/ }; + struct lba_cmd lba_flags[] = { + {2 , F_CMD | FE_CMD_INC | FE_W4R}, + {0, F_DATA_READ | FE_END}, + }; + + queue_cmd(priv, cmd_buf, 0, 2, g_data_handle, 5, lba_flags); + queue_run(priv); + memcpy(ret_buffer, g_data_buffer, 5); + + return 0; +} + +uint8_t lba_get_id2(void *priv, uint8_t *ret_buffer) +{ + uint8_t cmd_buf[] = { 0x92 , 0x00, /* Data read 5bytes*/ }; + struct lba_cmd lba_flags[] = { + {2 , F_CMD | FE_CMD_INC | FE_W4R}, + {0, F_DATA_READ | FE_END}, + }; + + queue_cmd(priv, cmd_buf, 0, 2, g_data_handle, 5, lba_flags); + queue_run(priv); + memcpy(ret_buffer, g_data_buffer, 5); + return 0; +} + +uint8_t lba_get_status2(void *priv) +{ + uint8_t cmd_buf[] = { 0x71 }; + struct lba_cmd lba_flags[] = { + {1 , F_CMD | FE_CMD_INC | FE_W4R}, + {0 , F_DATA_READ | FE_END}, + }; + *g_data_buffer = 0; + queue_cmd(priv, cmd_buf, 0, 1, g_data_handle, 1, lba_flags); + queue_run(priv); + return *g_data_buffer; +} + +static uint8_t lba_parse_status2(void *priv) +{ + uint8_t stat; + + stat = lba_get_status2(priv); + printk(KERN_INFO "Status2:|"); + if (stat & 0x40) + printk(" C.PAR.ERR |"); /* no KERN_ here */ + if (stat & 0x20) + printk(" NO spare |"); + if (stat & 0x10) + printk(" ADDR OoRange |"); + if (stat & 0x8) + printk(" high speed |"); + if ((stat & 0x6) == 6) + printk(" MDP |"); + if ((stat & 0x6) == 4) + printk(" VFP |"); + if ((stat & 0x6) == 2) + printk(" PNP |"); + if (stat & 1) + printk(" PSW |"); + + printk("\n"); + return 0; +} + + +int lba_2mdp(void *priv) +{ + uint8_t cmd_buf[] = { 0xFC }; + struct lba_cmd lba_flags[] = { + {1 , F_CMD | FE_W4R | FE_END} + }; + + queue_cmd(priv, cmd_buf, 0, 1, 0, 0, lba_flags); + 
queue_run(priv); + return 0; +} + +void _lba_misc_cmd_set(void *priv, uint8_t *cmd_buf) +{ + struct lba_cmd lba_flags[] = { + {6 , F_CMD | FE_CMD_INC | FE_W4R }, + {1 , F_CMD | FE_END }, + }; + + queue_cmd(priv, cmd_buf, 0, 7, 0, 0, lba_flags); + queue_run(priv); +} + +uint8_t _lba_misc_cmd_get(void *priv, uint8_t *cmd_buf) +{ + struct lba_cmd lba_flags[] = { + {6 , F_CMD | FE_CMD_INC | FE_W4R }, + {1 , F_CMD }, + {0 , F_DATA_READ | FE_W4R | FE_END} + }; + + queue_cmd(priv, cmd_buf, 0, 7, g_data_handle, 1, lba_flags); + queue_run(priv); + return *g_data_buffer; +} +void lba_mdp2vfp(void *priv, uint8_t pass[2]) +{ + uint8_t cmd_buf[] = { 0x0, 0xbe, pass[0], pass[1], 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +void lba_bcm2vfp(void *priv, uint8_t pass[2]) +{ + lba_mdp2vfp(priv, pass); +} + +void lba_powersave_enable(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xba, 0, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} +void lba_powersave_disable(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xbb, 0, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +void lba_highspeed_enable(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xbc, 0, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +void lba_highspeed_disable(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xbd, 0, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +void lba_prot1_set(void *priv, uint8_t mode) +{ + uint8_t cmd_buf[] = { 0x0, 0xa2, mode, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +void lba_prot2_set(void *priv, uint8_t mode) +{ + uint8_t cmd_buf[] = { 0x0, 0xa3, mode, 0, 0, 0, 0x57 }; + _lba_misc_cmd_set(priv, cmd_buf); +} + +uint8_t lba_prot1_get(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xb2, 0, 0, 0, 0, 0x57 }; + return _lba_misc_cmd_get(priv, cmd_buf); +} + +uint8_t lba_prot2_get(void *priv) +{ + uint8_t cmd_buf[] = { 0x0, 0xb3, 0, 0, 0, 0, 0x57 }; + return _lba_misc_cmd_get(priv, cmd_buf); +} + +uint64_t lba_mdp_size_get(void *priv) +{ + uint8_t 
cmd_buf[] = { 0x0, 0xb0, 0, 0, 0, 0, 0x57 }; + struct lba_cmd lba_flags[] = { + {6 , F_CMD | FE_CMD_INC | FE_W4R }, + {1 , F_CMD }, + {0 , F_DATA_READ | FE_W4R | FE_END} + }; + + memset((void *)g_data_buffer, 0, 8); + queue_cmd(priv, cmd_buf, 0, 7, g_data_handle, 5, lba_flags); + queue_run(priv); + return le64_to_cpu(*(long long *)g_data_buffer); +} + +void lba_cache_flush(void *priv) +{ + uint8_t cmd_buf[] = { 0xF9 }; + struct lba_cmd lba_flags[] = { + {1 , F_CMD | FE_W4R }, + {0 , FE_W4R | FE_END} + }; + + queue_cmd(priv, cmd_buf, 0, 7, g_data_handle, 5, lba_flags); + queue_run(priv); +} + +void lba_reboot(void *priv) +{ + uint8_t cmd_buf[] = { 0xFD }; + struct lba_cmd lba_flags[] = { + {1 , F_CMD | FE_W4R }, + {0 , FE_W4R | FE_END} + }; + + queue_cmd(priv, cmd_buf, 0, 7, g_data_handle, 5, lba_flags); + queue_run(priv); +} + +void lba_def_state(void *priv) +{ + lba_wait_for_ready(priv); + lba_reboot(priv); + + lba_wait_for_ready(priv); + lba_parse_status2(priv); + + lba_wait_for_ready(priv); + lba_2mdp(priv); + + lba_wait_for_ready(priv); + lba_prot1_set(priv, LBA_T_SIZE8); /* 512 * 8 */ + + lba_wait_for_ready(priv); +/* Type C read; Type A write; */ + lba_prot2_set(priv, LBA_P_WRITE_A | LBA_P_READ_C); +} + +/* + * Should be called with mode locked + */ +void lba_core_setvfp_passwd(struct lba_data *data, uint8_t pass[2]) +{ + memcpy(data->pass, pass, 2); +} + +int lba_core_lock_mode(struct lba_data *data, int mode) +{ + void *priv = &data->nand; + + if (down_interruptible(&data->mode_lock)) + return -EAGAIN; + /* + * MDP and VFP are the only supported + * modes for now. 
+ */ + if ((mode != LBA_MODE_MDP) && + (mode != LBA_MODE_VFP)) { + up(&data->mode_lock); + return -EINVAL; + } + + while ((data->mode & LBA_MODE_MASK) == LBA_MODE_SUSP) { + up(&data->mode_lock); + + if (wait_event_interruptible( + data->suspend_q, + (data->mode & LBA_MODE_MASK) != LBA_MODE_SUSP)) + return -EAGAIN; + + if (down_interruptible(&data->mode_lock)) + return -EAGAIN; + + data->last_access = jiffies; + } + + if (data->mode & LBA_MODE_SELFPM) { + queue_plug(data); + data->mode &= ~LBA_MODE_SELFPM; + } + + if (mode == data->mode) + return 0; + + /* + * mode = VFP || MDP only + * Revisit when more modes are added + */ + switch (data->mode) { + case LBA_MODE_RST: + case LBA_MODE_PNR: + case LBA_MODE_BCM: + lba_def_state(priv); + if (mode == LBA_MODE_MDP) { + data->mode = LBA_MODE_MDP; + break; + } + /*no break -> fall down to set VFP mode*/ + case LBA_MODE_MDP: + lba_wait_for_ready(priv); + lba_mdp2vfp(priv, data->pass); + data->mode = LBA_MODE_VFP; + break; + case LBA_MODE_VFP: + lba_wait_for_ready(priv); + lba_2mdp(priv); + data->mode = LBA_MODE_MDP; + break; + default: + up(&data->mode_lock); + return -EINVAL; + } + + return 0; +} + +int lba_core_unlock_mode(struct lba_data *data) +{ + data->last_access = jiffies; + up(&data->mode_lock); + wake_up(&data->selfpm_q); + return 0; +} + +static int selfpm_timeout_expired(struct lba_data *data) +{ + return jiffies_to_msecs(jiffies - data->last_access) > 2000; +} + +static int lba_selfpm_thread(void *d) +{ + struct lba_data *data = d; + + set_user_nice(current, -5); + + while (!kthread_should_stop()) { + + if (wait_event_interruptible(data->selfpm_q, + kthread_should_stop() || + !(data->mode & LBA_MODE_SELFPM))) + continue; + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(LBA_SELFPM_TIMEOUT)); + + if (down_trylock(&data->mode_lock)) + continue; + + if (!selfpm_timeout_expired(data)) { + up(&data->mode_lock); + continue; + } + data->mode |= LBA_MODE_SELFPM; + lba_wait_for_ready((void 
*)data->nand); + lba_cache_flush((void *)data->nand); + queue_release(data); + up(&data->mode_lock); + + } + + return 0; +} + +int lba_core_init(struct lba_data *data) +{ + uint8_t id_buf[5]; + uint8_t capacity; + uint8_t id1_template[5] = {0x98, 0xDC, 0x00, 0x15, 0x00}; + uint8_t id2_template[5] = {0x98, 0x21, 0x00, 0x55, 0xAA}; + void *priv = (void *)data->nand; + + + g_data = data; + g_cmd_handle = queue_get_cmd_handle(priv); + g_data_handle = queue_get_data_handle(priv); + g_data_buffer = queue_get_data_ptr(priv); + g_cmd_buffer = queue_get_cmd_ptr(priv); + + spin_lock_init(&data->lock); + sema_init(&data->mode_lock, 1); + init_waitqueue_head(&data->suspend_q); + init_waitqueue_head(&data->selfpm_q); + + + lba_get_id1(data->nand, id_buf); + if (!memcmp(id_buf, id1_template, 5)) + printk(KERN_INFO + "LBA: Found LBA/SLC NAND emulated ID\n"); + else + return -ENODEV; + + lba_get_id2(data->nand, id_buf); + capacity = id_buf[2]; + id_buf[2] = 0; + + if (memcmp(id_buf, id2_template, 5)) { + printk(KERN_INFO + "LBA: Uknown LBA device\n"); + return -ENODEV; + } + printk(KERN_INFO + "LBA: Found %dGbytes LBA NAND device\n", + 1 << capacity); + + lba_wait_for_ready(priv); + lba_parse_status2(priv); + + lba_def_state(priv); + data->mode = LBA_MODE_MDP; + + g_data->pnp_size = 0xff; + g_data->vfp_size = 16384; + + lba_wait_for_ready(priv); + g_data->mdp_size = lba_mdp_size_get(priv); + + lba_wait_for_ready(priv); + /*lba_powersave_enable(priv);*/ + /*lba_highspeed_enable(priv);*/ + + lba_wait_for_ready(priv); + lba_parse_status2(priv); + + data->thread = kthread_create(lba_selfpm_thread, + data, "lba-selfpm-%d", 1); + if (IS_ERR(data->thread)) + return PTR_ERR(data->thread); + + lba_blk_init(g_data); + + wake_up_process(data->thread); + return 0; + +}; + +int lba_core_remove(struct lba_data *data) +{ + kthread_stop(data->thread); + lba_blk_remove(data); + lba_wait_for_ready((void *)data->nand); + lba_cache_flush((void *)data->nand); + return 0; +} + +int 
lba_core_suspend(struct platform_device *pdev, struct lba_data *data) +{ + BUG_ON((data->mode & 0xffff) == LBA_MODE_SUSP); + if (down_interruptible(&data->mode_lock)) + return -EAGAIN; + if (data->mode & LBA_MODE_SELFPM) + queue_plug(data); + + data->mode = LBA_MODE_SUSP | LBA_MODE_SELFPM; + up(&data->mode_lock); + lba_wait_for_ready((void *)data->nand); + lba_cache_flush((void *)data->nand); + return 0; +} + +int lba_core_resume(struct platform_device *pdev, struct lba_data *data) +{ + BUG_ON((data->mode & 0xffff) != LBA_MODE_SUSP); + lba_def_state((void *)data->nand); + data->last_access = jiffies; + data->mode = LBA_MODE_MDP; + wake_up(&data->suspend_q); + return 0; +} diff --git a/drivers/mtd/nand/lba/lba.h b/drivers/mtd/nand/lba/lba.h new file mode 100644 index 000000000000..3466f96881eb --- /dev/null +++ b/drivers/mtd/nand/lba/lba.h @@ -0,0 +1,140 @@ +/* + * Freescale STMP37XX/STMP378X LBA interface + * + * Author: Dmitrij Frasenyak <sed@embeddedalley.com> + * + * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2009 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#ifndef __INCLUDE_LBA_H__ +#define __INCLUDE_LBA_H__ + + +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include "gpmi.h" + +struct lba_cmd { + uint8_t len; +#define F_MASK 0x0f +#define F_ALE 0x01 +#define F_CMD 0x02 +#define F_DATA_READ 0x04 +#define F_DATA_WRITE 0x08 + +#define FE_W4R 0x10 +#define FE_CMD_INC 0x20 +#define FE_END 0x40 + + uint8_t flag; +}; + +#define LBA_P_READ_A 0 +#define LBA_P_READ_B 2 +#define LBA_P_READ_C 3 +#define LBA_P_WRITE_A 0 +#define LBA_P_WRITE_B 4 +#define LBA_T_SIZE1 1 +#define LBA_T_SIZE4 2 +#define LBA_T_SIZE8 4 +#define LBA_T_CRC (1 << 6) +#define LBA_T_ECC_CHECK (2 << 6) +#define LBA_T_ECC_CORRECT (3 << 6) + +struct lba_data { + void __iomem *io_base; + struct clk *clk; + int irq; + + spinlock_t lock; + int use_count; + int mode; + struct semaphore mode_lock; +#define LBA_MODE_MASK 0x0000ffff +#define LBA_FLAG_MASK 0xffff0000 +#define LBA_MODE_RST 0 +#define LBA_MODE_PNR 1 +#define LBA_MODE_BCM 2 +#define LBA_MODE_MDP 3 +#define LBA_MODE_VFP 4 +#define LBA_MODE_SUSP 5 +#define LBA_MODE_SELFPM 0x80000000 + wait_queue_head_t suspend_q; + wait_queue_head_t selfpm_q; + struct task_struct *thread; + long long last_access; + /* PNR specific */ + /* BCM specific */ + /* VFP specific */ + uint8_t pass[2]; + + /* Size of the partiotions: pages for PNP; sectors for others */ + unsigned int pnp_size; + unsigned int vfp_size; + long long mdp_size; + void *priv; + /*should be last*/ + struct gpmi_perchip_data nand[0]; + +}; + +extern struct lba_data *g_data; + +void stmp37cc_dma_print_chain(struct stmp37xx_circ_dma_chain *chain); + +int lba_blk_init(struct lba_data *data); +int lba_blk_remove(struct lba_data *data); +int lba_blk_suspend(struct platform_device *pdev, struct lba_data *data); +int lba_blk_resume(struct 
platform_device *pdev, struct lba_data *data); + + +int lba_core_init(struct lba_data *data); +int lba_core_remove(struct lba_data *data); +int lba_core_suspend(struct platform_device *pdev, struct lba_data *data); +int lba_core_resume(struct platform_device *pdev, struct lba_data *data); +int lba_core_lock_mode(struct lba_data *data, int mode); +int lba_core_unlock_mode(struct lba_data *data); + +int lba_write_sectors(void *priv, unsigned int sector, unsigned int count, + void *buffer, dma_addr_t handle); +int lba_read_sectors(void *priv, unsigned int sector, unsigned int count, + void *buffer, dma_addr_t handle); +void lba_protocol1_set(void *priv, uint8_t param); +uint8_t lba_protocol1_get(void *priv); +uint8_t lba_get_status1(void *priv); +uint8_t lba_get_status2(void *priv); + +uint8_t lba_get_id1(void *priv, uint8_t *ret_buffer); +uint8_t lba_get_id2(void *priv, uint8_t *); + + +int queue_cmd(void *priv, + uint8_t *cmd_buf, dma_addr_t cmd_handle, int cmd_len, + dma_addr_t data, int data_len, + struct lba_cmd *cmd_flags); + +int queue_run(void *priv); + +dma_addr_t queue_get_cmd_handle(void *priv); + +uint8_t *queue_get_cmd_ptr(void *priv); + +dma_addr_t queue_get_data_handle(void *priv); + +uint8_t *queue_get_data_ptr(void *priv); + +void queue_plug(struct lba_data *data); +void queue_release(struct lba_data *data); + + +#endif |