author		Peter Pan <peterpandong@micron.com>	2018-08-16 17:30:12 +0200
committer	Jagan Teki <jagan@amarulasolutions.com>	2018-09-20 20:10:49 +0530
commit		0a6d6bae03864938f073cc114992c40f2338a155 (patch)
tree		69b01fa2cbb8f1c2aebfd9ff277acb2f12bc8760
parent		d13f5b254a43e292814a618f60a2696ba01267a7 (diff)
mtd: nand: Add core infrastructure to support SPI NANDs
Add a SPI NAND framework based on the generic NAND framework and the
spi-mem infrastructure.
In its current state, this framework supports the following features:
- single/dual/quad IO modes
- on-die ECC
Signed-off-by: Peter Pan <peterpandong@micron.com>
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Acked-by: Jagan Teki <jagan@openedev.com>
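
The intended flow is that a manufacturer driver matches the READ_ID bytes against a chip description table and lets the core pick the widest I/O variant the SPI controller supports. No manufacturer driver is wired up by this patch (spinand_manufacturer_detect() still returns -ENOTSUPP), so what follows is only a minimal sketch of that usage, built from the SPINAND_OP_VARIANTS/SPINAND_INFO/spinand_match_and_init() pieces introduced below. The manufacturer byte 0xEF, device ID 0xAB, model name and geometry are made-up placeholders, and NAND_MEMORG()/NAND_ECCREQ() come from the generic NAND core this framework sits on:

	/*
	 * Sketch of a hypothetical manufacturer driver entry. The op variant
	 * lists are ordered fastest-first; spinand_select_op_variant() keeps
	 * the first one the controller can execute.
	 */
	static SPINAND_OP_VARIANTS(read_cache_variants,
			SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

	static SPINAND_OP_VARIANTS(write_cache_variants,
			SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
			SPINAND_PROG_LOAD(true, 0, NULL, 0));

	static SPINAND_OP_VARIANTS(update_cache_variants,
			SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
			SPINAND_PROG_LOAD(false, 0, NULL, 0));

	static const struct spinand_info example_spinand_table[] = {
		/* Hypothetical 1 Gbit chip: 2 KiB pages, 64 pages/block */
		SPINAND_INFO("EXAMPLE-1G", 0xAB,
			     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
			     NAND_ECCREQ(8, 512),
			     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
						      &write_cache_variants,
						      &update_cache_variants),
			     SPINAND_HAS_QE_BIT,
			     SPINAND_ECCINFO(NULL, NULL)),
	};

	static int example_spinand_detect(struct spinand_device *spinand)
	{
		u8 *id = spinand->id.data;
		int ret;

		/*
		 * Per the ->detect() contract documented in spinand.h: return
		 * 0 if the manufacturer byte (0xEF is a placeholder here)
		 * does not match, 1 on a successful match.
		 */
		if (id[0] != 0xEF)
			return 0;

		ret = spinand_match_and_init(spinand, example_spinand_table,
					     ARRAY_SIZE(example_spinand_table),
					     id[1]);
		if (ret)
			return ret;

		return 1;
	}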
-rw-r--r--	drivers/mtd/nand/Kconfig	|    2
-rw-r--r--	drivers/mtd/nand/Makefile	|    1
-rw-r--r--	drivers/mtd/nand/spi/Kconfig	|    7
-rw-r--r--	drivers/mtd/nand/spi/Makefile	|    4
-rw-r--r--	drivers/mtd/nand/spi/core.c	| 1235
-rw-r--r--	include/linux/mtd/spinand.h	|  427
6 files changed, 1676 insertions, 0 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c1a1f487e2..78ae04bdcba 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,3 +2,5 @@ config MTD_NAND_CORE
 	tristate

 source "drivers/mtd/nand/raw/Kconfig"
+
+source "drivers/mtd/nand/spi/Kconfig"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index cd492dbc14e..a358bc680eb 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,3 +2,4 @@
 nandcore-objs := core.o bbt.o

 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+obj-$(CONFIG_MTD_SPI_NAND) += spi/
diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig
new file mode 100644
index 00000000000..2197cb531f3
--- /dev/null
+++ b/drivers/mtd/nand/spi/Kconfig
@@ -0,0 +1,7 @@
+menuconfig MTD_SPI_NAND
+	bool "SPI NAND device Support"
+	depends on MTD && DM_SPI
+	select MTD_NAND_CORE
+	select SPI_MEM
+	help
+	  This is the framework for the SPI NAND device drivers.
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
new file mode 100644
index 00000000000..f0c6e69d2eb
--- /dev/null
+++ b/drivers/mtd/nand/spi/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+spinand-objs := core.o
+obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
new file mode 100644
index 00000000000..08f853ae11e
--- /dev/null
+++ b/drivers/mtd/nand/spi/core.c
@@ -0,0 +1,1235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ *	Peter Pan <peterpandong@micron.com>
+ *	Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#define pr_fmt(fmt)	"spi-nand: " fmt
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spinand.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#else
+#include <common.h>
+#include <errno.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <linux/mtd/spinand.h>
+#endif
+
+/* SPI NAND index visible in MTD names */
+static int spi_nand_idx;
+
+static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
+					  const struct nand_page_io_req *req,
+					  u16 *column)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	unsigned int shift;
+
+	if (nand->memorg.planes_per_lun < 2)
+		return;
+
+	/* The plane number is passed in MSB just above the column address */
+	shift = fls(nand->memorg.pagesize);
+	*column |= req->pos.plane << shift;
+}
+
+static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+{
+	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+						      spinand->scratchbuf);
+	int ret;
+
+	ret = spi_mem_exec_op(spinand->slave, &op);
+	if (ret)
+		return ret;
+
+	*val = *spinand->scratchbuf;
+	return 0;
+}
+
+static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+						      spinand->scratchbuf);
+
+	*spinand->scratchbuf = val;
+	return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_read_status(struct spinand_device *spinand, u8 *status)
+{
+	return spinand_read_reg_op(spinand, REG_STATUS, status);
+}
+
+static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+
+	if (WARN_ON(spinand->cur_target < 0 ||
+		    spinand->cur_target >= nand->memorg.ntargets))
+		return -EINVAL;
+
+	*cfg = spinand->cfg_cache[spinand->cur_target];
+	return 0;
+}
+
+static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int ret;
+
+	if (WARN_ON(spinand->cur_target < 0 ||
+		    spinand->cur_target >= nand->memorg.ntargets))
+		return -EINVAL;
+
+	if (spinand->cfg_cache[spinand->cur_target] == cfg)
+		return 0;
+
+	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
+	if (ret)
+		return ret;
+
+	spinand->cfg_cache[spinand->cur_target] = cfg;
+	return 0;
+}
+
+/**
+ * spinand_upd_cfg() - Update the configuration register
+ * @spinand: the spinand device
+ * @mask: the mask encoding the bits to update in the config reg
+ * @val: the new value to apply
+ *
+ * Update the configuration register.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
+{
+	int ret;
+	u8 cfg;
+
+	ret = spinand_get_cfg(spinand, &cfg);
+	if (ret)
+		return ret;
+
+	cfg &= ~mask;
+	cfg |= val;
+
+	return spinand_set_cfg(spinand, cfg);
+}
+
+/**
+ * spinand_select_target() - Select a specific NAND target/die
+ * @spinand: the spinand device
+ * @target: the target/die to select
+ *
+ * Select a new target/die. If chip only has one die, this function is a NOOP.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_select_target(struct spinand_device *spinand, unsigned int target)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int ret;
+
+	if (WARN_ON(target >= nand->memorg.ntargets))
+		return -EINVAL;
+
+	if (spinand->cur_target == target)
+		return 0;
+
+	if (nand->memorg.ntargets == 1) {
+		spinand->cur_target = target;
+		return 0;
+	}
+
+	ret = spinand->select_target(spinand, target);
+	if (ret)
+		return ret;
+
+	spinand->cur_target = target;
+	return 0;
+}
+
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	struct udevice *dev = spinand->slave->dev;
+	unsigned int target;
+	int ret;
+
+	spinand->cfg_cache = devm_kzalloc(dev,
+					  sizeof(*spinand->cfg_cache) *
+					  nand->memorg.ntargets,
+					  GFP_KERNEL);
+	if (!spinand->cfg_cache)
+		return -ENOMEM;
+
+	for (target = 0; target < nand->memorg.ntargets; target++) {
+		ret = spinand_select_target(spinand, target);
+		if (ret)
+			return ret;
+
+		/*
+		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
+		 * here to bypass the config cache.
+		 */
+		ret = spinand_read_reg_op(spinand, REG_CFG,
+					  &spinand->cfg_cache[target]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int spinand_init_quad_enable(struct spinand_device *spinand)
+{
+	bool enable = false;
+
+	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
+		return 0;
+
+	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
+	    spinand->op_templates.write_cache->data.buswidth == 4 ||
+	    spinand->op_templates.update_cache->data.buswidth == 4)
+		enable = true;
+
+	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
+			       enable ? CFG_QUAD_ENABLE : 0);
+}
+
+static int spinand_ecc_enable(struct spinand_device *spinand,
+			      bool enable)
+{
+	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
+			       enable ? CFG_ECC_ENABLE : 0);
+}
+
+static int spinand_write_enable_op(struct spinand_device *spinand)
+{
+	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+
+	return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_load_page_op(struct spinand_device *spinand,
+				const struct nand_page_io_req *req)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+
+	return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_read_from_cache_op(struct spinand_device *spinand,
+				      const struct nand_page_io_req *req)
+{
+	struct spi_mem_op op = *spinand->op_templates.read_cache;
+	struct nand_device *nand = spinand_to_nand(spinand);
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+	struct nand_page_io_req adjreq = *req;
+	unsigned int nbytes = 0;
+	void *buf = NULL;
+	u16 column = 0;
+	int ret;
+
+	if (req->datalen) {
+		adjreq.datalen = nanddev_page_size(nand);
+		adjreq.dataoffs = 0;
+		adjreq.databuf.in = spinand->databuf;
+		buf = spinand->databuf;
+		nbytes = adjreq.datalen;
+	}
+
+	if (req->ooblen) {
+		adjreq.ooblen = nanddev_per_page_oobsize(nand);
+		adjreq.ooboffs = 0;
+		adjreq.oobbuf.in = spinand->oobbuf;
+		nbytes += nanddev_per_page_oobsize(nand);
+		if (!buf) {
+			buf = spinand->oobbuf;
+			column = nanddev_page_size(nand);
+		}
+	}
+
+	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+	op.addr.val = column;
+
+	/*
+	 * Some controllers are limited in term of max RX data size. In this
+	 * case, just repeat the READ_CACHE operation after updating the
+	 * column.
+	 */
+	while (nbytes) {
+		op.data.buf.in = buf;
+		op.data.nbytes = nbytes;
+		ret = spi_mem_adjust_op_size(spinand->slave, &op);
+		if (ret)
+			return ret;
+
+		ret = spi_mem_exec_op(spinand->slave, &op);
+		if (ret)
+			return ret;
+
+		buf += op.data.nbytes;
+		nbytes -= op.data.nbytes;
+		op.addr.val += op.data.nbytes;
+	}
+
+	if (req->datalen)
+		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
+		       req->datalen);
+
+	if (req->ooblen) {
+		if (req->mode == MTD_OPS_AUTO_OOB)
+			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+						    spinand->oobbuf,
+						    req->ooboffs,
+						    req->ooblen);
+		else
+			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+			       req->ooblen);
+	}
+
+	return 0;
+}
+
+static int spinand_write_to_cache_op(struct spinand_device *spinand,
+				     const struct nand_page_io_req *req)
+{
+	struct spi_mem_op op = *spinand->op_templates.write_cache;
+	struct nand_device *nand = spinand_to_nand(spinand);
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+	struct nand_page_io_req adjreq = *req;
+	unsigned int nbytes = 0;
+	void *buf = NULL;
+	u16 column = 0;
+	int ret;
+
+	memset(spinand->databuf, 0xff,
+	       nanddev_page_size(nand) +
+	       nanddev_per_page_oobsize(nand));
+
+	if (req->datalen) {
+		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+		       req->datalen);
+		adjreq.dataoffs = 0;
+		adjreq.datalen = nanddev_page_size(nand);
+		adjreq.databuf.out = spinand->databuf;
+		nbytes = adjreq.datalen;
+		buf = spinand->databuf;
+	}
+
+	if (req->ooblen) {
+		if (req->mode == MTD_OPS_AUTO_OOB)
+			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
+						    spinand->oobbuf,
+						    req->ooboffs,
+						    req->ooblen);
+		else
+			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+			       req->ooblen);
+
+		adjreq.ooblen = nanddev_per_page_oobsize(nand);
+		adjreq.ooboffs = 0;
+		nbytes += nanddev_per_page_oobsize(nand);
+		if (!buf) {
+			buf = spinand->oobbuf;
+			column = nanddev_page_size(nand);
+		}
+	}
+
+	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+
+	op = *spinand->op_templates.write_cache;
+	op.addr.val = column;
+
+	/*
+	 * Some controllers are limited in term of max TX data size. In this
+	 * case, split the operation into one LOAD CACHE and one or more
+	 * LOAD RANDOM CACHE.
+	 */
+	while (nbytes) {
+		op.data.buf.out = buf;
+		op.data.nbytes = nbytes;
+
+		ret = spi_mem_adjust_op_size(spinand->slave, &op);
+		if (ret)
+			return ret;
+
+		ret = spi_mem_exec_op(spinand->slave, &op);
+		if (ret)
+			return ret;
+
+		buf += op.data.nbytes;
+		nbytes -= op.data.nbytes;
+		op.addr.val += op.data.nbytes;
+
+		/*
+		 * We need to use the RANDOM LOAD CACHE operation if there's
+		 * more than one iteration, because the LOAD operation resets
+		 * the cache to 0xff.
+		 */
+		if (nbytes) {
+			column = op.addr.val;
+			op = *spinand->op_templates.update_cache;
+			op.addr.val = column;
+		}
+	}
+
+	return 0;
+}
+
+static int spinand_program_op(struct spinand_device *spinand,
+			      const struct nand_page_io_req *req)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+
+	return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_erase_op(struct spinand_device *spinand,
+			    const struct nand_pos *pos)
+{
+	struct nand_device *nand = &spinand->base;
+	unsigned int row = nanddev_pos_to_row(nand, pos);
+	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+
+	return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_wait(struct spinand_device *spinand, u8 *s)
+{
+	unsigned long start, stop;
+	u8 status;
+	int ret;
+
+	start = get_timer(0);
+	stop = 400;
+	do {
+		ret = spinand_read_status(spinand, &status);
+		if (ret)
+			return ret;
+
+		if (!(status & STATUS_BUSY))
+			goto out;
+	} while (get_timer(start) < stop);
+
+	/*
+	 * Extra read, just in case the STATUS_READY bit has changed
+	 * since our last check
+	 */
+	ret = spinand_read_status(spinand, &status);
+	if (ret)
+		return ret;
+
+out:
+	if (s)
+		*s = status;
+
+	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
+}
+
+static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+{
+	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
+						 SPINAND_MAX_ID_LEN);
+	int ret;
+
+	ret = spi_mem_exec_op(spinand->slave, &op);
+	if (!ret)
+		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+
+	return ret;
+}
+
+static int spinand_reset_op(struct spinand_device *spinand)
+{
+	struct spi_mem_op op = SPINAND_RESET_OP;
+	int ret;
+
+	ret = spi_mem_exec_op(spinand->slave, &op);
+	if (ret)
+		return ret;
+
+	return spinand_wait(spinand, NULL);
+}
+
+static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
+{
+	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+
+	if (spinand->eccinfo.get_status)
+		return spinand->eccinfo.get_status(spinand, status);
+
+	switch (status & STATUS_ECC_MASK) {
+	case STATUS_ECC_NO_BITFLIPS:
+		return 0;
+
+	case STATUS_ECC_HAS_BITFLIPS:
+		/*
+		 * We have no way to know exactly how many bitflips have been
+		 * fixed, so let's return the maximum possible value so that
+		 * wear-leveling layers move the data immediately.
+		 */
+		return nand->eccreq.strength;
+
+	case STATUS_ECC_UNCOR_ERROR:
+		return -EBADMSG;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int spinand_read_page(struct spinand_device *spinand,
+			     const struct nand_page_io_req *req,
+			     bool ecc_enabled)
+{
+	u8 status;
+	int ret;
+
+	ret = spinand_load_page_op(spinand, req);
+	if (ret)
+		return ret;
+
+	ret = spinand_wait(spinand, &status);
+	if (ret < 0)
+		return ret;
+
+	ret = spinand_read_from_cache_op(spinand, req);
+	if (ret)
+		return ret;
+
+	if (!ecc_enabled)
+		return 0;
+
+	return spinand_check_ecc_status(spinand, status);
+}
+
+static int spinand_write_page(struct spinand_device *spinand,
+			      const struct nand_page_io_req *req)
+{
+	u8 status;
+	int ret;
+
+	ret = spinand_write_enable_op(spinand);
+	if (ret)
+		return ret;
+
+	ret = spinand_write_to_cache_op(spinand, req);
+	if (ret)
+		return ret;
+
+	ret = spinand_program_op(spinand, req);
+	if (ret)
+		return ret;
+
+	ret = spinand_wait(spinand, &status);
+	if (!ret && (status & STATUS_PROG_FAILED))
+		ret = -EIO;
+
+	return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int max_bitflips = 0;
+	struct nand_io_iter iter;
+	bool enable_ecc = false;
+	bool ecc_failed = false;
+	int ret = 0;
+
+	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
+		enable_ecc = true;
+
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+
+	nanddev_io_for_each_page(nand, from, ops, &iter) {
+		ret = spinand_select_target(spinand, iter.req.pos.target);
+		if (ret)
+			break;
+
+		ret = spinand_ecc_enable(spinand, enable_ecc);
+		if (ret)
+			break;
+
+		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+		if (ret < 0 && ret != -EBADMSG)
+			break;
+
+		if (ret == -EBADMSG) {
+			ecc_failed = true;
+			mtd->ecc_stats.failed++;
+			ret = 0;
+		} else {
+			mtd->ecc_stats.corrected += ret;
+			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+		}
+
+		ops->retlen += iter.req.datalen;
+		ops->oobretlen += iter.req.ooblen;
+	}
+
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+	if (ecc_failed && !ret)
+		ret = -EBADMSG;
+
+	return ret ? ret : max_bitflips;
+}
+
+static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct nand_io_iter iter;
+	bool enable_ecc = false;
+	int ret = 0;
+
+	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
+		enable_ecc = true;
+
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+
+	nanddev_io_for_each_page(nand, to, ops, &iter) {
+		ret = spinand_select_target(spinand, iter.req.pos.target);
+		if (ret)
+			break;
+
+		ret = spinand_ecc_enable(spinand, enable_ecc);
+		if (ret)
+			break;
+
+		ret = spinand_write_page(spinand, &iter.req);
+		if (ret)
+			break;
+
+		ops->retlen += iter.req.datalen;
+		ops->oobretlen += iter.req.ooblen;
+	}
+
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+
+	return ret;
+}
+
+static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	struct nand_page_io_req req = {
+		.pos = *pos,
+		.ooblen = 2,
+		.ooboffs = 0,
+		.oobbuf.in = spinand->oobbuf,
+		.mode = MTD_OPS_RAW,
+	};
+	int ret;
+
+	memset(spinand->oobbuf, 0, 2);
+	ret = spinand_select_target(spinand, pos->target);
+	if (ret)
+		return ret;
+
+	ret = spinand_read_page(spinand, &req, false);
+	if (ret)
+		return ret;
+
+	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+		return true;
+
+	return false;
+}
+
+static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+#ifndef __UBOOT__
+	struct spinand_device *spinand = nand_to_spinand(nand);
+#endif
+	struct nand_pos pos;
+	int ret;
+
+	nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+	ret = nanddev_isbad(nand, &pos);
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+	return ret;
+}
+
+static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	struct nand_page_io_req req = {
+		.pos = *pos,
+		.ooboffs = 0,
+		.ooblen = 2,
+		.oobbuf.out = spinand->oobbuf,
+	};
+	int ret;
+
+	/* Erase block before marking it bad. */
+	ret = spinand_select_target(spinand, pos->target);
+	if (ret)
+		return ret;
+
+	ret = spinand_write_enable_op(spinand);
+	if (ret)
+		return ret;
+
+	ret = spinand_erase_op(spinand, pos);
+	if (ret)
+		return ret;
+
+	memset(spinand->oobbuf, 0, 2);
+	return spinand_write_page(spinand, &req);
+}
+
+static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+#ifndef __UBOOT__
+	struct spinand_device *spinand = nand_to_spinand(nand);
+#endif
+	struct nand_pos pos;
+	int ret;
+
+	nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+	ret = nanddev_markbad(nand, &pos);
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+	return ret;
+}
+
+static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	u8 status;
+	int ret;
+
+	ret = spinand_select_target(spinand, pos->target);
+	if (ret)
+		return ret;
+
+	ret = spinand_write_enable_op(spinand);
+	if (ret)
+		return ret;
+
+	ret = spinand_erase_op(spinand, pos);
+	if (ret)
+		return ret;
+
+	ret = spinand_wait(spinand, &status);
+	if (!ret && (status & STATUS_ERASE_FAILED))
+		ret = -EIO;
+
+	return ret;
+}
+
+static int spinand_mtd_erase(struct mtd_info *mtd,
+			     struct erase_info *einfo)
+{
+#ifndef __UBOOT__
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+#endif
+	int ret;
+
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+	ret = nanddev_mtd_erase(mtd, einfo);
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+
+	return ret;
+}
+
+static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+{
+#ifndef __UBOOT__
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+#endif
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct nand_pos pos;
+	int ret;
+
+	nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+	mutex_lock(&spinand->lock);
+#endif
+	ret = nanddev_isreserved(nand, &pos);
+#ifndef __UBOOT__
+	mutex_unlock(&spinand->lock);
+#endif
+
+	return ret;
+}
+
+const struct spi_mem_op *
+spinand_find_supported_op(struct spinand_device *spinand,
+			  const struct spi_mem_op *ops,
+			  unsigned int nops)
+{
+	unsigned int i;
+
+	for (i = 0; i < nops; i++) {
+		if (spi_mem_supports_op(spinand->slave, &ops[i]))
+			return &ops[i];
+	}
+
+	return NULL;
+}
+
+static const struct nand_ops spinand_ops = {
+	.erase = spinand_erase,
+	.markbad = spinand_markbad,
+	.isbad = spinand_isbad,
+};
+
+static int spinand_manufacturer_detect(struct spinand_device *spinand)
+{
+	return -ENOTSUPP;
+}
+
+static int spinand_manufacturer_init(struct spinand_device *spinand)
+{
+	if (spinand->manufacturer->ops->init)
+		return spinand->manufacturer->ops->init(spinand);
+
+	return 0;
+}
+
+static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
+{
+	/* Release manufacturer private data */
+	if (spinand->manufacturer->ops->cleanup)
+		return spinand->manufacturer->ops->cleanup(spinand);
+}
+
+static const struct spi_mem_op *
+spinand_select_op_variant(struct spinand_device *spinand,
+			  const struct spinand_op_variants *variants)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	unsigned int i;
+
+	for (i = 0; i < variants->nops; i++) {
+		struct spi_mem_op op = variants->ops[i];
+		unsigned int nbytes;
+		int ret;
+
+		nbytes = nanddev_per_page_oobsize(nand) +
+			 nanddev_page_size(nand);
+
+		while (nbytes) {
+			op.data.nbytes = nbytes;
+			ret = spi_mem_adjust_op_size(spinand->slave, &op);
+			if (ret)
+				break;
+
+			if (!spi_mem_supports_op(spinand->slave, &op))
+				break;
+
+			nbytes -= op.data.nbytes;
+		}
+
+		if (!nbytes)
+			return &variants->ops[i];
+	}
+
+	return NULL;
+}
+
+/**
+ * spinand_match_and_init() - Try to find a match between a device ID and an
+ *			      entry in a spinand_info table
+ * @spinand: SPI NAND object
+ * @table: SPI NAND device description table
+ * @table_size: size of the device description table
+ *
+ * Should be used by SPI NAND manufacturer drivers when they want to find a
+ * match between a device ID retrieved through the READ_ID command and an
+ * entry in the SPI NAND description table. If a match is found, the spinand
+ * object will be initialized with information provided by the matching
+ * spinand_info entry.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_match_and_init(struct spinand_device *spinand,
+			   const struct spinand_info *table,
+			   unsigned int table_size, u8 devid)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	unsigned int i;
+
+	for (i = 0; i < table_size; i++) {
+		const struct spinand_info *info = &table[i];
+		const struct spi_mem_op *op;
+
+		if (devid != info->devid)
+			continue;
+
+		nand->memorg = table[i].memorg;
+		nand->eccreq = table[i].eccreq;
+		spinand->eccinfo = table[i].eccinfo;
+		spinand->flags = table[i].flags;
+		spinand->select_target = table[i].select_target;
+
+		op = spinand_select_op_variant(spinand,
+					       info->op_variants.read_cache);
+		if (!op)
+			return -ENOTSUPP;
+
+		spinand->op_templates.read_cache = op;
+
+		op = spinand_select_op_variant(spinand,
+					       info->op_variants.write_cache);
+		if (!op)
+			return -ENOTSUPP;
+
+		spinand->op_templates.write_cache = op;
+
+		op = spinand_select_op_variant(spinand,
+					       info->op_variants.update_cache);
+		spinand->op_templates.update_cache = op;
+
+		return 0;
+	}
+
+	return -ENOTSUPP;
+}
+
+static int spinand_detect(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int ret;
+
+	ret = spinand_reset_op(spinand);
+	if (ret)
+		return ret;
+
+	ret = spinand_read_id_op(spinand, spinand->id.data);
+	if (ret)
+		return ret;
+
+	spinand->id.len = SPINAND_MAX_ID_LEN;
+
+	ret = spinand_manufacturer_detect(spinand);
+	if (ret) {
+		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
+			spinand->id.data);
+		return ret;
+	}
+
+	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
+		dev_err(dev,
+			"SPI NANDs with more than one die must implement ->select_target()\n");
+		return -EINVAL;
+	}
+
+	dev_info(spinand->slave->dev,
+		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
+	dev_info(spinand->slave->dev,
+		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
+		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
+		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
+
+	return 0;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *region)
+{
+	return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *region)
+{
+	if (section)
+		return -ERANGE;
+
+	/* Reserve 2 bytes for the BBM. */
+	region->offset = 2;
+	region->length = 62;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+	.ecc = spinand_noecc_ooblayout_ecc,
+	.free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_init(struct spinand_device *spinand)
+{
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	int ret, i;
+
+	/*
+	 * We need a scratch buffer because the spi_mem interface requires that
+	 * buf passed in spi_mem_op->data.buf be DMA-able.
+	 */
+	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
+	if (!spinand->scratchbuf)
+		return -ENOMEM;
+
+	ret = spinand_detect(spinand);
+	if (ret)
+		goto err_free_bufs;
+
+	/*
+	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
+	 * may use this buffer for DMA access.
+	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
+	 */
+	spinand->databuf = kzalloc(nanddev_page_size(nand) +
+			       nanddev_per_page_oobsize(nand),
+			       GFP_KERNEL);
+	if (!spinand->databuf) {
+		ret = -ENOMEM;
+		goto err_free_bufs;
+	}
+
+	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
+
+	ret = spinand_init_cfg_cache(spinand);
+	if (ret)
+		goto err_free_bufs;
+
+	ret = spinand_init_quad_enable(spinand);
+	if (ret)
+		goto err_free_bufs;
+
+	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+	if (ret)
+		goto err_free_bufs;
+
+	ret = spinand_manufacturer_init(spinand);
+	if (ret) {
+		dev_err(dev,
+			"Failed to initialize the SPI NAND chip (err = %d)\n",
+			ret);
+		goto err_free_bufs;
+	}
+
+	/* After power up, all blocks are locked, so unlock them here. */
+	for (i = 0; i < nand->memorg.ntargets; i++) {
+		ret = spinand_select_target(spinand, i);
+		if (ret)
+			goto err_free_bufs;
+
+		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+		if (ret)
+			goto err_free_bufs;
+	}
+
+	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+	if (ret)
+		goto err_manuf_cleanup;
+
+	/*
+	 * Right now, we don't support ECC, so let the whole oob
+	 * area is available for user.
+	 */
+	mtd->_read_oob = spinand_mtd_read;
+	mtd->_write_oob = spinand_mtd_write;
+	mtd->_block_isbad = spinand_mtd_block_isbad;
+	mtd->_block_markbad = spinand_mtd_block_markbad;
+	mtd->_block_isreserved = spinand_mtd_block_isreserved;
+	mtd->_erase = spinand_mtd_erase;
+
+	if (spinand->eccinfo.ooblayout)
+		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+	else
+		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+	ret = mtd_ooblayout_count_freebytes(mtd);
+	if (ret < 0)
+		goto err_cleanup_nanddev;
+
+	mtd->oobavail = ret;
+
+	return 0;
+
+err_cleanup_nanddev:
+	nanddev_cleanup(nand);
+
+err_manuf_cleanup:
+	spinand_manufacturer_cleanup(spinand);
+
+err_free_bufs:
+	kfree(spinand->databuf);
+	kfree(spinand->scratchbuf);
+	return ret;
+}
+
+static void spinand_cleanup(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+
+	nanddev_cleanup(nand);
+	spinand_manufacturer_cleanup(spinand);
+	kfree(spinand->databuf);
+	kfree(spinand->scratchbuf);
+}
+
+static int spinand_probe(struct udevice *dev)
+{
+	struct spinand_device *spinand = dev_get_priv(dev);
+	struct spi_slave *slave = dev_get_parent_priv(dev);
+	struct mtd_info *mtd = dev_get_uclass_priv(dev);
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int ret;
+
+#ifndef __UBOOT__
+	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
+			       GFP_KERNEL);
+	if (!spinand)
+		return -ENOMEM;
+
+	spinand->spimem = mem;
+	spi_mem_set_drvdata(mem, spinand);
+	spinand_set_of_node(spinand, mem->spi->dev.of_node);
+	mutex_init(&spinand->lock);
+
+	mtd = spinand_to_mtd(spinand);
+	mtd->dev.parent = &mem->spi->dev;
+#else
+	nand->mtd = mtd;
+	mtd->priv = nand;
+	mtd->dev = dev;
+	mtd->name = malloc(20);
+	if (!mtd->name)
+		return -ENOMEM;
+	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
+	spinand->slave = slave;
+	spinand_set_of_node(spinand, dev->node.np);
+#endif
+
+	ret = spinand_init(spinand);
+	if (ret)
+		return ret;
+
+#ifndef __UBOOT__
+	ret = mtd_device_register(mtd, NULL, 0);
+#else
+	ret = add_mtd_device(mtd);
+#endif
+	if (ret)
+		goto err_spinand_cleanup;
+
+	return 0;
+
+err_spinand_cleanup:
+	spinand_cleanup(spinand);
+
+	return ret;
+}
+
+#ifndef __UBOOT__
+static int spinand_remove(struct udevice *slave)
+{
+	struct spinand_device *spinand;
+	struct mtd_info *mtd;
+	int ret;
+
+	spinand = spi_mem_get_drvdata(slave);
+	mtd = spinand_to_mtd(spinand);
+	free(mtd->name);
+
+	ret = mtd_device_unregister(mtd);
+	if (ret)
+		return ret;
+
+	spinand_cleanup(spinand);
+
+	return 0;
+}
+
+static const struct spi_device_id spinand_ids[] = {
+	{ .name = "spi-nand" },
+	{ /* sentinel */ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id spinand_of_ids[] = {
+	{ .compatible = "spi-nand" },
+	{ /* sentinel */ },
+};
+#endif
+
+static struct spi_mem_driver spinand_drv = {
+	.spidrv = {
+		.id_table = spinand_ids,
+		.driver = {
+			.name = "spi-nand",
+			.of_match_table = of_match_ptr(spinand_of_ids),
+		},
+	},
+	.probe = spinand_probe,
+	.remove = spinand_remove,
+};
+module_spi_mem_driver(spinand_drv);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
+MODULE_LICENSE("GPL v2");
+#endif /* __UBOOT__ */
+
+static const struct udevice_id spinand_ids[] = {
+	{ .compatible = "spi-nand" },
+	{ /* sentinel */ },
+};
+
+U_BOOT_DRIVER(spinand) = {
+	.name = "spi_nand",
+	.id = UCLASS_MTD,
+	.of_match = spinand_ids,
+	.priv_auto_alloc_size = sizeof(struct spinand_device),
+	.probe = spinand_probe,
+};
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 00000000000..ad59fc01ef9
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ *  Authors:
+ *	Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#ifndef __UBOOT__
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#else
+#include <common.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <linux/mtd/nand.h>
+#endif
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP						\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1),				\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1),		\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(ndummy, buf, len)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),				\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1),				\
+		   SPI_MEM_OP_ADDR(1, reg, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1),				\
+		   SPI_MEM_OP_ADDR(1, reg, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 2),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 2),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 4),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 4),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len)			\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len)			\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+/**
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4		0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4	0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK		0xa0
+#define BL_ALL_UNLOCKED		0x00
+
+/* configuration register */
+#define REG_CFG			0xb0
+#define CFG_OTP_ENABLE		BIT(6)
+#define CFG_ECC_ENABLE		BIT(4)
+#define CFG_QUAD_ENABLE		BIT(0)
+
+/* status register */
+#define REG_STATUS		0xc0
+#define STATUS_BUSY		BIT(0)
+#define STATUS_ERASE_FAILED	BIT(2)
+#define STATUS_PROG_FAILED	BIT(3)
+#define STATUS_ECC_MASK		GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS	(0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS	(1 << 4)
+#define STATUS_ECC_UNCOR_ERROR	(2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN	4
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ *	  be extended if required
+ * @len: ID length
+ *
+ * struct_spinand_id->data contains all bytes returned after a READ_ID command,
+ * including dummy bytes if the chip does not emit ID bytes right after the
+ * READ_ID command. The responsibility to extract real ID bytes is left to
+ * struct_manufacurer_ops->detect().
+ */
+struct spinand_id {
+	u8 data[SPINAND_MAX_ID_LEN];
+	int len;
+};
+
+/**
+ * struct manufacurer_ops - SPI NAND manufacturer specific operations
+ * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
+ *	    the core calls the struct_manufacurer_ops->detect() hook of each
+ *	    registered manufacturer until one of them return 1. Note that
+ *	    the first thing to check in this hook is that the manufacturer ID
+ *	    in struct_spinand_device->id matches the manufacturer whose
+ *	    ->detect() hook has been called. Should return 1 if there's a
+ *	    match, 0 if the manufacturer ID does not match and a negative
+ *	    error code otherwise. When true is returned, the core assumes
+ *	    that properties of the NAND chip (spinand->base.memorg and
+ *	    spinand->base.eccreq) have been filled
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be detected and initialized properly.
+ */
+struct spinand_manufacturer_ops {
+	int (*detect)(struct spinand_device *spinand);
+	int (*init)(struct spinand_device *spinand);
+	void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+	u8 id;
+	char *name;
+	const struct spinand_manufacturer_ops *ops;
+};
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+	const struct spi_mem_op *ops;
+	unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...)					\
+	const struct spinand_op_variants name = {			\
+		.ops = (struct spi_mem_op[]) { __VA_ARGS__ },		\
+		.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) /	\
+			sizeof(struct spi_mem_op),			\
+	}
+
+/**
+ * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
+ *		      chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ *		the number of corrected bitflips if correction was possible or
+ *		-EBADMSG if there are uncorrectable errors. I can also return
+ *		other negative error codes if the error is not caused by
+ *		uncorrectable bitflips
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+	int (*get_status)(struct spinand_device *spinand, u8 status);
+	const struct mtd_ooblayout_ops *ooblayout;
+};
+
+#define SPINAND_HAS_QE_BIT		BIT(0)
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ *		   multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+	const char *model;
+	u8 devid;
+	u32 flags;
+	struct nand_memory_organization memorg;
+	struct nand_ecc_req eccreq;
+	struct spinand_ecc_info eccinfo;
+	struct {
+		const struct spinand_op_variants *read_cache;
+		const struct spinand_op_variants *write_cache;
+		const struct spinand_op_variants *update_cache;
+	} op_variants;
+	int (*select_target)(struct spinand_device *spinand,
+			     unsigned int target);
+};
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update)		\
+	{								\
+		.read_cache = __read,					\
+		.write_cache = __write,					\
+		.update_cache = __update,				\
+	}
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status)			\
+	.eccinfo = {							\
+		.ooblayout = __ooblayout,				\
+		.get_status = __get_status,				\
+	}
+
+#define SPINAND_SELECT_TARGET(__func)					\
+	.select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants,	\
+		     __flags, ...)					\
+	{								\
+		.model = __model,					\
+		.devid = __id,						\
+		.memorg = __memorg,					\
+		.eccreq = __eccreq,					\
+		.op_variants = __op_variants,				\
+		.flags = __flags,					\
+		__VA_ARGS__						\
+	}
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @slave: pointer to the SPI slave object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ *		   a command addressing a page or an eraseblock embedded in
+ *		   this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ *		because the spi-mem interface explicitly requests that buffers
+ *		passed in spi_mem_op be DMA-able, so we can't based the bufs on
+ *		the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+	struct nand_device base;
+#ifndef __UBOOT__
+	struct spi_mem *spimem;
+	struct mutex lock;
+#else
+	struct spi_slave *slave;
+#endif
+	struct spinand_id id;
+	u32 flags;
+
+	struct {
+		const struct spi_mem_op *read_cache;
+		const struct spi_mem_op *write_cache;
+		const struct spi_mem_op *update_cache;
+	} op_templates;
+
+	int (*select_target)(struct spinand_device *spinand,
+			     unsigned int target);
+	unsigned int cur_target;
+
+	struct spinand_ecc_info eccinfo;
+
+	u8 *cfg_cache;
+	u8 *databuf;
+	u8 *oobbuf;
+	u8 *scratchbuf;
+	const struct spinand_manufacturer *manufacturer;
+	void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+	return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding an NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+	return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+	return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+				       const struct device_node *np)
+{
+	nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *dev,
+			   const struct spinand_info *table,
+			   unsigned int table_size, u8 devid);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+#endif /* __LINUX_MTD_SPINAND_H */
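
Once probed, the chip shows up as a regular MTD device named "spi-nand%d" (see spinand_probe() above), so generic MTD code can drive it without knowing anything about SPI NAND. A rough sketch, assuming a 2 KiB page and using only standard U-Boot MTD core calls (get_mtd_device_nm(), mtd_read(), put_mtd_device()); error handling is trimmed:

	/* Hypothetical helper: read the first page of the first SPI NAND. */
	#include <common.h>
	#include <linux/err.h>
	#include <linux/mtd/mtd.h>

	static int example_read_first_page(void)
	{
		struct mtd_info *mtd = get_mtd_device_nm("spi-nand0");
		static u8 buf[2048];	/* assumes a 2 KiB page size */
		size_t retlen;
		int ret;

		if (IS_ERR_OR_NULL(mtd))
			return -ENODEV;

		/* Routed through mtd->_read_oob, i.e. spinand_mtd_read(). */
		ret = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);

		/* -EUCLEAN: data corrected, but bitflips hit the threshold */
		if (ret == -EUCLEAN)
			ret = 0;

		put_mtd_device(mtd);

		return ret;
	}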
