Diffstat (limited to 'drivers/mtd/nand/spi/core.c')
-rw-r--r--	drivers/mtd/nand/spi/core.c	975
1 file changed, 712 insertions(+), 263 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 3a1e7e18736..0c435059546 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -32,6 +32,7 @@
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
#include <linux/printk.h>
+#include <linux/delay.h>
#endif
struct spinand_plat {
@@ -41,24 +42,9 @@ struct spinand_plat {
/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
-
-static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
-{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -70,9 +56,9 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
-static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -174,20 +160,12 @@ int spinand_select_target(struct spinand_device *spinand, unsigned int target)
return 0;
}
-static int spinand_init_cfg_cache(struct spinand_device *spinand)
+static int spinand_read_cfg(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct udevice *dev = spinand->slave->dev;
unsigned int target;
int ret;
- spinand->cfg_cache = devm_kzalloc(dev,
- sizeof(*spinand->cfg_cache) *
- nand->memorg.ntargets,
- GFP_KERNEL);
- if (!spinand->cfg_cache)
- return -ENOMEM;
-
for (target = 0; target < nand->memorg.ntargets; target++) {
ret = spinand_select_target(spinand, target);
if (ret)
@@ -206,6 +184,21 @@ static int spinand_init_cfg_cache(struct spinand_device *spinand)
return 0;
}
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct udevice *dev = spinand->slave->dev;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
bool enable = false;
@@ -229,9 +222,144 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .rfree = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ int ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ ret = spinand_check_ecc_status(spinand, spinand->last_wait_status);
+ if (ret == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+		 * Continuous reads don't allow us to get the details,
+		 * so we may exaggerate the actual number of corrected bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
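+		/* e.g. 4 pages read at worst-case strength 8 account for 32 corrected bits */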
+ mtd->ecc_stats.corrected += ret * pages;
+ }
+
+ return ret;
+}
+
+static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ spinand->last_wait_status = status;
+}
+
+int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -241,7 +369,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -249,27 +377,25 @@ static int spinand_load_page_op(struct spinand_device *spinand,
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -277,28 +403,40 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ if (req->mode == MTD_OPS_RAW)
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ else
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+
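+	/*
+	 * e.g. with a 2048-byte page, fls() returns 12, so the plane select
+	 * bit lands just above the 12-bit column address.
+	 */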
+ if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->slave, &op);
- if (ret)
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->slave, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous) {
+ /*
+			 * An SPI controller with broken continuous read
+			 * support was detected. Disable future use of
+			 * continuous reads and return -EAGAIN so the read
+			 * is retried in regular mode.
+ */
+ spinand->cont_read_possible = false;
+ return -EAGAIN;
+ }
}
if (req->datalen)
@@ -322,14 +460,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
- unsigned int nbytes = 0;
- void *buf = NULL;
- u16 column = 0;
- int ret;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
+ void *buf = spinand->databuf;
+ ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -337,20 +473,16 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
+ *
+	 * Only reset the data buffer manually; the OOB buffer is prepared by
+	 * the ECC engine's ->prepare_io_req() callback.
*/
- memset(spinand->databuf, 0xff,
- nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand));
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nanddev_page_size(nand));
- if (req->datalen) {
+ if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- nbytes = adjreq.datalen;
- buf = spinand->databuf;
- }
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -361,52 +493,27 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
-
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- nbytes += nanddev_per_page_oobsize(nand);
- if (!buf) {
- buf = spinand->oobbuf;
- column = nanddev_page_size(nand);
- }
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ if (req->mode == MTD_OPS_RAW)
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ else
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
+ if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
-
- ret = spi_mem_adjust_op_size(spinand->slave, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(spinand->slave, &op);
- if (ret)
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ if (!ret || ret > nbytes)
+ return -EIO;
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation resets
- * the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
return 0;
@@ -417,7 +524,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -425,28 +532,48 @@ static int spinand_program_op(struct spinand_device *spinand,
static int spinand_erase_op(struct spinand_device *spinand,
const struct nand_pos *pos)
{
- struct nand_device *nand = &spinand->base;
+ struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * This function polls the status register (REG_STATUS) and returns when
+ * the STATUS_BUSY bit is cleared or when the timeout has expired.
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
unsigned long start, stop;
u8 status;
int ret;
+ udelay(initial_delay_us);
start = get_timer(0);
- stop = 400;
+ stop = SPINAND_WAITRDY_TIMEOUT_MS;
do {
+ schedule();
+
ret = spinand_read_status(spinand, &status);
if (ret)
return ret;
if (!(status & STATUS_BUSY))
goto out;
+
+ udelay(poll_delay_us);
} while (get_timer(start) < stop);
/*
@@ -467,9 +594,8 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(naddr, ndummy,
- spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->slave, &op);
@@ -481,14 +607,17 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->slave, &op);
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -496,66 +625,64 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
-
- if (spinand->eccinfo.get_status)
- return spinand->eccinfo.get_status(spinand, status);
-
- switch (status & STATUS_ECC_MASK) {
- case STATUS_ECC_NO_BITFLIPS:
- return 0;
-
- case STATUS_ECC_HAS_BITFLIPS:
- /*
- * We have no way to know exactly how many bitflips have been
- * fixed, so let's return the maximum possible value so that
- * wear-leveling layers move the data immediately.
- */
- return nand->eccreq.strength;
-
- case STATUS_ECC_UNCOR_ERROR:
- return -EBADMSG;
-
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- bool ecc_enabled)
-{
u8 status;
int ret;
+ ret = spinand_ondie_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
+ spinand_ondie_ecc_save_status(nand, status);
+
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
- if (!ecc_enabled)
- return 0;
-
- return spinand_check_ecc_status(spinand, status);
+ return spinand_ondie_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = spinand_ondie_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
@@ -568,63 +695,234 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
- if (!ret && (status & STATUS_PROG_FAILED))
- ret = -EIO;
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (ret)
+ return ret;
- return ret;
+ if (status & STATUS_PROG_FAILED)
+ return -EIO;
+
+ return spinand_ondie_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
- unsigned int max_bitflips = 0;
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
bool ecc_failed = false;
- int ret = 0;
+ unsigned int retry_mode = 0;
+ int ret;
- if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
- enable_ecc = true;
+ old_stats = mtd->ecc_stats;
-#ifndef __UBOOT__
- mutex_lock(&spinand->lock);
-#endif
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
- nanddev_io_for_each_page(nand, from, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
schedule();
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
- ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+read_retry:
+ ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG) {
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- mtd->ecc_stats.failed++;
} else {
- mtd->ecc_stats.corrected += ret;
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret;
+}
+
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+	 * Each data read must be a multiple of 4 bytes and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ schedule();
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_ondie_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = spinand_ondie_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee the SPI controller drivers will effectively deassert the CS
+	 * when we expect them to, so take the register-based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+	/* OOBs cannot be retrieved, so an external/on-host ECC engine won't work */
+	if (spinand->set_cont_read)
+		spinand->cont_read_possible = true;
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+ * Continuous reads never cross LUN boundaries. Some devices don't
+	 * support crossing plane boundaries. Some devices don't even support
+	 * crossing block boundaries. The common case being to read through UBI,
+	 * we will very rarely read two consecutive blocks or more, so it is safer
+ * and easier (can be improved) to only enable continuous reads when
+ * reading within the same erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
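+	/* Continuous reads only pay off when more than one page is read */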
+ return start_pos.page < end_pos.page;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ unsigned int max_bitflips = 0;
+ int ret;
+
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+
+ if (spinand_use_cont_read(mtd, from, ops)) {
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ if (ret == -EAGAIN && !spinand->cont_read_possible) {
+ /*
+			 * An SPI controller with broken continuous read support
+			 * was detected (see spinand_read_from_cache_op()), so
+			 * repeat the read in regular mode.
+ */
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+ } else {
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
}
#ifndef __UBOOT__
mutex_unlock(&spinand->lock);
#endif
- if (ecc_failed && !ret)
- ret = -EBADMSG;
return ret ? ret : max_bitflips;
}
@@ -635,23 +933,22 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
#ifndef __UBOOT__
mutex_lock(&spinand->lock);
#endif
- nanddev_io_for_each_page(nand, to, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
schedule();
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -681,9 +978,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req, false);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -727,11 +1032,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -751,6 +1059,7 @@ static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
#ifndef __UBOOT__
mutex_unlock(&spinand->lock);
#endif
+
return ret;
}
@@ -772,7 +1081,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
@@ -819,6 +1132,91 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static struct spi_mem_dirmap_desc *spinand_create_rdesc(
+ struct spinand_device *spinand,
+ struct spi_mem_dirmap_info *info)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_desc *desc = NULL;
+
+ if (spinand->cont_read_possible) {
+ /*
+		 * The SPI controller may return an error if info->length is
+		 * too large.
+ */
+ info->length = nanddev_eraseblock_size(nand);
+ desc = spi_mem_dirmap_create(spinand->slave, info);
+ }
+
+ if (IS_ERR_OR_NULL(desc)) {
+ /*
+		 * Continuous reading is not supported by the flash or
+		 * its SPI controller, so use regular reading.
+ */
+ spinand->cont_read_possible = false;
+
+ info->length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand);
+ desc = spi_mem_dirmap_create(spinand->slave, info);
+ }
+
+ return desc;
+}
+
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = { 0 };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = spi_mem_dirmap_create(spinand->slave, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = spinand_create_rdesc(spinand, &info);
+ if (IS_ERR(desc)) {
+ spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
+ return PTR_ERR(desc);
+ }
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
+ spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
+
+ return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(spinand->slave->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -826,13 +1224,17 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &alliancememory_spinand_manufacturer,
+ &ato_spinand_manufacturer,
+ &esmt_c8_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
- &esmt_c8_spinand_manufacturer,
&xtx_spinand_manufacturer,
};
@@ -860,7 +1262,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
@@ -894,8 +1296,19 @@ static int spinand_id_detect(struct spinand_device *spinand)
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
- if (spinand->manufacturer->ops->init)
- return spinand->manufacturer->ops->init(spinand);
+ int ret;
+
+ if (spinand->manufacturer->ops->init) {
+ ret = spinand->manufacturer->ops->init(spinand);
+ if (ret)
+ return ret;
+ }
+
+ if (spinand->configure_chip) {
+ ret = spinand->configure_chip(spinand);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -912,10 +1325,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -932,13 +1348,17 @@ spinand_select_op_variant(struct spinand_device *spinand,
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(&op);
}
- if (!nbytes)
- return &variants->ops[i];
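+		/* Among the variants the controller supports, keep the fastest one */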
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
static int spinand_setup_slave(struct spinand_device *spinand,
@@ -994,11 +1414,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
return ret;
nand->memorg = table[i].memorg;
- nand->eccreq = table[i].eccreq;
+ nanddev_set_ecc_requirements(nand, &table[i].eccreq);
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->configure_chip = table[i].configure_chip;
+ spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1057,35 +1483,55 @@ static int spinand_detect(struct spinand_device *spinand)
return 0;
}
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int spinand_init_flash(struct spinand_device *spinand)
{
- return -ERANGE;
-}
+ struct udevice *dev = spinand->slave->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret, i;
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- if (section)
- return -ERANGE;
+ ret = spinand_read_cfg(spinand);
+ if (ret)
+ return ret;
- /* Reserve 2 bytes for the BBM. */
- region->offset = 2;
- region->length = 62;
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
- return 0;
-}
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
- .ecc = spinand_noecc_ooblayout_ecc,
- .rfree = spinand_noecc_ooblayout_free,
-};
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ break;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ spinand_manufacturer_cleanup(spinand);
+
+ return ret;
+}
static int spinand_init(struct spinand_device *spinand)
{
+ struct udevice *dev = spinand->slave->dev;
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct nand_device *nand = mtd_to_nanddev(mtd);
- int ret, i;
+ int ret;
/*
* We need a scratch buffer because the spi_mem interface requires that
@@ -1104,9 +1550,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
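+	/*
+	 * Size the buffer for a full erase block: a continuous read never
+	 * crosses block boundaries and may transfer up to that much at once.
+	 */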
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
@@ -1118,41 +1563,25 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_init_quad_enable(spinand);
+ ret = spinand_init_flash(spinand);
if (ret)
goto err_free_bufs;
- ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
- if (ret)
- goto err_free_bufs;
-
- ret = spinand_manufacturer_init(spinand);
- if (ret) {
- dev_err(spinand->slave->dev,
- "Failed to initialize the SPI NAND chip (err = %d)\n",
- ret);
- goto err_free_bufs;
- }
-
- /* After power up, all blocks are locked, so unlock them here. */
- for (i = 0; i < nand->memorg.ntargets; i++) {
- ret = spinand_select_target(spinand, i);
- if (ret)
- goto err_manuf_cleanup;
-
- ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
- if (ret)
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
+ spinand_ecc_enable(spinand, false);
+ ret = spinand_ondie_ecc_init_ctx(nand);
+ if (ret)
+ goto err_cleanup_nanddev;
+
/*
- * Right now, we don't support ECC, so let the whole oob
- * area is available for user.
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
*/
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1160,19 +1589,36 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
- if (spinand->eccinfo.ooblayout)
- mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
- else
- mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
- goto err_cleanup_nanddev;
+ goto err_cleanup_ecc_engine;
mtd->oobavail = ret;
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
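+	/*
+	 * -EUCLEAN is reported once 3/4 of the ECC strength is reached,
+	 * e.g. at 6 bitflips for a strength of 8.
+	 */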
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_cleanup_ecc_engine;
+ }
+
return 0;
+err_cleanup_ecc_engine:
+ spinand_ondie_ecc_cleanup_ctx(nand);
+
err_cleanup_nanddev:
nanddev_cleanup(nand);
@@ -1189,6 +1635,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ spinand_ondie_ecc_cleanup_ctx(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
@@ -1294,12 +1741,14 @@ static const struct spi_device_id spinand_ids[] = {
{ .name = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(spi, spinand_ids);
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
{ .compatible = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif
static struct spi_mem_driver spinand_drv = {