-rw-r--r--cmd/Kconfig38
-rw-r--r--cmd/mtd.c480
-rw-r--r--drivers/memory/Kconfig2
-rw-r--r--drivers/mtd/Kconfig1
-rw-r--r--drivers/mtd/nand/raw/Kconfig15
-rw-r--r--drivers/mtd/nand/spi/Makefile5
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c155
-rw-r--r--drivers/mtd/nand/spi/ato.c88
-rw-r--r--drivers/mtd/nand/spi/core.c975
-rw-r--r--drivers/mtd/nand/spi/esmt.c123
-rw-r--r--drivers/mtd/nand/spi/foresee.c107
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c86
-rw-r--r--drivers/mtd/nand/spi/macronix.c289
-rw-r--r--drivers/mtd/nand/spi/micron.c180
-rw-r--r--drivers/mtd/nand/spi/otp.c369
-rw-r--r--drivers/mtd/nand/spi/paragon.c24
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c149
-rw-r--r--drivers/mtd/nand/spi/toshiba.c63
-rw-r--r--drivers/mtd/nand/spi/winbond.c361
-rw-r--r--drivers/mtd/nand/spi/xtx.c20
-rw-r--r--drivers/spi/spi-mem.c45
-rw-r--r--include/linux/mtd/nand.h157
-rw-r--r--include/linux/mtd/spinand.h374
-rw-r--r--include/spi-mem.h93
24 files changed, 3675 insertions, 524 deletions
diff --git a/cmd/Kconfig b/cmd/Kconfig
index eb615552a00..dcf63f423fd 100644
--- a/cmd/Kconfig
+++ b/cmd/Kconfig
@@ -1513,6 +1513,44 @@ config CMD_MTD_OTP
help
MTD commands for OTP access.
+config CMD_MTD_MARKBAD
+ bool "mtd markbad"
+ depends on CMD_MTD
+ help
+ MTD markbad command support.
+
+ This is a clone of "nand markbad" command, but for 'mtd' subsystem.
+
+config CMD_MTD_NAND_WRITE_TEST
+ bool "mtd nand_write_test (destructive)"
+ depends on CMD_MTD
+ help
+ MTD nand_write_test command support.
+
+ Writes blocks of NAND flash with different patterns.
+ This is useful to determine if a block that caused a write error
+ is still good or should be marked as bad.
+
+ This is a clone of "nand torture" command, but for 'mtd' subsystem.
+
+ WARNING: This test will destroy any data on the blocks being tested.
+
+config CMD_MTD_NAND_READ_TEST
+ bool "mtd nand_read_test"
+ depends on CMD_MTD
+ help
+ MTD nand_read_test command support.
+
+ Reads blocks of NAND flash in normal and raw modes and compares
+ the results. The following statuses can be reported for a block:
+ * non-ECC reading failed,
+ * ECC reading failed,
+ * block is bad,
+ * bitflip count is above the maximum,
+ * actual number of bitflips is above the reported one,
+ * bitflip count reached its maximum value,
+ * block is OK.
+
config CMD_MUX
bool "mux"
depends on MULTIPLEXER
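For orientation, a typical session with the new commands might look like this (the device name and offsets are illustrative, not taken from the patch; the markbad output string matches the implementation below):

    => mtd markbad spi-nand0 0x7e0000
    block 0x007e0000 successfully marked as bad
    => mtd nand_write_test spi-nand0 0x100000 0x40000
    => mtd nand_read_test spi-nand0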
diff --git a/cmd/mtd.c b/cmd/mtd.c
index 2520b89eed2..acd886da6da 100644
--- a/cmd/mtd.c
+++ b/cmd/mtd.c
@@ -20,6 +20,7 @@
#include <time.h>
#include <dm/devres.h>
#include <linux/err.h>
+#include <memalign.h>
#include <linux/ctype.h>
@@ -468,7 +469,7 @@ static int do_mtd_io(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
bool dump, read, raw, woob, benchmark, write_empty_pages, has_pages = false;
- u64 start_off, off, len, remaining, default_len;
+ u64 start_off, off, len, remaining, default_len, speed;
unsigned long bench_start, bench_end;
struct mtd_oob_ops io_op = {};
uint user_addr = 0, npages;
@@ -594,9 +595,10 @@ static int do_mtd_io(struct cmd_tbl *cmdtp, int flag, int argc,
if (benchmark && bench_start) {
bench_end = timer_get_us();
+ speed = (len * 1000000) / (bench_end - bench_start);
printf("%s speed: %lukiB/s\n",
read ? "Read" : "Write",
- ((io_op.len * 1000000) / (bench_end - bench_start)) / 1024);
+ (unsigned long)(speed / 1024));
}
led_activity_off();
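A note on the benchmark fix above: the rate is now computed from the total requested length len rather than io_op.len (presumably because the latter no longer reflects the full transfer once the loop has split it into per-page operations). As a sanity check on the arithmetic: reading len = 0x100000 bytes (1 MiB) in 500000 us gives speed = (1048576 * 1000000) / 500000 = 2097152 B/s, printed as 2048 kiB/s.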
@@ -711,6 +713,439 @@ out_put_mtd:
return ret;
}
+#ifdef CONFIG_CMD_MTD_MARKBAD
+static int do_mtd_markbad(struct cmd_tbl *cmdtp, int flag, int argc,
+ char *const argv[])
+{
+ struct mtd_info *mtd;
+ loff_t off;
+ int ret = 0;
+
+ if (argc < 3)
+ return CMD_RET_USAGE;
+
+ mtd = get_mtd_by_name(argv[1]);
+ if (IS_ERR_OR_NULL(mtd))
+ return CMD_RET_FAILURE;
+
+ if (!mtd_can_have_bb(mtd)) {
+ printf("Only NAND-based devices can have bad blocks\n");
+ ret = CMD_RET_FAILURE;
+ goto out_put_mtd;
+ }
+
+ argc -= 2;
+ argv += 2;
+ while (argc > 0) {
+ off = hextoul(argv[0], NULL);
+ if (!mtd_is_aligned_with_block_size(mtd, off)) {
+ printf("Offset not aligned with a block (0x%x)\n",
+ mtd->erasesize);
+ ret = CMD_RET_FAILURE;
+ goto out_put_mtd;
+ }
+
+ ret = mtd_block_markbad(mtd, off);
+ if (ret) {
+ printf("block 0x%08llx NOT marked as bad! ERROR %d\n",
+ off, ret);
+ ret = CMD_RET_FAILURE;
+ } else {
+ printf("block 0x%08llx successfully marked as bad\n",
+ off);
+ }
+ --argc;
+ ++argv;
+ }
+
+out_put_mtd:
+ put_mtd_device(mtd);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_CMD_MTD_NAND_WRITE_TEST
+/**
+ * nand_check_pattern:
+ *
+ * Check if buffer contains only a certain byte pattern.
+ *
+ * @param buf buffer to check
+ * @param patt the pattern to check
+ * @param size buffer size in bytes
+ * Return: 1 if there are only patt bytes in buf
+ * 0 if something else was found
+ */
+static int nand_check_pattern(const u_char *buf, u_char patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (buf[i] != patt)
+ return 0;
+ return 1;
+}
+
+/**
+ * nand_write_test:
+ *
+ * Writes a block of NAND flash with different patterns.
+ * This is useful to determine if a block that caused a write error is still
+ * good or should be marked as bad.
+ *
+ * @param mtd nand mtd instance
+ * @param offset offset in flash
+ * Return: 0 if the block is still good
+ */
+static int nand_write_test(struct mtd_info *mtd, loff_t offset)
+{
+ u_char patterns[] = {0xa5, 0x5a, 0x00};
+ struct erase_info instr = {
+ .mtd = mtd,
+ .addr = offset,
+ .len = mtd->erasesize,
+ };
+ size_t retlen;
+ int err, ret = -1, i, patt_count;
+ u_char *buf;
+
+ if ((offset & (mtd->erasesize - 1)) != 0) {
+ puts("Attempt to torture a block at a non block-aligned offset\n");
+ return -EINVAL;
+ }
+
+ if (offset + mtd->erasesize > mtd->size) {
+ puts("Attempt to torture a block outside the flash area\n");
+ return -EINVAL;
+ }
+
+ patt_count = ARRAY_SIZE(patterns);
+
+ buf = malloc_cache_aligned(mtd->erasesize);
+ if (buf == NULL) {
+ puts("Out of memory for erase block buffer\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < patt_count; i++) {
+ err = mtd_erase(mtd, &instr);
+ if (err) {
+ printf("%s: erase() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ /* Make sure the block contains only 0xff bytes */
+ err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
+ if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
+ printf("%s: read() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = nand_check_pattern(buf, 0xff, mtd->erasesize);
+ if (!err) {
+ printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
+ offset);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(buf, patterns[i], mtd->erasesize);
+ err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
+ if (err || retlen != mtd->erasesize) {
+ printf("%s: write() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
+ if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
+ printf("%s: read() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = nand_check_pattern(buf, patterns[i], mtd->erasesize);
+ if (!err) {
+ printf("Pattern 0x%.2x checking failed for block at "
+ "0x%llx\n", patterns[i], offset);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ free(buf);
+ return ret;
+}
+
+static int do_nand_write_test(struct cmd_tbl *cmdtp, int flag, int argc,
+ char *const argv[])
+{
+ struct mtd_info *mtd;
+ loff_t off, len;
+ int ret = 0;
+ unsigned int failed = 0, passed = 0;
+
+ if (argc < 2)
+ return CMD_RET_USAGE;
+
+ mtd = get_mtd_by_name(argv[1]);
+ if (IS_ERR_OR_NULL(mtd))
+ return CMD_RET_FAILURE;
+
+ if (!mtd_can_have_bb(mtd)) {
+ printf("Only NAND-based devices can be tortured\n");
+ ret = CMD_RET_FAILURE;
+ goto out_put_mtd;
+ }
+
+ argc -= 2;
+ argv += 2;
+
+ off = argc > 0 ? hextoul(argv[0], NULL) : 0;
+ len = argc > 1 ? hextoul(argv[1], NULL) : mtd->size;
+
+ if (!mtd_is_aligned_with_block_size(mtd, off)) {
+ printf("Offset not aligned with a block (0x%x)\n",
+ mtd->erasesize);
+ ret = CMD_RET_FAILURE;
+ goto out_put_mtd;
+ }
+
+ if (!mtd_is_aligned_with_block_size(mtd, len)) {
+ printf("Size not a multiple of a block (0x%x)\n",
+ mtd->erasesize);
+ ret = CMD_RET_FAILURE;
+ goto out_put_mtd;
+ }
+
+ printf("\nNAND write test: device '%s' offset 0x%llx size 0x%llx (block size 0x%x)\n",
+ mtd->name, off, len, mtd->erasesize);
+ while (len > 0) {
+ printf("\r block at %llx ", off);
+ if (mtd_block_isbad(mtd, off)) {
+ printf("marked bad, skipping\n");
+ } else {
+ ret = nand_write_test(mtd, off);
+ if (ret) {
+ failed++;
+ printf("failed\n");
+ } else {
+ passed++;
+ }
+ }
+ off += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ printf("\n Passed: %u, failed: %u\n", passed, failed);
+ if (failed != 0)
+ ret = CMD_RET_FAILURE;
+
+out_put_mtd:
+ put_mtd_device(mtd);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_CMD_MTD_NAND_READ_TEST
+enum nand_read_status {
+ NAND_READ_STATUS_UNKNOWN = 0,
+ NAND_READ_STATUS_NONECC_READ_FAIL,
+ NAND_READ_STATUS_ECC_READ_FAIL,
+ NAND_READ_STATUS_BAD_BLOCK,
+ NAND_READ_STATUS_BITFLIP_ABOVE_MAX,
+ NAND_READ_STATUS_BITFLIP_MISMATCH,
+ NAND_READ_STATUS_BITFLIP_MAX,
+ NAND_READ_STATUS_UNRELIABLE,
+ NAND_READ_STATUS_OK,
+};
+
+/* test_buf MUST be at least 2 * blocksize bytes */
+static enum nand_read_status nand_read_block_check(struct mtd_info *mtd,
+ loff_t off,
+ size_t blocksize,
+ u_char *test_buf)
+{
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_RAW,
+ .len = blocksize,
+ .datbuf = test_buf,
+ };
+ int i, d, ret, len, pos, cnt, max;
+
+ if (blocksize % mtd->writesize != 0) {
+ printf("\r block at 0x%llx: bad block size\n", off);
+ return NAND_READ_STATUS_UNKNOWN;
+ }
+
+ ret = mtd->_read_oob(mtd, off, &ops);
+ if (ret < 0) {
+ printf("\r block at 0x%llx: non-ecc reading error %d\n",
+ off, ret);
+ return NAND_READ_STATUS_NONECC_READ_FAIL;
+ }
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.datbuf = test_buf + blocksize;
+
+ ret = mtd->_read_oob(mtd, off, &ops);
+ if (ret == -EBADMSG) {
+ printf("\r block at 0x%llx: bad block\n", off);
+ return NAND_READ_STATUS_BAD_BLOCK;
+ }
+
+ if (ret < 0) {
+ printf("\r block at 0x%llx: ecc reading error %d\n", off, ret);
+ return NAND_READ_STATUS_ECC_READ_FAIL;
+ }
+
+ if (mtd->ecc_strength == 0)
+ return NAND_READ_STATUS_OK;
+
+ if (ret > mtd->ecc_strength) {
+ printf("\r block at 0x%llx: returned bit-flips value %d "
+ "is above maximum value %d\n",
+ off, ret, mtd->ecc_strength);
+ return NAND_READ_STATUS_BITFLIP_ABOVE_MAX;
+ }
+
+ max = 0;
+ pos = 0;
+ len = blocksize;
+ while (len > 0) {
+ cnt = 0;
+ for (i = 0; i < mtd->ecc_step_size; i++) {
+ d = test_buf[pos + i] ^ test_buf[blocksize + pos + i];
+ if (d == 0)
+ continue;
+
+ while (d > 0) {
+ d &= (d - 1);
+ cnt++;
+ }
+ }
+ if (cnt > max)
+ max = cnt;
+
+ len -= mtd->ecc_step_size;
+ pos += mtd->ecc_step_size;
+ }
+
+ if (max > ret) {
+ printf("\r block at 0x%llx: bitflip mismatch, "
+ "read %d but actual %d\n", off, ret, max);
+ return NAND_READ_STATUS_BITFLIP_MISMATCH;
+ }
+
+ if (ret == mtd->ecc_strength) {
+ printf("\r block at 0x%llx: max bitflip reached, "
+ "block is unreliable\n", off);
+ return NAND_READ_STATUS_BITFLIP_MAX;
+ }
+
+ if (ret >= mtd->bitflip_threshold) {
+ printf("\r block at 0x%llx: bitflip threshold reached, "
+ "block is unreliable\n", off);
+ return NAND_READ_STATUS_UNRELIABLE;
+ }
+
+ return NAND_READ_STATUS_OK;
+}
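The XOR loop above compares the raw and ECC-corrected copies of each ECC step and counts differing bits with Kernighan's method: d &= (d - 1) clears the lowest set bit, so the inner while runs once per bitflip. A minimal standalone sketch of that primitive (reference only, not part of the patch):

    /* Count set bits by repeatedly clearing the lowest set one. */
    static int popcount8(unsigned char d)
    {
            int cnt = 0;

            while (d) {
                    d &= (d - 1);   /* clear lowest set bit */
                    cnt++;
            }
            return cnt;             /* e.g. popcount8(0xa5) == 4 */
    }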
+
+static int do_mtd_nand_read_test(struct cmd_tbl *cmdtp, int flag, int argc,
+ char *const argv[])
+{
+ struct mtd_info *mtd;
+ u64 off, blocks;
+ int stat[NAND_READ_STATUS_OK + 1];
+ enum nand_read_status ret;
+ u_char *buf;
+
+ if (argc < 2)
+ return CMD_RET_USAGE;
+
+ mtd = get_mtd_by_name(argv[1]);
+ if (IS_ERR_OR_NULL(mtd))
+ return CMD_RET_FAILURE;
+
+ if (!mtd_can_have_bb(mtd)) {
+ printf("Only NAND-based devices can be checked\n");
+ goto test_error;
+ }
+
+ if (!mtd->_read_oob) {
+ printf("RAW reading is not supported\n");
+ goto test_error;
+ }
+
+ buf = malloc_cache_aligned(2 * mtd->erasesize);
+ if (!buf) {
+ printf("Can't allocate memory for the test\n");
+ goto test_error;
+ }
+
+ blocks = mtd->size;
+ do_div(blocks, mtd->erasesize);
+
+ printf("ECC strength: %d\n", mtd->ecc_strength);
+ printf("ECC theshold: %d\n", mtd->bitflip_threshold);
+ printf("ECC step size: %d\n", mtd->ecc_step_size);
+ printf("Erase block size: 0x%x\n", mtd->erasesize);
+ printf("Total blocks: %lld\n", blocks);
+
+ printf("\nworking...\n");
+ memset(stat, 0, sizeof(stat));
+ for (off = 0; off < mtd->size; off += mtd->erasesize) {
+ ret = nand_read_block_check(mtd, off, mtd->erasesize, buf);
+ stat[ret]++;
+
+ switch (ret) {
+ case NAND_READ_STATUS_BAD_BLOCK:
+ case NAND_READ_STATUS_BITFLIP_MAX:
+ case NAND_READ_STATUS_UNRELIABLE:
+ if (!mtd_block_isbad(mtd, off))
+ printf("\r block at 0x%llx: should be marked "
+ "as BAD\n", off);
+ break;
+
+ case NAND_READ_STATUS_OK:
+ if (mtd_block_isbad(mtd, off))
+ printf("\r block at 0x%llx: marked as BAD, but "
+ "probably is GOOD\n", off);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ free(buf);
+
+ put_mtd_device(mtd);
+ printf("\n");
+ printf("results:\n");
+ printf(" Good blocks: %d\n", stat[NAND_READ_STATUS_OK]);
+ printf(" Physically bad blocks: %d\n", stat[NAND_READ_STATUS_BAD_BLOCK]);
+ printf(" Unreliable blocks: %d\n", stat[NAND_READ_STATUS_BITFLIP_MAX] +
+ stat[NAND_READ_STATUS_UNRELIABLE]);
+ printf(" Non checked blocks: %d\n", stat[NAND_READ_STATUS_UNKNOWN]);
+ printf(" Failed to check blocks: %d\n", stat[NAND_READ_STATUS_NONECC_READ_FAIL] +
+ stat[NAND_READ_STATUS_ECC_READ_FAIL]);
+ printf(" Suspictious blocks: %d\n", stat[NAND_READ_STATUS_BITFLIP_ABOVE_MAX] +
+ stat[NAND_READ_STATUS_BITFLIP_MISMATCH]);
+ return CMD_RET_SUCCESS;
+
+test_error:
+ put_mtd_device(mtd);
+ return CMD_RET_FAILURE;
+}
+#endif
+
static int do_mtd_bad(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
@@ -781,18 +1216,27 @@ static int mtd_name_complete(int argc, char *const argv[], char last_char,
U_BOOT_LONGHELP(mtd,
"- generic operations on memory technology devices\n\n"
"mtd list\n"
- "mtd read[.raw][.oob] <name> <addr> [<off> [<size>]]\n"
- "mtd dump[.raw][.oob] <name> [<off> [<size>]]\n"
- "mtd write[.raw][.oob][.dontskipff] <name> <addr> [<off> [<size>]]\n"
- "mtd erase[.dontskipbad] <name> [<off> [<size>]]\n"
+ "mtd read[.raw][.oob][.benchmark] <name> <addr> [<off> [<size>]]\n"
+ "mtd dump[.raw][.oob] <name> [<off> [<size>]]\n"
+ "mtd write[.raw][.oob][.dontskipff][.benchmark] <name> <addr> [<off> [<size>]]\n"
+ "mtd erase[.dontskipbad] <name> [<off> [<size>]]\n"
"\n"
"Specific functions:\n"
- "mtd bad <name>\n"
+ "mtd bad <name>\n"
#if CONFIG_IS_ENABLED(CMD_MTD_OTP)
- "mtd otpread <name> [u|f] <off> <size>\n"
- "mtd otpwrite <name> <off> <hex string>\n"
- "mtd otplock <name> <off> <size>\n"
- "mtd otpinfo <name> [u|f]\n"
+ "mtd otpread <name> [u|f] <off> <size>\n"
+ "mtd otpwrite <name> <off> <hex string>\n"
+ "mtd otplock <name> <off> <size>\n"
+ "mtd otpinfo <name> [u|f]\n"
+#endif
+#if CONFIG_IS_ENABLED(CMD_MTD_MARKBAD)
+ "mtd markbad <name> <off> [<off> ...]\n"
+#endif
+#if CONFIG_IS_ENABLED(CMD_MTD_NAND_WRITE_TEST)
+ "mtd nand_write_test <name> [<off> [<size>]]\n"
+#endif
+#if CONFIG_IS_ENABLED(CMD_MTD_NAND_READ_TEST)
+ "mtd nand_read_test <name>\n"
#endif
"\n"
"With:\n"
@@ -827,5 +1271,19 @@ U_BOOT_CMD_WITH_SUBCMDS(mtd, "MTD utils", mtd_help_text,
mtd_name_complete),
U_BOOT_SUBCMD_MKENT_COMPLETE(erase, 4, 0, do_mtd_erase,
mtd_name_complete),
+#if CONFIG_IS_ENABLED(CMD_MTD_MARKBAD)
+ U_BOOT_SUBCMD_MKENT_COMPLETE(markbad, 20, 0, do_mtd_markbad,
+ mtd_name_complete),
+#endif
+#if CONFIG_IS_ENABLED(CMD_MTD_NAND_WRITE_TEST)
+ U_BOOT_SUBCMD_MKENT_COMPLETE(nand_write_test, 4, 0,
+ do_nand_write_test,
+ mtd_name_complete),
+#endif
+#if CONFIG_IS_ENABLED(CMD_MTD_NAND_READ_TEST)
+ U_BOOT_SUBCMD_MKENT_COMPLETE(nand_read_test, 2, 0,
+ do_mtd_nand_read_test,
+ mtd_name_complete),
+#endif
U_BOOT_SUBCMD_MKENT_COMPLETE(bad, 2, 1, do_mtd_bad,
mtd_name_complete));
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index eaee739c6aa..591d9d9c656 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -14,7 +14,7 @@ config MEMORY
For now this uclass has no methods yet.
config ATMEL_EBI
- bool "Support for Atmel EBI"
+ bool
help
Driver for Atmel EBI controller. This is a dummy
driver. Doesn't provide an access to EBI controller. Select
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 4afe769ef32..06941a4fa8b 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -223,6 +223,7 @@ config SYS_MAX_FLASH_SECT
config SAMSUNG_ONENAND
bool "Samsung OneNAND driver support"
+ depends on S5P
config USE_SYS_MAX_FLASH_BANKS
bool "Enable Max number of Flash memory banks"
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 9c8a32bb0a8..754b99bf3eb 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -50,6 +50,8 @@ config SYS_NAND_NO_SUBPAGE_WRITE
config DM_NAND_ATMEL
bool "Support Atmel NAND controller with DM support"
+ depends on ARCH_AT91
+ select ATMEL_EBI
select SYS_NAND_SELF_INIT
imply SYS_NAND_USE_FLASH_BBT
help
@@ -58,6 +60,7 @@ config DM_NAND_ATMEL
config NAND_ATMEL
bool "Support Atmel NAND controller"
+ depends on ARCH_AT91
select SYS_NAND_SELF_INIT
imply SYS_NAND_USE_FLASH_BBT
help
@@ -115,6 +118,7 @@ endif
config NAND_BRCMNAND
bool "Support Broadcom NAND controller"
depends on OF_CONTROL && DM && DM_MTD
+ depends on ARCH_BCMBCA || ARCH_BMIPS || TARGET_BCMNS || TARGET_BCMNS3
select SYS_NAND_SELF_INIT
help
Enable the driver for NAND flash on platforms using a Broadcom NAND
@@ -148,6 +152,7 @@ config NAND_BRCMNAND_IPROC
config NAND_DAVINCI
bool "Support TI Davinci NAND controller"
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
select SYS_NAND_SELF_INIT if TARGET_DA850EVM
help
Enable this driver for NAND flash controllers available in TI Davinci
@@ -192,7 +197,7 @@ config SPL_NAND_LOAD
config NAND_CADENCE
bool "Support Cadence NAND controller as a DT device"
- depends on OF_CONTROL && DM_MTD
+ depends on OF_CONTROL && DM_MTD && ARCH_SOCFPGA
select SYS_NAND_SELF_INIT
select SPL_SYS_NAND_SELF_INIT
select SPL_NAND_BASE
@@ -234,6 +239,7 @@ config NAND_FSL_ELBC_DT
config NAND_FSL_IFC
bool "Support Freescale Integrated Flash Controller NAND driver"
+ depends on ARCH_LS1021A || FSL_LSCH2 || FSL_LSCH3 || PPC
select TPL_SYS_NAND_SELF_INIT if TPL_NAND_SUPPORT
select TPL_NAND_INIT if TPL && !TPL_FRAMEWORK
select SPL_SYS_NAND_SELF_INIT
@@ -257,13 +263,14 @@ config NAND_KMETER1
config NAND_LPC32XX_MLC
bool "Support LPC32XX_MLC controller"
+ depends on ARCH_LPC32XX
select SYS_NAND_SELF_INIT
help
Enable the LPC32XX MLC NAND controller.
config NAND_OMAP_GPMC
bool "Support OMAP GPMC NAND controller"
- depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3
select SYS_NAND_SELF_INIT if ARCH_K3
select SPL_NAND_INIT if ARCH_K3
select SPL_SYS_NAND_SELF_INIT if ARCH_K3
@@ -431,6 +438,7 @@ endif
config NAND_PXA3XX
bool "Support for NAND on PXA3xx and Armada 370/XP/38x"
+ depends on ARCH_MVEBU
select SYS_NAND_SELF_INIT
select DM_MTD
select REGMAP
@@ -490,7 +498,7 @@ endif
config NAND_ARASAN
bool "Configure Arasan Nand"
select SYS_NAND_SELF_INIT
- depends on DM_MTD
+ depends on DM_MTD && ARCH_ZYNQMP
imply CMD_NAND
help
This enables Nand driver support for Arasan nand flash
@@ -553,6 +561,7 @@ endif
config NAND_ZYNQ
bool "Support for Zynq Nand controller"
+ depends on ARCH_ZYNQ
select SPL_SYS_NAND_SELF_INIT
select SYS_NAND_SELF_INIT
select DM_MTD
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 65b836b34ca..152aa1a3783 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o esmt.o gigadevice.o macronix.o micron.o paragon.o
-spinand-objs += toshiba.o winbond.o xtx.o
+spinand-objs := core.o otp.o
+spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
new file mode 100644
index 00000000000..a3772b8c2f0
--- /dev/null
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Mario Kicherer <dev@kicherer.org>
+ */
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_ALLIANCEMEMORY 0x52
+
+#define AM_STATUS_ECC_BITMASK (3 << 4)
+
+#define AM_STATUS_ECC_NONE_DETECTED (0 << 4)
+#define AM_STATUS_ECC_CORRECTED (1 << 4)
+#define AM_STATUS_ECC_ERRORED (2 << 4)
+#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int am_get_eccsize(struct mtd_info *mtd)
+{
+ if (mtd->oobsize == 64)
+ return 0x20;
+ else if (mtd->oobsize == 128)
+ return 0x38;
+ else if (mtd->oobsize == 256)
+ return 0x70;
+ else
+ return -EINVAL;
+}
+
+static int am_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ int ecc_bytes;
+
+ ecc_bytes = am_get_eccsize(mtd);
+ if (ecc_bytes < 0)
+ return ecc_bytes;
+
+ region->offset = mtd->oobsize - ecc_bytes;
+ region->length = ecc_bytes;
+
+ return 0;
+}
+
+static int am_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ int ecc_bytes;
+
+ if (section)
+ return -ERANGE;
+
+ ecc_bytes = am_get_eccsize(mtd);
+ if (ecc_bytes < 0)
+ return ecc_bytes;
+
+ /*
+ * It is unclear how many bytes are used for the bad block marker. We
+ * reserve the common two bytes here.
+ *
+ * The free area in this kind of flash is divided into chunks where the
+ * first 4 bytes of each chunk are unprotected. The number of chunks
+ * depends on the specific model. The models with 4096+256 bytes pages
+ * have 8 chunks, the others 4 chunks.
+ */
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2 - ecc_bytes;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops am_ooblayout = {
+ .ecc = am_ooblayout_ecc,
+ .rfree = am_ooblayout_free,
+};
+
+static int am_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ switch (status & AM_STATUS_ECC_BITMASK) {
+ case AM_STATUS_ECC_NONE_DETECTED:
+ return 0;
+
+ case AM_STATUS_ECC_CORRECTED:
+ /*
+ * use oobsize to determine the flash model and the maximum of
+ * correctable errors and return maximum - 1 by convention
+ */
+ if (spinand->base.mtd->oobsize == 64)
+ return 3;
+ else
+ return 7;
+
+ case AM_STATUS_ECC_ERRORED:
+ return -EBADMSG;
+
+ case AM_STATUS_ECC_MAX_CORRECTED:
+ /*
+ * use oobsize to determine the flash model and the maximum of
+ * correctable errors
+ */
+ if (spinand->base.mtd->oobsize == 64)
+ return 4;
+ else
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info alliancememory_spinand_table[] = {
+ SPINAND_INFO("AS5F34G04SND",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2f),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&am_ooblayout,
+ am_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops alliancememory_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer alliancememory_spinand_manufacturer = {
+ .id = SPINAND_MFR_ALLIANCEMEMORY,
+ .name = "AllianceMemory",
+ .chips = alliancememory_spinand_table,
+ .nchips = ARRAY_SIZE(alliancememory_spinand_table),
+ .ops = &alliancememory_spinand_manuf_ops,
+};
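To make the ECC status convention above concrete: on a 128-byte-OOB part, AM_STATUS_ECC_CORRECTED reports 7 bitflips (maximum correctable minus one, since the chip gives no exact count), AM_STATUS_ECC_MAX_CORRECTED reports 8, and AM_STATUS_ECC_ERRORED maps to -EBADMSG; the 64-byte-OOB variants report 3 and 4 respectively.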
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 00000000000..a726df3eb98
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .rfree = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 3a1e7e18736..0c435059546 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -32,6 +32,7 @@
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
#include <linux/printk.h>
+#include <linux/delay.h>
#endif
struct spinand_plat {
@@ -41,24 +42,9 @@ struct spinand_plat {
/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
-
-static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
-{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -70,9 +56,9 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
-static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -174,20 +160,12 @@ int spinand_select_target(struct spinand_device *spinand, unsigned int target)
return 0;
}
-static int spinand_init_cfg_cache(struct spinand_device *spinand)
+static int spinand_read_cfg(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct udevice *dev = spinand->slave->dev;
unsigned int target;
int ret;
- spinand->cfg_cache = devm_kzalloc(dev,
- sizeof(*spinand->cfg_cache) *
- nand->memorg.ntargets,
- GFP_KERNEL);
- if (!spinand->cfg_cache)
- return -ENOMEM;
-
for (target = 0; target < nand->memorg.ntargets; target++) {
ret = spinand_select_target(spinand, target);
if (ret)
@@ -206,6 +184,21 @@ static int spinand_init_cfg_cache(struct spinand_device *spinand)
return 0;
}
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct udevice *dev = spinand->slave->dev;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
bool enable = false;
@@ -229,9 +222,144 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .rfree = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ int ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ ret = spinand_check_ecc_status(spinand, spinand->last_wait_status);
+ if (ret == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+ * Continuous reads don't allow us to get the per-page detail,
+ * so we may exaggerate the actual number of corrected bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
+ mtd->ecc_stats.corrected += ret * pages;
+ }
+
+ return ret;
+}
+
+static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ spinand->last_wait_status = status;
+}
+
+int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -241,7 +369,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -249,27 +377,25 @@ static int spinand_load_page_op(struct spinand_device *spinand,
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -277,28 +403,40 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ if (req->mode == MTD_OPS_RAW)
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ else
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+
+ if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->slave, &op);
- if (ret)
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->slave, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous) {
+ /*
+ * An SPI controller with broken continuous read support was
+ * detected. Disable future use of continuous reads and return
+ * -EAGAIN to retry the read in regular mode.
+ */
+ spinand->cont_read_possible = false;
+ return -EAGAIN;
+ }
}
if (req->datalen)
@@ -322,14 +460,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
- unsigned int nbytes = 0;
- void *buf = NULL;
- u16 column = 0;
- int ret;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
+ void *buf = spinand->databuf;
+ ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -337,20 +473,16 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
+ *
+ * Only reset the data buffer manually, the OOB buffer is prepared by
+ * ECC engines ->prepare_io_req() callback.
*/
- memset(spinand->databuf, 0xff,
- nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand));
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nanddev_page_size(nand));
- if (req->datalen) {
+ if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- nbytes = adjreq.datalen;
- buf = spinand->databuf;
- }
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -361,52 +493,27 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
-
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- nbytes += nanddev_per_page_oobsize(nand);
- if (!buf) {
- buf = spinand->oobbuf;
- column = nanddev_page_size(nand);
- }
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ if (req->mode == MTD_OPS_RAW)
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ else
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
+ if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
-
- ret = spi_mem_adjust_op_size(spinand->slave, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(spinand->slave, &op);
- if (ret)
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ if (!ret || ret > nbytes)
+ return -EIO;
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation resets
- * the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
return 0;
@@ -417,7 +524,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
@@ -425,28 +532,48 @@ static int spinand_program_op(struct spinand_device *spinand,
static int spinand_erase_op(struct spinand_device *spinand,
const struct nand_pos *pos)
{
- struct nand_device *nand = &spinand->base;
+ struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->slave, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * the STATUS_READY bit is 0 or when the timeout has expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
unsigned long start, stop;
u8 status;
int ret;
+ udelay(initial_delay_us);
start = get_timer(0);
- stop = 400;
+ stop = SPINAND_WAITRDY_TIMEOUT_MS;
do {
+ schedule();
+
ret = spinand_read_status(spinand, &status);
if (ret)
return ret;
if (!(status & STATUS_BUSY))
goto out;
+
+ udelay(poll_delay_us);
} while (get_timer(start) < stop);
/*
@@ -467,9 +594,8 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(naddr, ndummy,
- spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->slave, &op);
@@ -481,14 +607,17 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->slave, &op);
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -496,66 +625,64 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
-
- if (spinand->eccinfo.get_status)
- return spinand->eccinfo.get_status(spinand, status);
-
- switch (status & STATUS_ECC_MASK) {
- case STATUS_ECC_NO_BITFLIPS:
- return 0;
-
- case STATUS_ECC_HAS_BITFLIPS:
- /*
- * We have no way to know exactly how many bitflips have been
- * fixed, so let's return the maximum possible value so that
- * wear-leveling layers move the data immediately.
- */
- return nand->eccreq.strength;
-
- case STATUS_ECC_UNCOR_ERROR:
- return -EBADMSG;
-
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- bool ecc_enabled)
-{
u8 status;
int ret;
+ ret = spinand_ondie_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
+ spinand_ondie_ecc_save_status(nand, status);
+
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
- if (!ecc_enabled)
- return 0;
-
- return spinand_check_ecc_status(spinand, status);
+ return spinand_ondie_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = spinand_ondie_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
@@ -568,63 +695,234 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
- if (!ret && (status & STATUS_PROG_FAILED))
- ret = -EIO;
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (ret)
+ return ret;
- return ret;
+ if (status & STATUS_PROG_FAILED)
+ return -EIO;
+
+ return spinand_ondie_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
- unsigned int max_bitflips = 0;
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
bool ecc_failed = false;
- int ret = 0;
+ unsigned int retry_mode = 0;
+ int ret;
- if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
- enable_ecc = true;
+ old_stats = mtd->ecc_stats;
-#ifndef __UBOOT__
- mutex_lock(&spinand->lock);
-#endif
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
- nanddev_io_for_each_page(nand, from, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
schedule();
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
- ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+read_retry:
+ ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG) {
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- mtd->ecc_stats.failed++;
} else {
- mtd->ecc_stats.corrected += ret;
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret;
+}
+
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+ * Each data read must be a multiple of 4 bytes and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ schedule();
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_ondie_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = spinand_ondie_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee the SPI controller drivers will effectively deassert the CS
+ * when we expect them to, so take the register based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+ /* OOBs cannot be retrieved, so an external/on-host ECC engine won't work */
+ if (spinand->set_cont_read)
+ spinand->cont_read_possible = true;
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+ * Continuous reads never cross LUN boundaries. Some devices don't
+ * support crossing plane boundaries. Some devices don't even support
+ * crossing block boundaries. The common case being to read through UBI,
+ * we will very rarely read two consecutive blocks or more, so it is safer
+ * and easier (can be improved) to only enable continuous reads when
+ * reading within the same erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
+ return start_pos.page < end_pos.page;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ unsigned int max_bitflips = 0;
+ int ret;
+
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+
+ if (spinand_use_cont_read(mtd, from, ops)) {
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ if (ret == -EAGAIN && !spinand->cont_read_possible) {
+ /*
+ * An SPI controller with broken continuous read support was
+ * detected (see spinand_read_from_cache_op()); repeat the read
+ * in regular mode.
+ */
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+ } else {
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
}
#ifndef __UBOOT__
mutex_unlock(&spinand->lock);
#endif
- if (ecc_failed && !ret)
- ret = -EBADMSG;
return ret ? ret : max_bitflips;
}
@@ -635,23 +933,22 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
#ifndef __UBOOT__
mutex_lock(&spinand->lock);
#endif
- nanddev_io_for_each_page(nand, to, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
schedule();
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -681,9 +978,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req, false);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -727,11 +1032,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -751,6 +1059,7 @@ static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
#ifndef __UBOOT__
mutex_unlock(&spinand->lock);
#endif
+
return ret;
}
@@ -772,7 +1081,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
@@ -819,6 +1132,91 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static struct spi_mem_dirmap_desc *spinand_create_rdesc(
+ struct spinand_device *spinand,
+ struct spi_mem_dirmap_info *info)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_desc *desc = NULL;
+
+ if (spinand->cont_read_possible) {
+ /*
+ * The SPI controller may return an error if info->length is
+ * too large
+ */
+ info->length = nanddev_eraseblock_size(nand);
+ desc = spi_mem_dirmap_create(spinand->slave, info);
+ }
+
+ if (IS_ERR_OR_NULL(desc)) {
+ /*
+ * Continuous reading is not supported by the flash or its
+ * SPI controller, fall back to regular reads
+ */
+ spinand->cont_read_possible = false;
+
+ info->length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand);
+ desc = spi_mem_dirmap_create(spinand->slave, info);
+ }
+
+ return desc;
+}
+
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = { 0 };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = spi_mem_dirmap_create(spinand->slave, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = spinand_create_rdesc(spinand, &info);
+ if (IS_ERR(desc)) {
+ spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
+ return PTR_ERR(desc);
+ }
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
+ spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
+
+ return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(spinand->slave->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
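[Editor's note] With the descriptors in place, the read/write paths can go through the spi-mem direct mapping API instead of rebuilding a cache-read op for every transfer. A minimal sketch, assuming the usual dirmap semantics (spi_mem_dirmap_read() returns the number of bytes transferred, possibly short, or a negative error); the example_* name is hypothetical:

/* Hypothetical helper: drain a buffer through the plane's rdesc. */
static int example_dirmap_read(struct spinand_device *spinand,
			       unsigned int plane, u64 offs,
			       void *buf, size_t len)
{
	struct spi_mem_dirmap_desc *desc = spinand->dirmaps[plane].rdesc;
	ssize_t nbytes;

	while (len) {
		nbytes = spi_mem_dirmap_read(desc, offs, len, buf);
		if (nbytes < 0)
			return nbytes;	/* controller error */
		if (!nbytes)
			return -EIO;	/* no forward progress */

		buf += nbytes;		/* GCC void * arithmetic */
		offs += nbytes;
		len -= nbytes;
	}

	return 0;
}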
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -826,13 +1224,17 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &alliancememory_spinand_manufacturer,
+ &ato_spinand_manufacturer,
+ &esmt_c8_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
- &esmt_c8_spinand_manufacturer,
&xtx_spinand_manufacturer,
};
@@ -860,7 +1262,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
@@ -894,8 +1296,19 @@ static int spinand_id_detect(struct spinand_device *spinand)
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
- if (spinand->manufacturer->ops->init)
- return spinand->manufacturer->ops->init(spinand);
+ int ret;
+
+ if (spinand->manufacturer->ops->init) {
+ ret = spinand->manufacturer->ops->init(spinand);
+ if (ret)
+ return ret;
+ }
+
+ if (spinand->configure_chip) {
+ ret = spinand->configure_chip(spinand);
+ if (ret)
+ return ret;
+ }
return 0;
}
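[Editor's note] The new configure_chip hook gives a chip-table entry a place to tweak registers once manufacturer init has succeeded. A sketch of such a hook; only the signature comes from the code above, the bit and macro are hypothetical:

#define EXAMPLE_CFG_EXTRA_BIT	BIT(5)	/* hypothetical config bit */

static int example_configure_chip(struct spinand_device *spinand)
{
	/* Set a (made-up) chip-specific bit in the configuration register. */
	return spinand_upd_cfg(spinand, EXAMPLE_CFG_EXTRA_BIT,
			       EXAMPLE_CFG_EXTRA_BIT);
}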
@@ -912,10 +1325,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -932,13 +1348,17 @@ spinand_select_op_variant(struct spinand_device *spinand,
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(&op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
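[Editor's note] The loop above no longer returns on the first supported variant; it scores every supported one and keeps the cheapest. An illustrative comparison (cycle counts are invented, not taken from any datasheet):

/*
 * For one 2112-byte cache read at the same SCK frequency:
 *
 *   1S-1S-1S fast read: 8 (cmd) + 16 (addr) + 8 (dummy) + 2112 * 8 cycles
 *   1S-4S-4S read:      8 (cmd) +  4 (addr) + 4 (dummy) + 2112 * 2 cycles
 *
 * Both may pass spi_mem_supports_op(), but spi_mem_calc_op_duration()
 * reports a roughly 4x shorter duration for the second, so it becomes
 * best_variant.
 */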
static int spinand_setup_slave(struct spinand_device *spinand,
@@ -994,11 +1414,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
return ret;
nand->memorg = table[i].memorg;
- nand->eccreq = table[i].eccreq;
+ nanddev_set_ecc_requirements(nand, &table[i].eccreq);
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->configure_chip = table[i].configure_chip;
+ spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1057,35 +1483,55 @@ static int spinand_detect(struct spinand_device *spinand)
return 0;
}
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int spinand_init_flash(struct spinand_device *spinand)
{
- return -ERANGE;
-}
+ struct udevice *dev = spinand->slave->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret, i;
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- if (section)
- return -ERANGE;
+ ret = spinand_read_cfg(spinand);
+ if (ret)
+ return ret;
- /* Reserve 2 bytes for the BBM. */
- region->offset = 2;
- region->length = 62;
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
- return 0;
-}
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
- .ecc = spinand_noecc_ooblayout_ecc,
- .rfree = spinand_noecc_ooblayout_free,
-};
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ break;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ spinand_manufacturer_cleanup(spinand);
+
+ return ret;
+}
static int spinand_init(struct spinand_device *spinand)
{
+ struct udevice *dev = spinand->slave->dev;
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct nand_device *nand = mtd_to_nanddev(mtd);
- int ret, i;
+ int ret;
/*
* We need a scratch buffer because the spi_mem interface requires that
@@ -1104,9 +1550,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
@@ -1118,41 +1563,25 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_init_quad_enable(spinand);
+ ret = spinand_init_flash(spinand);
if (ret)
goto err_free_bufs;
- ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
- if (ret)
- goto err_free_bufs;
-
- ret = spinand_manufacturer_init(spinand);
- if (ret) {
- dev_err(spinand->slave->dev,
- "Failed to initialize the SPI NAND chip (err = %d)\n",
- ret);
- goto err_free_bufs;
- }
-
- /* After power up, all blocks are locked, so unlock them here. */
- for (i = 0; i < nand->memorg.ntargets; i++) {
- ret = spinand_select_target(spinand, i);
- if (ret)
- goto err_manuf_cleanup;
-
- ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
- if (ret)
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
+ spinand_ecc_enable(spinand, false);
+ ret = spinand_ondie_ecc_init_ctx(nand);
+ if (ret)
+ goto err_cleanup_nanddev;
+
/*
- * Right now, we don't support ECC, so let the whole oob
- * area is available for user.
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
*/
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1160,19 +1589,36 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
- if (spinand->eccinfo.ooblayout)
- mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
- else
- mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
- goto err_cleanup_nanddev;
+ goto err_cleanup_ecc_engine;
mtd->oobavail = ret;
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_cleanup_ecc_engine;
+ }
+
return 0;
+err_cleanup_ecc_engine:
+ spinand_ondie_ecc_cleanup_ctx(nand);
+
err_cleanup_nanddev:
nanddev_cleanup(nand);
@@ -1189,6 +1635,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ spinand_ondie_ecc_cleanup_ctx(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
@@ -1294,12 +1741,14 @@ static const struct spi_device_id spinand_ids[] = {
{ .name = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(spi, spinand_ids);
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
{ .compatible = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif
static struct spi_mem_driver spinand_drv = {
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 7e07b26827a..6a46f3a3bfc 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -8,25 +8,33 @@
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
+#else
+#include <dm/device_compat.h>
+#include <spi-mem.h>
+#include <spi.h>
#endif
#include <linux/mtd/spinand.h>
/* ESMT uses the GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
+#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
+#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -104,24 +112,117 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.rfree = f50l1g41lb_ooblayout_free,
};
+static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen, bool user)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
+ ESMT_F50L1G41LB_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->slave, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
+ dev_warn(spinand->slave->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
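[Editor's note] For reference, the command sequence this helper performs, summarized as a sketch (the opcodes are the standard SPI NAND ones the macros above expand to):

/*
 * 1. CFG |= OTP_ENABLE | OTP_PROTECT    (enter OTP-lock mode)
 * 2. WRITE ENABLE (06h)
 * 3. PROGRAM EXECUTE (10h) on row 0     (commits the lock)
 * 4. poll status, check STATUS_PROG_FAILED
 * 5. CFG &= ~(OTP_ENABLE | OTP_PROTECT) (back to normal array access)
 */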
+
+static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
+ .info = f50l1g41lb_user_otp_info,
+ .lock = f50l1g41lb_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
+ .info = f50l1g41lb_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
+ 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
+ SPINAND_INFO("F50D2G41KA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
+ 0x7f, 0x7f),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
};
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
new file mode 100644
index 00000000000..370f8494fb5
--- /dev/null
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FORESEE 0xCD
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
+ .ecc = f35sqa002g_ooblayout_ecc,
+ .rfree = f35sqa002g_ooblayout_free,
+};
+
+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ default:
+ break;
+ }
+
+ /*
+ * More than 1-bit error was detected in one or more sectors
+ * and cannot be corrected.
+ */
+ return -EBADMSG;
+}
+
+static const struct spinand_info foresee_spinand_table[] = {
+ SPINAND_INFO("F35SQA002G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer foresee_spinand_manufacturer = {
+ .id = SPINAND_MFR_FORESEE,
+ .name = "FORESEE",
+ .chips = foresee_spinand_table,
+ .nchips = ARRAY_SIZE(foresee_spinand_table),
+ .ops = &foresee_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index fe8c76acac6..32fbe11e908 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -28,44 +28,44 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -189,8 +189,8 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -211,6 +211,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -231,8 +232,8 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -252,6 +253,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
@@ -535,6 +537,26 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM9UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM9RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 86bffc2800b..f21103bb15a 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -5,6 +5,7 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
+#include <linux/bitfield.h>
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
@@ -13,21 +14,35 @@
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
-#define MACRONIX_ECCSR_MASK 0x0F
+#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
+#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
+#define MACRONIX_CFG_CONT_READ BIT(2)
+#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
+#define MACRONIX_NUM_READ_RETRY_MODES 5
+
+#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
+
+/* Bitflip threshold configuration register */
+#define REG_CFG_BFT 0x10
+#define CFG_BFT(x) FIELD_PREP(GENMASK(7, 4), (x))
+
+struct macronix_priv {
+ bool cont_read;
+};
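[Editor's note] A worked example of the two ECCSR fields (the register value is invented):

/*
 * eccsr = 0x53:
 *   MACRONIX_ECCSR_BF_LAST_PAGE(0x53)         -> 0x3  (bits 3:0)
 *   MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(0x53) -> 0x5  (bits 7:4)
 *
 * During a continuous read the driver must report the accumulated
 * (worst-page) count, 5 here, rather than the last page's 3, which
 * is why macronix_get_eccsr() below keys off priv->cont_read.
 */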
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -52,8 +67,9 @@ static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.rfree = mx35lfxge4ab_ooblayout_free,
};
-static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+static int macronix_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
+ struct macronix_priv *priv = spinand->priv;
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
@@ -63,12 +79,21 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
if (ret)
return ret;
- *eccsr &= MACRONIX_ECCSR_MASK;
+ /*
+ * ECCSR exposes the number of bitflips for the last read page in bits [3:0].
+ * Continuous read compatible chips also expose the maximum number of
+ * bitflips for the whole (continuous) read operation in bits [7:4].
+ */
+ if (!priv->cont_read)
+ *eccsr = MACRONIX_ECCSR_BF_LAST_PAGE(*eccsr);
+ else
+ *eccsr = MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(*eccsr);
+
return 0;
}
-static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int macronix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
@@ -86,14 +111,14 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
- if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
- return nand->eccreq.strength;
+ if (macronix_get_eccsr(spinand, spinand->scratchbuf))
+ return nanddev_get_ecc_conf(nand)->strength;
- if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
- return nand->eccreq.strength;
+ eccsr = *spinand->scratchbuf;
+ if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr))
+ return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
-
default:
break;
}
@@ -101,6 +126,38 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
+{
+ struct macronix_priv *priv = spinand->priv;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, MACRONIX_CFG_CONT_READ,
+ enable ? MACRONIX_CFG_CONT_READ : 0);
+ if (ret)
+ return ret;
+
+ priv->cont_read = enable;
+
+ return 0;
+}
+
+/**
+ * macronix_set_read_retry - Set the retry mode
+ * @spinand: SPI NAND device
+ * @retry_mode: Specify which retry mode to set
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int macronix_set_read_retry(struct spinand_device *spinand,
+ unsigned int retry_mode)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = retry_mode;
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
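[Editor's note] A minimal sketch of how a caller could cycle these modes on an uncorrectable page; the real loop lives in the core read path of this series, and example_read_with_retries() is hypothetical:

static int example_read_with_retries(struct spinand_device *spinand,
				     struct nand_page_io_req *req)
{
	unsigned int mode;
	int ret;

	ret = spinand_read_page(spinand, req);
	if (ret != -EBADMSG || !spinand->set_read_retry)
		return ret;

	for (mode = 1; mode < spinand->read_retries; mode++) {
		if (spinand->set_read_retry(spinand, mode))
			break;

		ret = spinand_read_page(spinand, req);
		if (ret != -EBADMSG)
			break;
	}

	/* Always restore the default mode before returning. */
	if (spinand->set_read_retry(spinand, 0))
		ret = -EIO;

	return ret;
}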
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -111,7 +168,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -119,10 +176,12 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -130,9 +189,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -140,34 +202,67 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35LF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35LF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -177,7 +272,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -187,7 +282,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
@@ -196,21 +291,38 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35UF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -218,7 +330,10 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -226,21 +341,38 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
+ SPINAND_INFO("MX35UF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -248,9 +380,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -258,7 +393,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -268,9 +404,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -278,9 +414,11 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -288,9 +426,12 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -298,11 +439,51 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
-
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_INFO("MX31LF2GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status)),
+ SPINAND_INFO("MX3UF2GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ macronix_ecc_get_status)),
};
+static int macronix_spinand_init(struct spinand_device *spinand)
+{
+ struct macronix_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spinand->priv = priv;
+
+ return 0;
+}
+
+static void macronix_spinand_cleanup(struct spinand_device *spinand)
+{
+ kfree(spinand->priv);
+}
+
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .init = macronix_spinand_init,
+ .cleanup = macronix_spinand_cleanup,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index b538213ed8e..9af3e99664f 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -9,12 +9,18 @@
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/spi/spi-mem.h>
+#else
+#include <dm/device_compat.h>
+#include <spi-mem.h>
+#include <spi.h>
#endif
#include <linux/mtd/spinand.h>
+#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
@@ -30,34 +36,38 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
+
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -133,7 +143,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -170,6 +180,136 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
+
+static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
+{
+ size_t bufsize = spinand_otp_page_size(spinand);
+ size_t retlen;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
+ if (ret)
+ goto free_buf;
+
+ ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
+
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ 0)) {
+ dev_warn(spinand->slave->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ if (ret)
+ goto free_buf;
+
+ /* If all zeros, then the OTP area is locked. */
+ if (mem_is_zero(buf, bufsize))
+ ret = 1;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen,
+ bool user)
+{
+ int locked;
+
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ locked = mt29f2g01abagd_otp_is_locked(spinand);
+ if (locked < 0)
+ return locked;
+
+ buf->locked = locked;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->slave, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
+ dev_warn(spinand->slave->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
+ .info = mt29f2g01abagd_user_otp_info,
+ .lock = mt29f2g01abagd_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
+ .info = mt29f2g01abagd_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -181,7 +321,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
- micron_8_ecc_get_status)),
+ micron_8_ecc_get_status),
+ SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c
new file mode 100644
index 00000000000..e6ef78d9464
--- /dev/null
+++ b/drivers/mtd/nand/spi/otp.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spinand.h>
+#ifdef __UBOOT__
+#include <spi.h>
+#include <dm/device_compat.h>
+#endif
+
+/**
+ * spinand_otp_page_size() - Get SPI-NAND OTP page size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP page size.
+ */
+size_t spinand_otp_page_size(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+}
+
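[Editor's note] A concrete sizing example, assuming the common 2048+64 page geometry and the (npages, start_page) argument order used by the chip-table macros in this series:

/*
 * spinand_otp_page_size() = 2048 + 64 = 2112 bytes
 * SPINAND_USER_OTP_INFO(28, 2, ...) -> 28 pages * 2112 = 59136 bytes
 * of user OTP, located at OTP pages 2..29.
 */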
+static size_t spinand_otp_size(struct spinand_device *spinand,
+ const struct spinand_otp_layout *layout)
+{
+ return layout->npages * spinand_otp_page_size(spinand);
+}
+
+/**
+ * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_fact_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_size() - Get SPI-NAND user OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_user_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->user_otp->layout);
+}
+
+static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
+ size_t len,
+ const struct spinand_otp_layout *layout)
+{
+ if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
+ loff_t ofs, size_t len)
+{
+ return spinand_otp_check_bounds(spinand, ofs, len,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf, bool is_write,
+ const struct spinand_otp_layout *layout)
+{
+ struct nand_page_io_req req = {};
+ unsigned long long page;
+ size_t copied = 0;
+ size_t otp_pagesize = spinand_otp_page_size(spinand);
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
+ if (ret)
+ return ret;
+
+ page = ofs;
+ req.dataoffs = do_div(page, otp_pagesize);
+ req.pos.page = page + layout->start_page;
+ req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
+ req.mode = MTD_OPS_RAW;
+ req.databuf.in = buf;
+
+ while (copied < len) {
+ req.datalen = min_t(unsigned int,
+ otp_pagesize - req.dataoffs,
+ len - copied);
+
+ if (is_write)
+ ret = spinand_write_page(spinand, &req);
+ else
+ ret = spinand_read_page(spinand, &req);
+
+ if (ret < 0)
+ break;
+
+ req.databuf.in += req.datalen;
+ req.pos.page++;
+ req.dataoffs = 0;
+ copied += req.datalen;
+ }
+
+ *retlen = copied;
+
+ if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
+ dev_warn(spinand->slave->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
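[Editor's note] A worked example of the paging arithmetic above (2112-byte OTP pages assumed):

/*
 * ofs = 5000, len = 3000:
 *   do_div(page = 5000, 2112) -> page = 2, dataoffs = 776
 *   iteration 1: datalen = min(2112 - 776, 3000)  = 1336
 *   iteration 2: datalen = min(2112, 3000 - 1336) = 1664
 * copied = 3000, touching OTP pages start_page + 2 and + 3.
 */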
+/**
+ * spinand_fact_otp_read() - Read from the factory OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_read() - Read from the user OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->user_otp->layout);
+}
+
+/**
+ * spinand_user_otp_write() - Write to the user OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the pointer to variable to store the number of written bytes
+ * @buf: the buffer with data to write
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf,
+ bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
+ else
+ ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
+}
+
+static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u8 *buf, bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len,
+ is_fact ? &spinand->fact_otp->layout :
+ &spinand->user_otp->layout);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+ else
+ ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
+}
+
+static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->write(spinand, ofs, len, retlen, buf);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+#ifndef __UBOOT__
+static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->erase(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+#endif
+
+static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->lock(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+/**
+ * spinand_set_mtd_otp_ops() - Setup OTP methods
+ * @spinand: the spinand device
+ *
+ * Install the MTD user/factory OTP callbacks declared by the chip table.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops;
+ const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops;
+
+ if (!user_ops && !fact_ops)
+ return -EINVAL;
+
+ if (user_ops) {
+ if (user_ops->info)
+ mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
+
+ if (user_ops->read)
+ mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
+
+ if (user_ops->write)
+ mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
+
+ if (user_ops->lock)
+ mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
+#ifndef __UBOOT__
+ if (user_ops->erase)
+ mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
+#endif
+ }
+
+ if (fact_ops) {
+ if (fact_ops->info)
+ mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
+
+ if (fact_ops->read)
+ mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
+ }
+
+ return 0;
+}
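Once registered, these handlers are reached through the generic MTD OTP entry
points. A minimal sketch of a caller, assuming an already-probed mtd device
(the 16-byte length is arbitrary):

	/* Sketch: read the start of the user OTP area through the MTD layer;
	 * mtd_read_user_prot_reg() dispatches to the
	 * spinand_mtd_user_otp_read() handler registered above.
	 */
	size_t retlen = 0;
	u8 otp[16];
	int ret = mtd_read_user_prot_reg(mtd, 0, sizeof(otp), &retlen, otp);

	if (ret)
		printf("user OTP read failed: %d\n", ret);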
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 079431cea8f..a7106ae194b 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -11,8 +11,10 @@
#endif
#include <linux/mtd/spinand.h>
+
#define SPINAND_MFR_PARAGON 0xa1
+
#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4)
#define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4)
@@ -20,21 +22,23 @@
#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4)
#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
new file mode 100644
index 00000000000..5e9487bd27a
--- /dev/null
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 SkyHigh Memory Limited
+ *
+ * Author: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+ * Co-Author: KR Kim <kr.kim@skyhighmemory.com>
+ */
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_SKYHIGH 0x01
+#define SKYHIGH_STATUS_ECC_1TO2_BITFLIPS (1 << 4)
+#define SKYHIGH_STATUS_ECC_3TO6_BITFLIPS (2 << 4)
+#define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4)
+#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+	/* ECC bytes are stored in a hidden area. */
+ return -ERANGE;
+}
+
+static int skyhigh_spinand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+	/* ECC bytes are stored in a hidden area. Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops skyhigh_spinand_ooblayout = {
+ .ecc = skyhigh_spinand_ooblayout_ecc,
+ .rfree = skyhigh_spinand_ooblayout_free,
+};
+
+static int skyhigh_spinand_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case SKYHIGH_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case SKYHIGH_STATUS_ECC_1TO2_BITFLIPS:
+ return 2;
+
+ case SKYHIGH_STATUS_ECC_3TO6_BITFLIPS:
+ return 6;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
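The chip reports bitflip ranges rather than exact counts, so the helper above
returns the worst case of each range (2 for the 1-2 range, 6 for the 3-6
range). A hedged sketch of how the core-side consumer treats that value:

	/* Sketch: the MTD layer compares the worst-case count against the
	 * bitflip threshold to decide whether the block should be flagged
	 * for scrubbing.
	 */
	int bitflips = skyhigh_spinand_ecc_get_status(spinand, status);

	if (bitflips >= 0 && bitflips >= mtd->bitflip_threshold)
		ret = -EUCLEAN;	/* corrected, but the block is wearing out */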
+
+static const struct spinand_info skyhigh_spinand_table[] = {
+ SPINAND_INFO("S35ML01G301",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML01G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML02G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML04G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+};
+
+static int skyhigh_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * Config_Protect_En (bit 1 in Block Lock register) must be set to 1
+	 * before writing other bits. Do it here before the core unlocks all
+	 * blocks by writing the block protection bits.
+ */
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK,
+ SKYHIGH_CONFIG_PROTECT_EN);
+}
+
+static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = {
+ .init = skyhigh_spinand_init,
+};
+
+const struct spinand_manufacturer skyhigh_spinand_manufacturer = {
+ .id = SPINAND_MFR_SKYHIGH,
+ .name = "SkyHigh",
+ .chips = skyhigh_spinand_table,
+ .nchips = ARRAY_SIZE(skyhigh_spinand_table),
+ .ops = &skyhigh_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index b9908e79271..2e7572d72b4 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -18,28 +18,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -76,7 +76,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -93,12 +93,12 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->slave, &op))
- return nand->eccreq.strength;
+ return nanddev_get_ecc_conf(nand)->strength;
- mbf >>= 4;
+ mbf = *(spinand->scratchbuf) >> 4;
- if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
- return nand->eccreq.strength;
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
return mbf;
@@ -269,6 +269,39 @@ static const struct spinand_info toshiba_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58NYG0S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TH58NYG2S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (1st generation) */
+ SPINAND_INFO("TH58NYG3S0HBAI6",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 16abf89dbbf..a89aaec516b 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -14,28 +14,83 @@
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
+#include <linux/delay.h>
+
+#define HZ_PER_MHZ 1000000UL
#define SPINAND_MFR_WINBOND 0xEF
#define WINBOND_CFG_BUF_READ BIT(3)
-#define W25N04KV_STATUS_ECC_5_8_BITFLIPS FIELD_PREP_CONST(STATUS_ECC_MASK, 0x3)
+#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+
+#define W25N0XJW_SR4 0xD0
+#define W25N0XJW_SR4_HS BIT(2)
+
+#define W35N01JW_VCR_IO_MODE 0x00
+#define W35N01JW_VCR_IO_MODE_SINGLE_SDR 0xFF
+#define W35N01JW_VCR_IO_MODE_OCTAL_SDR 0xDF
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR_DS 0xE7
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR 0xC7
+#define W35N01JW_VCR_DUMMY_CLOCK_REG 0x01
+
+/*
+ * "X2" in the core is equivalent to "dual output" in the datasheets,
+ * "X4" in the core is equivalent to "quad output" in the datasheets.
+ * Quad- and octal-capable chips have an absolute maximum frequency of 166 MHz.
+ */
+
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 3, NULL, 0, 120 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 20, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 12, NULL, 0, 124 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 8, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -80,6 +135,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->slave, &op);
}
+static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 64 + (8 * section);
+ region->length = 7;
+
+ return 0;
+}
+
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -104,17 +171,57 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
+ .ecc = w25n01kv_ooblayout_ecc,
+ .rfree = w25n02kv_ooblayout_free,
+};
+
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.rfree = w25n02kv_ooblayout_free,
};
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 16 * section;
+ region->length = 12;
+
+ /* Extract BBM */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .rfree = w35n01jw_ooblayout_free,
+};
+
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -131,12 +238,12 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->slave, &op))
- return nand->eccreq.strength;
+ return nanddev_get_ecc_conf(nand)->strength;
- mbf >>= 4;
+ mbf = *(spinand->scratchbuf) >> 4;
- if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
- return nand->eccreq.strength;
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
return mbf;
@@ -147,18 +254,126 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
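Note the switch from a stack variable to spinand->scratchbuf for the MBF read
above (the same change is made in the Toshiba driver earlier in this patch):
spi-mem requires the buffers passed in a spi_mem_op to be DMA-able, and stack
memory offers no such guarantee. Condensed, the idiom is:

	/* Sketch: bounce a one-byte register read through the DMA-safe
	 * scratch buffer, then copy the value out.
	 */
	struct spi_mem_op op =
		SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);

	if (spi_mem_exec_op(spinand->slave, &op))
		return nanddev_get_ecc_conf(nand)->strength;

	mbf = *spinand->scratchbuf >> 4;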
+static int w25n0xjw_hs_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ bool hs;
+ u8 sr4;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ hs = false;
+ else if (op->cmd.buswidth == 1 && op->addr.buswidth == 1 &&
+ op->dummy.buswidth == 1 && op->data.buswidth == 1)
+ hs = false;
+ else if (!op->max_freq)
+ hs = true;
+ else
+ hs = false;
+
+ ret = spinand_read_reg_op(spinand, W25N0XJW_SR4, &sr4);
+ if (ret)
+ return ret;
+
+ if (hs)
+ sr4 |= W25N0XJW_SR4_HS;
+ else
+ sr4 &= ~W25N0XJW_SR4_HS;
+
+ ret = spinand_write_reg_op(spinand, W25N0XJW_SR4, sr4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
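In short, the logic above sets the high-speed bit only when the selected
read-cache template is a multi-I/O SDR operation with no per-op frequency cap;
DTR templates and plain 1S-1S-1S reads leave it cleared.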
+
+static int w35n0xjw_write_vcr(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x81, 1),
+ SPI_MEM_OP_ADDR(3, reg, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, spinand->scratchbuf, 1));
+ int ret;
+
+ *spinand->scratchbuf = val;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ /*
+	 * The Write VCR operation doesn't set the busy bit in SR, which means
+	 * we cannot perform a status poll. A minimum of 50 ns is needed for
+	 * the write to complete.
+ */
+ ndelay(50);
+
+ return 0;
+}
+
+static int w35n0xjw_vcr_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ unsigned int dummy_cycles;
+ bool dtr, single;
+ u8 io_mode;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+
+ single = (op->cmd.buswidth == 1 && op->addr.buswidth == 1 && op->data.buswidth == 1);
+ dtr = (op->cmd.dtr || op->addr.dtr || op->data.dtr);
+ if (single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_SINGLE_SDR;
+ else if (!single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_SDR;
+ else if (!single && dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_DDR;
+ else
+ return -EINVAL;
+
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_IO_MODE, io_mode);
+ if (ret)
+ return ret;
+
+ dummy_cycles = ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+ switch (dummy_cycles) {
+ case 8:
+ case 12:
+ case 16:
+ case 20:
+ case 24:
+ case 28:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_DUMMY_CLOCK_REG, dummy_cycles);
+ if (ret)
+ return ret;
+
+ return 0;
+}
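The dummy-cycle count is derived directly from the selected template. As a
worked example (assuming the 1S_8S_8S variant with 16 dummy bytes was picked):
(16 bytes * 8 bits) / 8-bit bus / 1 (SDR) = 16 cycles, which is one of the
values the switch above accepts.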
+
static const struct spinand_info winbond_spinand_table[] = {
- SPINAND_INFO("W25M02GV",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
- NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
+ /* 512M-bit densities */
+ SPINAND_INFO("W25N512GW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20),
+ NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
- SPINAND_SELECT_TARGET(w25m02gv_select_target)),
- SPINAND_INFO("W25N01GV",
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ /* 1G-bit densities */
+ SPINAND_INFO("W25N01GV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
@@ -167,7 +382,86 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
- SPINAND_INFO("W25N02KV",
+ SPINAND_INFO("W25N01GW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W25N01JW", /* high-speed 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
+ SPINAND_INFO("W25N01KV", /* 3.3V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
+ NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W35N01JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ SPINAND_INFO("W35N02JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ SPINAND_INFO("W35N04JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
+ /* 2G-bit densities */
+ SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N02JW", /* high-speed 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
+ SPINAND_INFO("W25N02KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
@@ -176,7 +470,17 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
- SPINAND_INFO("W25N04KV",
+ SPINAND_INFO("W25N02KW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ /* 4G-bit densities */
+ SPINAND_INFO("W25N04KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23),
NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
@@ -185,6 +489,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N04KW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x23),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index aee1849a71f..3e1f884fd89 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -25,20 +25,20 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 3579b7d7db5..db44a7b26eb 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -120,18 +120,21 @@ static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
return 0;
case 2:
- if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
- (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+ if ((tx &&
+ (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx &&
+ (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
case 4:
- if ((tx && (mode & SPI_TX_QUAD)) ||
- (!tx && (mode & SPI_RX_QUAD)))
+ if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
+
case 8:
if ((tx && (mode & SPI_TX_OCTAL)) ||
(!tx && (mode & SPI_RX_OCTAL)))
@@ -300,7 +303,7 @@ int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP) {
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
spi_release_bus(slave);
return ret;
}
@@ -496,6 +499,38 @@ int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+/**
+ * spi_mem_calc_op_duration() - Derive the theoretical length (in clock
+ *                              cycles) of an operation. This helps find the
+ *                              best variant among a list of possible choices.
+ * @op: the operation to benchmark
+ *
+ * Some chips have per-op frequency limitations, PCBs usually have their own
+ * limitations as well, and controllers can support dual, quad or even octal
+ * modes, sometimes in DTR. All these combinations make it impossible to
+ * statically list the best combination for all situations. If we want
+ * something accurate, all these combinations should be rated (e.g. with a
+ * time estimate) and the best pick should be taken based on these
+ * calculations.
+ *
+ * Returns an estimate of the number of clock cycles this operation would take.
+ */
+u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
+{
+ u64 ncycles = 0;
+
+ ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
+ ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
+
+ /* Dummy bytes are optional for some SPI flash memory operations */
+ if (op->dummy.nbytes)
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
+ ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
+
+ return ncycles;
+}
+EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
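As a rough worked example of the estimate (a sketch, not part of the patch):
for a 2048-byte read, a 1S-4S-4S variant with a one-byte opcode, two address
bytes and two dummy bytes costs 8 + 4 + 4 + 4096 = 4112 cycles, while the
plain 1S-1S-1S variant costs 8 + 16 + 8 + 16384 = 16416 cycles, so a caller
ranking templates would pick the quad variant:

	/* Sketch: quad_op and single_op are hypothetical candidate templates. */
	if (spi_mem_calc_op_duration(&quad_op) <
	    spi_mem_calc_op_duration(&single_op))
		op = &quad_op;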
+
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 651f8706df5..243955ac1a1 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -19,7 +19,7 @@
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
- * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
@@ -81,7 +81,18 @@ struct nand_pos {
};
/**
+ * enum nand_page_io_req_type - Direction of an I/O request
+ * @NAND_PAGE_READ: from the chip, to the controller
+ * @NAND_PAGE_WRITE: from the controller, to the chip
+ */
+enum nand_page_io_req_type {
+ NAND_PAGE_READ = 0,
+ NAND_PAGE_WRITE,
+};
+
+/**
* struct nand_page_io_req - NAND I/O request object
+ * @type: the type of page I/O: read or write
* @pos: the position this I/O request is targeting
* @dataoffs: the offset within the page
* @datalen: number of data bytes to read from/write to this page
@@ -90,6 +101,8 @@ struct nand_pos {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: when true, the operation does not need to be restarted at the
+ *		end of each page; the NAND device automatically prepares the
+ *		next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted in a useful way and
@@ -97,6 +110,7 @@ struct nand_pos {
* specific commands/operations.
*/
struct nand_page_io_req {
+ enum nand_page_io_req_type type;
struct nand_pos pos;
unsigned int dataoffs;
unsigned int datalen;
@@ -111,6 +125,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
+ bool continuous;
};
/**
@@ -272,6 +287,20 @@ nanddev_pages_per_eraseblock(const struct nand_device *nand)
}
/**
+ * nanddev_pages_per_target() - Get the number of pages per target
+ * @nand: NAND device
+ *
+ * Return: the number of pages per target.
+ */
+static inline unsigned int
+nanddev_pages_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.luns_per_target;
+}
+
+/**
 * nanddev_per_page_oobsize() - Get NAND per-page OOB size
* @nand: NAND device
*
@@ -295,6 +324,18 @@ nanddev_eraseblocks_per_lun(const struct nand_device *nand)
}
/**
+ * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per target.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
+}
+
+/**
* nanddev_target_size() - Get the total size provided by a single target/die
* @nand: NAND device
*
@@ -320,7 +361,7 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
}
/**
- * nanddev_neraseblocks() - Get the total number of erasablocks
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
* @nand: NAND device
*
* Return: the total number of eraseblocks exposed by @nand.
@@ -358,6 +399,29 @@ nanddev_get_memorg(struct nand_device *nand)
return &nand->memorg;
}
+/**
+ * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_req *
+nanddev_get_ecc_conf(struct nand_device *nand)
+{
+ return &nand->eccreq;
+}
+
+/**
+ * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
+ * device
+ * @nand: NAND device
+ * @reqs: Requirements
+ */
+static inline void
+nanddev_set_ecc_requirements(struct nand_device *nand,
+ const struct nand_ecc_req *reqs)
+{
+ nand->eccreq = *reqs;
+}
+
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
@@ -603,21 +667,23 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nanddev_io_page_iter_init - Initialize a NAND I/O page iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer, advancing one page at a time.
*/
-static inline void nanddev_io_iter_init(struct nand_device *nand,
- loff_t offs, struct mtd_oob_ops *req,
- struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
+ iter->req.type = reqtype;
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
@@ -632,6 +698,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nanddev_io_block_iter_init - Initialize a NAND I/O block iterator
+ * @nand: NAND device
+ * @reqtype: type of I/O to perform (read or write)
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer, advancing one eraseblock at a time (no OOB handling).
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
}
/**
@@ -658,6 +761,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
}
/**
+ * nanddev_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
* nand_io_iter_end - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
@@ -685,13 +807,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
*/
-#define nanddev_io_for_each_page(nand, start, req, iter) \
- for (nanddev_io_iter_init(nand, start, req, iter); \
+#define nanddev_io_for_each_page(nand, type, start, req, iter) \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
+/**
+ * nanddev_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ * request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
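A hedged sketch of how a read path might drive the block iterator (read_fn
stands in for whatever per-block helper the caller provides):

	struct nand_io_iter iter;
	int ret = 0;

	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, req, &iter) {
		ret = read_fn(nand, &iter.req);	/* hypothetical helper */
		if (ret)
			break;
	}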
+
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6fe6fd520a4..cf9b9656d05 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -26,126 +26,218 @@
* Standard SPI NAND flash operations
*/
-#define SPINAND_RESET_OP \
+#define SPINAND_RESET_1S_0_0_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_WR_EN_DIS_OP(enable) \
+#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
+#define SPINAND_READID_1S_1S_1S_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+#define SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
-#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+#define SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, valptr, 1))
-#define SPINAND_BLK_ERASE_OP(addr) \
+#define SPINAND_BLK_ERASE_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_OP(addr) \
+#define SPINAND_PAGE_READ_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
- SPI_MEM_OP_ADDR(3, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
- SPI_MEM_OP_ADDR(2, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
-
-#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x8b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xcb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_1S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 1))
-#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_4S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 4))
+#define SPINAND_PROG_LOAD_1S_1S_8S_OP(addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x82, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
+#define SPINAND_PROG_LOAD_1S_8S_8S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0xc2 : 0xc4, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
/**
* Standard SPI NAND flash commands
*/
@@ -175,7 +267,29 @@
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 4
+#define SPINAND_MAX_ID_LEN 5
+/*
+ * For the erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
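A minimal sketch of how these constants pair with spinand_wait(), which is
declared further down in this header:

	/* Sketch: poll for readiness after issuing a page-read operation. */
	u8 status;
	int ret = spinand_wait(spinand,
			       SPINAND_READ_INITIAL_DELAY_US,
			       SPINAND_READ_POLL_DELAY_US,
			       &status);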
/**
* struct spinand_id - SPI NAND id structure
@@ -244,13 +358,17 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer skyhigh_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
-extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
@@ -291,8 +409,73 @@ struct spinand_ecc_info {
const struct mtd_ooblayout_ops *ooblayout;
};
-#define SPINAND_HAS_QE_BIT BIT(0)
-#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
+#define SPINAND_NO_RAW_ACCESS BIT(4)
+
+/**
+ * struct spinand_otp_layout - structure to describe the SPI NAND OTP area
+ * @npages: number of pages in the OTP
+ * @start_page: start page of the user/factory OTP area.
+ */
+struct spinand_otp_layout {
+ unsigned int npages;
+ unsigned int start_page;
+};
+
+/**
+ * struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area
+ * @info: get the OTP area information
+ * @read: read from the SPI NAND OTP area
+ */
+struct spinand_fact_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+};
+
+/**
+ * struct spinand_user_otp_ops - SPI NAND OTP methods for user area
+ * @info: get the OTP area information
+ * @lock: lock an OTP region
+ * @erase: erase an OTP region
+ * @read: read from the SPI NAND OTP area
+ * @write: write to the SPI NAND OTP area
+ */
+struct spinand_user_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*lock)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*erase)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+ int (*write)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, const u8 *buf);
+};
+
+/**
+ * struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_fact_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_fact_otp_ops *ops;
+};
+
+/**
+ * struct spinand_user_otp - SPI NAND OTP grouping structure for user area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_user_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_user_otp_ops *ops;
+};
+
/**
* struct spinand_info - Structure used to describe SPI NAND chips
@@ -308,6 +491,12 @@ struct spinand_ecc_info {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
+ * @set_cont_read: enable/disable continuous cached reads
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: enable/disable read retry for data recovery
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -326,6 +515,14 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+ struct spinand_fact_otp fact_otp;
+ struct spinand_user_otp user_otp;
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int read_retry);
};
#define SPINAND_ID(__method, ...) \
@@ -349,7 +546,35 @@ struct spinand_info {
}
#define SPINAND_SELECT_TARGET(__func) \
- .select_target = __func,
+ .select_target = __func
+
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read
+
+#define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \
+ .fact_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \
+ .user_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
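A hedged sketch of how a chip entry might declare its user OTP area with the
macro above (the page count, start page and ops table are illustrative):

	SPINAND_USER_OTP_INFO(12, 2, &my_user_otp_ops),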
+
+#define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \
+ .read_retries = __read_retries, \
+ .set_read_retry = __set_read_retry
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
@@ -363,6 +588,13 @@ struct spinand_info {
__VA_ARGS__ \
}
+struct spinand_dirmap {
+ struct spi_mem_dirmap_desc *wdesc;
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
+};
+
/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
@@ -387,7 +619,20 @@ struct spinand_info {
 * passed in spi_mem_op be DMA-able, so we can't base the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
+ * @cont_read_possible: Field filled by the core once the whole system
+ * configuration is known to tell whether continuous reads are
+ * suitable to use or not in general with this chip/configuration.
+ * A per-transfer check must of course be done to ensure it is
+ * actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: Enable/disable the read retry feature
+ * @last_wait_status: status of the last wait operation that will be used in case
+ * ->get_status() is not populated by the spinand device.
*/
struct spinand_device {
struct nand_device base;
@@ -406,6 +651,8 @@ struct spinand_device {
const struct spi_mem_op *update_cache;
} op_templates;
+ struct spinand_dirmap *dirmaps;
+
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
unsigned int cur_target;
@@ -418,6 +665,20 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ u8 last_wait_status;
+
+ int (*configure_chip)(struct spinand_device *spinand);
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+
+ const struct spinand_fact_otp *fact_otp;
+ const struct spinand_user_otp *user_otp;
+
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int retry_mode);
};
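
A minimal sketch of how these hooks are meant to be driven, assuming the Linux convention that spinand_read_page() (declared below in this header) returns -EBADMSG on an uncorrectable ECC error, and that mode 0 restores normal reads (the exact mode numbering is chip-specific):

static int example_read_with_retries(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	unsigned int mode;
	int ret = spinand_read_page(spinand, req);

	if (ret != -EBADMSG || !spinand->set_read_retry)
		return ret;

	/* Walk the vendor retry modes until the page reads clean. */
	for (mode = 1; mode <= spinand->read_retries; mode++) {
		if (spinand->set_read_retry(spinand, mode))
			break;
		ret = spinand_read_page(spinand, req);
		if (ret != -EBADMSG)
			break;
	}

	/* Always drop back to the default mode before continuing. */
	spinand->set_read_retry(spinand, 0);

	return ret;
}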
/**
@@ -499,6 +760,31 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s);
+
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+size_t spinand_otp_page_size(struct spinand_device *spinand);
+size_t spinand_fact_otp_size(struct spinand_device *spinand);
+size_t spinand_user_otp_size(struct spinand_device *spinand);
+
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf);
+
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand);
+
#endif /* __LINUX_MTD_SPINAND_H */
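
A usage sketch for the OTP helpers declared above, with the spinand pointer assumed to come from an already probed device and error handling trimmed; print_buffer() is U-Boot's hex-dump helper:

static int example_dump_user_otp(struct spinand_device *spinand)
{
	size_t len = spinand_user_otp_size(spinand);
	size_t retlen = 0;
	u8 *buf;
	int ret;

	if (!len)
		return -ENODEV;

	buf = malloc(len);
	if (!buf)
		return -ENOMEM;

	ret = spinand_user_otp_read(spinand, 0, len, &retlen, buf);
	if (!ret)
		print_buffer(0, buf, 1, retlen, 16);

	free(buf);
	return ret;
}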
diff --git a/include/spi-mem.h b/include/spi-mem.h
index 2eb05a2e5bc..36281a62f77 100644
--- a/include/spi-mem.h
+++ b/include/spi-mem.h
@@ -17,16 +17,32 @@ struct udevice;
#define SPI_MEM_OP_CMD(__opcode, __buswidth) \
{ \
+ .nbytes = 1, \
.buswidth = __buswidth, \
.opcode = __opcode, \
+ }
+
+#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \
+ { \
.nbytes = 1, \
+ .opcode = __opcode, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
{ \
.nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .val = __val, \
+ }
+
+#define SPI_MEM_DTR_OP_ADDR(__nbytes, __val, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
.val = __val, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_ADDR { }
@@ -37,22 +53,47 @@ struct udevice;
.buswidth = __buswidth, \
}
+#define SPI_MEM_DTR_OP_DUMMY(__nbytes, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_NO_DUMMY { }
#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_IN, \
+ .nbytes = __nbytes, \
+ .buf.in = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_IN(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_IN, \
.nbytes = __nbytes, \
.buf.in = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_OUT, \
+ .nbytes = __nbytes, \
+ .buf.out = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_OUT, \
.nbytes = __nbytes, \
.buf.out = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_DATA { }
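
Taken together, the DTR variants describe a full double-transfer-rate operation. A sketch of an 8D-8D-8D read follows; the 0xee opcode, four address bytes and sixteen dummy bytes are illustrative values rather than a specific datasheet, and buf must be DMA-able:

static int example_dtr_read(struct spi_slave *slave, void *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_DTR_OP_CMD(0xee, 8),
			   SPI_MEM_DTR_OP_ADDR(4, 0x1000, 8),
			   SPI_MEM_DTR_OP_DUMMY(16, 8),
			   SPI_MEM_DTR_OP_DATA_IN(256, buf, 8));

	if (!spi_mem_dtr_supports_op(slave, &op))
		return -EOPNOTSUPP;

	return spi_mem_exec_op(slave, &op);
}

spi_mem_dtr_supports_op() and spi_mem_exec_op() are existing helpers declared further down in this header.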
@@ -62,7 +103,7 @@ struct udevice;
* transfer from the controller perspective
* @SPI_MEM_NO_DATA: no data transferred
* @SPI_MEM_DATA_IN: data coming from the SPI memory
- * @SPI_MEM_DATA_OUT: data sent the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
*/
enum spi_mem_data_dir {
SPI_MEM_NO_DATA,
@@ -70,6 +111,9 @@ enum spi_mem_data_dir {
SPI_MEM_DATA_OUT,
};
+#define SPI_MEM_OP_MAX_FREQ(__freq) \
+ .max_freq = __freq
+
/**
* struct spi_mem_op - describes a SPI memory operation
* @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
@@ -80,26 +124,35 @@ enum spi_mem_data_dir {
* @addr.nbytes: number of address bytes to send. Can be zero if the operation
* does not need to send an address
* @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.dtr: whether the address should be sent in DTR mode or not
* @addr.val: address value. This value is always sent MSB first on the bus.
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
* assigned number of bytes.
- * @addr.dtr: whether the address should be sent in DTR mode or not
* @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
* @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dtr: whether the data should be sent in DTR mode or not
+ * @data.ecc: whether error correction is required or not
+ * @data.swap16: whether the byte order of 16-bit words is swapped when read
+ * or written in Octal DTR mode compared to STR mode.
* @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ * operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
+ * @max_freq: frequency limitation with respect to this operation. 0 means
+ *            there is no specific constraint and the highest achievable
+ *            frequency can be attempted.
*/
struct spi_mem_op {
struct {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u16 opcode;
} cmd;
@@ -107,6 +160,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u64 val;
} addr;
@@ -114,28 +168,35 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
} dummy;
struct {
u8 buswidth;
u8 dtr : 1;
+ u8 ecc : 1;
+ u8 swap16 : 1;
+ u8 __pad : 5;
enum spi_mem_data_dir dir;
unsigned int nbytes;
- /* buf.{in,out} must be DMA-able. */
union {
void *in;
const void *out;
} buf;
} data;
+
+ unsigned int max_freq;
};
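
The new data.ecc and data.swap16 bits matter mostly to controller drivers: a supports_op() hook can reject capabilities it does not implement before deferring to the generic checks. A sketch against the Linux-side prototype of spi_mem_default_supports_op() declared further down:

static bool example_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	/* This controller has no ECC engine and no 16-bit byte swapping. */
	if (op->data.ecc || op->data.swap16)
		return false;

	return spi_mem_default_supports_op(mem, op);
}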
-#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data, ...) \
{ \
.cmd = __cmd, \
.addr = __addr, \
.dummy = __dummy, \
.data = __data, \
+ __VA_ARGS__ \
}
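
With the variadic form, per-operation constraints can be appended without touching existing call sites. A sketch capping a READ ID at 50 MHz; the opcode is the standard 0x9F, the frequency limit is made up for illustration, and id must point at a DMA-able 3-byte buffer:

static int example_read_id_slow(struct spi_slave *slave, u8 *id)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1),
			   SPI_MEM_OP_MAX_FREQ(50000000));

	return spi_mem_exec_op(slave, &op);
}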
+
/**
* struct spi_mem_dirmap_info - Direct mapping information
* @op_tmpl: operation template that should be used by the direct mapping when
@@ -143,7 +204,7 @@ struct spi_mem_op {
* @offset: absolute offset this direct mapping is pointing to
 * @length: length in bytes of this direct mapping
*
- * This information is used by the controller specific implementation to know
+ * This information is used by the controller-specific implementation to know
* the portion of memory that is directly mapped and the spi_mem_op that should
* be used to access the device.
* A direct mapping is only valid for one direction (read or write) and this
@@ -223,10 +284,12 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
- * limitations (can be alignment of max RX/TX size
+ * limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * not all drivers provide supports_op(), so exec_op() can return
+ * -EOPNOTSUPP if the op is not supported by the driver/controller
* @dirmap_create: create a direct mapping descriptor that can later be used to
* access the memory device. This method is optional
 * @dirmap_destroy: destroy a memory descriptor previously created by
@@ -300,13 +363,16 @@ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
static inline void
@@ -315,10 +381,18 @@ spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
struct sg_table *sg)
{
}
+
+static inline
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ return false;
+}
#endif /* CONFIG_SPI_MEM */
#endif /* __UBOOT__ */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op);
+u64 spi_mem_calc_op_duration(struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_slave *slave, const struct spi_mem_op *op);
bool spi_mem_dtr_supports_op(struct spi_slave *slave,
@@ -337,7 +411,6 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf);
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
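
A sketch of the dirmap flow for reads, assuming the create/destroy helpers keep their Linux prototypes; the template mirrors a SPI NAND fast read from cache (opcode 0x0b, two address bytes, one dummy byte) with sizes and error handling simplified:

static ssize_t example_dirmap_read(struct spi_slave *slave, void *buf,
				   size_t page_size)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
				      SPI_MEM_OP_ADDR(2, 0, 1),
				      SPI_MEM_OP_DUMMY(1, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = page_size,
	};
	struct spi_mem_dirmap_desc *desc;
	ssize_t ret;

	desc = spi_mem_dirmap_create(slave, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = spi_mem_dirmap_read(desc, 0, page_size, buf);
	spi_mem_dirmap_destroy(desc);

	return ret;
}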
-
#ifndef __UBOOT__
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
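
Finally, spi_mem_adjust_op_size() exists because controllers may cap transfer sizes; the canonical consumer pattern shrinks the op, executes it, and advances. A hypothetical helper for a plain 1-1-1 read (opcode 0x03), not code from this series:

static int example_read_chunked(struct spi_slave *slave, u32 addr,
				void *buf, size_t len)
{
	u8 *p = buf;

	while (len) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				   SPI_MEM_OP_ADDR(3, addr, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(len, p, 1));
		int ret = spi_mem_adjust_op_size(slave, &op);

		if (!ret)
			ret = spi_mem_exec_op(slave, &op);
		if (ret)
			return ret;

		/* The controller may have shrunk op.data.nbytes. */
		addr += op.data.nbytes;
		p += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}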