author     Linus Torvalds <torvalds@linux-foundation.org>   2025-10-04 15:50:37 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-10-04 15:50:37 -0700
commit     cc07b0a3afc8c15c1308497033453b44f7ccfc49 (patch)
tree       6619681c2eea5e60db6990f8810bc473b2cc9bf3
parent     20f868da2cc6c2d53e8a3c7a2a4d1edf5c730eae (diff)
parent     efebdf4b722143dc5073cee21276baf1673d451e (diff)
Merge tag 'mtd/for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux
Pull MTD updates from Miquel Raynal:
"MTD core:
- The bad block counter increment is skipped if the block is already
  known to be bad (improves the relevance of user statistics; a short
  sketch of the new check follows the quoted message)
- Expose the OOB layout via debugfs
Raw NAND:
- Add support for Loongson-2K1000 and Loongson-2K0500 NAND
controllers, including extra features, such as chip select
and 6-byte NAND ID reading support
- Drop the s3c2410 driver
SPI NAND:
- Important SPI NAND continuous read improvements and fixes
- Add support for FudanMicro FM25S01A
- Add support for continuous reads in Gigadevice vendor driver
ECC:
- Add support for the Realtek ECC engine
SPI NOR:
- Some flashes can't perform reads or writes whose start or end address
  is odd while in Octal DTR mode. File systems like UBIFS can request
  such reads or writes, causing the transaction to error out. Pad these
  read and write transactions with extra bytes to avoid the problem.
And the usual assortment of miscellaneous fixes"
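The bad block counter change corresponds to the mtdcore.c hunk in the diff
below. What follows is a condensed, non-compilable sketch of the new
mtd_block_markbad() flow; the parameter checks, the SLC-on-MLC offset
rescaling and the trailing statistics update of the real function are
omitted here.

/* Condensed from the mtdcore.c hunk below. */
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_info *master = mtd_get_master(mtd);
        loff_t moffs = mtd_get_master_ofs(mtd, ofs);
        int ret;

        if (!master->_block_markbad)
                return -EOPNOTSUPP;

        /* New: if the block is already marked bad, report success
         * without calling _block_markbad() again, so the bad block
         * counter is not incremented a second time. */
        if (master->_block_isbad) {
                ret = master->_block_isbad(master, moffs);
                if (ret > 0)
                        return 0;
        }

        ret = master->_block_markbad(master, moffs);
        if (ret)
                return ret;

        /* ...the real function then bumps the bad block statistics
         * exactly once, for a genuinely new bad block... */
        return 0;
}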
* tag 'mtd/for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (46 commits)
mtd: rawnand: sunxi: drop unused module alias
mtd: rawnand: stm32_fmc2: drop unused module alias
mtd: rawnand: rockchip: drop unused module alias
mtd: rawnand: pl353: drop unused module alias
mtd: rawnand: omap2: drop unused module alias
mtd: rawnand: atmel: drop unused module alias
mtd: onenand: omap2: drop unused module alias
mtd: hyperbus: hbmc-am654: drop unused module alias
mtd: jedec_probe: use struct_size() helper for cfiq allocation
mtd: cfi: use struct_size() helper for cfiq allocation
mtd: nand: raw: gpmi: fix clocks when CONFIG_PM=N
mtd: rawnand: omap2: fix device leak on probe failure
mtd: rawnand: atmel: Fix error handling path in atmel_nand_controller_add_nands
mtd: nand: realtek-ecc: Add Realtek external ECC engine support
dt-bindings: mtd: Add realtek,rtl9301-ecc
mtd: spinand: repeat reading in regular mode if continuous reading fails
mtd: spinand: try a regular dirmap if creating a dirmap for continuous reading fails
mtd: spinand: fix direct mapping creation sizes
mtd: rawnand: fsmc: Default to autodetect buswidth
mtd: nand: move nand_check_erased_ecc_chunk() to nand/core
...
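The two struct_size() entries in the list above (the jedec_probe and cfi
commits) replace open-coded trailing-array allocations. The pattern is
sketched here with an illustrative stand-in for struct cfi_ident (the real
definition lives in the kernel's CFI headers); cfiq_like and cfiq_alloc()
are made-up names, not kernel symbols.

#include <linux/overflow.h>     /* struct_size() */
#include <linux/slab.h>

/* Illustrative stand-in: a fixed header followed by a flexible array
 * of 4-byte erase-region descriptors. */
struct cfiq_like {
        u8  NumEraseRegions;
        u32 EraseRegionInfo[];
};

static struct cfiq_like *cfiq_alloc(unsigned int num_erase_regions)
{
        struct cfiq_like *q;

        /* Before: kmalloc(sizeof(*q) + num_erase_regions * 4, GFP_KERNEL).
         * struct_size() derives the element size from the member itself
         * and saturates instead of wrapping on overflow. */
        q = kmalloc(struct_size(q, EraseRegionInfo, num_erase_regions),
                    GFP_KERNEL);
        if (!q)
                return NULL;

        q->NumEraseRegions = num_erase_regions;
        return q;
}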
49 files changed, 2257 insertions, 2457 deletions
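nand_check_erased_ecc_chunk(), moved into nand/core by one of the commits
above, is meant to be called once per ECC chunk when the engine reports an
uncorrectable error, so that an erased page containing a few bitflips is
not counted as a failure; the Realtek ECC driver added in the diff below
uses it exactly this way. A minimal sketch of such a per-chunk loop
follows; my_engine_correct() and the CHUNK_SIZE, ECC_BYTES, FREE_BYTES and
ECC_STRENGTH constants are placeholders, not kernel symbols.

static int my_read_page_correct(struct mtd_info *mtd, u8 *data, u8 *ecc,
                                u8 *free_bytes, int steps)
{
        unsigned int max_bitflips = 0;
        int i, ret;

        for (i = 0; i < steps; i++) {
                ret = my_engine_correct(data, ecc); /* < 0 if uncorrectable */
                if (ret < 0)
                        /* Tell a genuinely bad chunk apart from an erased
                         * one with bitflips; on success the buffers are
                         * rewritten to 0xff and the flip count returned. */
                        ret = nand_check_erased_ecc_chunk(data, CHUNK_SIZE,
                                                          ecc, ECC_BYTES,
                                                          free_bytes, FREE_BYTES,
                                                          ECC_STRENGTH);
                if (ret < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += ret;
                        max_bitflips = max(max_bitflips, (unsigned int)ret);
                }

                data += CHUNK_SIZE;
                ecc += ECC_BYTES;
                free_bytes += FREE_BYTES;
        }

        /* The NAND core expects the maximum number of bitflips seen in
         * any single chunk of the page. */
        return max_bitflips;
}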
diff --git a/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml b/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml index a09e92e416c4..cf85d0cede00 100644 --- a/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml @@ -4,13 +4,14 @@ $id: http://devicetree.org/schemas/mtd/loongson,ls1b-nand-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Loongson-1 NAND Controller +title: Loongson NAND Controller maintainers: - Keguang Zhang <keguang.zhang@gmail.com> + - Binbin Zhou <zhoubinbin@loongson.cn> description: - The Loongson-1 NAND controller abstracts all supported operations, + The Loongson NAND controller abstracts all supported operations, meaning it does not support low-level access to raw NAND flash chips. Moreover, the controller is paired with the DMA engine to perform READ and PROGRAM functions. @@ -24,18 +25,23 @@ properties: - enum: - loongson,ls1b-nand-controller - loongson,ls1c-nand-controller + - loongson,ls2k0500-nand-controller + - loongson,ls2k1000-nand-controller - items: - enum: - loongson,ls1a-nand-controller - const: loongson,ls1b-nand-controller reg: - maxItems: 2 + minItems: 2 + maxItems: 3 reg-names: + minItems: 2 items: - const: nand - const: nand-dma + - const: dma-config dmas: maxItems: 1 @@ -52,6 +58,27 @@ required: unevaluatedProperties: false +if: + properties: + compatible: + contains: + enum: + - loongson,ls2k1000-nand-controller + +then: + properties: + reg: + minItems: 3 + reg-names: + minItems: 3 + +else: + properties: + reg: + maxItems: 2 + reg-names: + maxItems: 2 + examples: - | nand-controller@1fe78000 { @@ -70,3 +97,26 @@ examples: nand-ecc-algo = "hamming"; }; }; + + - | + nand-controller@1fe26000 { + compatible = "loongson,ls2k1000-nand-controller"; + reg = <0x1fe26000 0x24>, + <0x1fe26040 0x4>, + <0x1fe00438 0x8>; + reg-names = "nand", "nand-dma", "dma-config"; + dmas = <&apbdma0 0>; + dma-names = "rxtx"; + + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + label = "ls2k1000-nand"; + nand-use-soft-ecc-engine; + nand-ecc-algo = "bch"; + nand-ecc-strength = <8>; + nand-ecc-step-size = <512>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/realtek,rtl9301-ecc.yaml b/Documentation/devicetree/bindings/mtd/realtek,rtl9301-ecc.yaml new file mode 100644 index 000000000000..55b35c3db0ff --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/realtek,rtl9301-ecc.yaml @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/realtek,rtl9301-ecc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek SoCs NAND ECC engine + +maintainers: + - Markus Stockhausen <markus.stockhausen@gmx.de> + +properties: + compatible: + const: realtek,rtl9301-ecc + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + soc { + #address-cells = <1>; + #size-cells = <1>; + + ecc0: ecc@1a600 { + compatible = "realtek,rtl9301-ecc"; + reg = <0x1a600 0x54>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt b/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt deleted file mode 100644 index 635455350660..000000000000 --- a/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt +++ /dev/null @@ -1,56 +0,0 @@ -* Samsung S3C2410 and compatible 
NAND flash controller - -Required properties: -- compatible : The possible values are: - "samsung,s3c2410-nand" - "samsung,s3c2412-nand" - "samsung,s3c2440-nand" -- reg : register's location and length. -- #address-cells, #size-cells : see nand-controller.yaml -- clocks : phandle to the nand controller clock -- clock-names : must contain "nand" - -Optional child nodes: -Child nodes representing the available nand chips. - -Optional child properties: -- nand-ecc-mode : see nand-controller.yaml -- nand-on-flash-bbt : see nand-controller.yaml - -Each child device node may optionally contain a 'partitions' sub-node, -which further contains sub-nodes describing the flash partition mapping. -See mtd.yaml for more detail. - -Example: - -nand-controller@4e000000 { - compatible = "samsung,s3c2440-nand"; - reg = <0x4e000000 0x40>; - - #address-cells = <1>; - #size-cells = <0>; - - clocks = <&clocks HCLK_NAND>; - clock-names = "nand"; - - nand { - nand-ecc-mode = "soft"; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0 0x040000>; - }; - - partition@40000 { - label = "kernel"; - reg = <0x040000 0x500000>; - }; - }; - }; -}; diff --git a/MAINTAINERS b/MAINTAINERS index 439cf09de532..6da623f6e8a4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17241,7 +17241,7 @@ F: Documentation/devicetree/bindings/*/loongson,ls1*.yaml F: arch/mips/include/asm/mach-loongson32/ F: arch/mips/loongson32/ F: drivers/*/*loongson1* -F: drivers/mtd/nand/raw/loongson1-nand-controller.c +F: drivers/mtd/nand/raw/loongson-nand-controller.c F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c F: sound/soc/loongson/loongson1_ac97.c diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index a04b6174181c..e254f9cd2796 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -208,7 +208,7 @@ static int __xipram cfi_chip_setup(struct map_info *map, if (!num_erase_regions) return 0; - cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); + cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL); if (!cfi->cfiq) return 0; diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 23c32fe584b7..b285962eee2a 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1953,7 +1953,7 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) * as they will ignore the writes and don't care what address * the F0 is written to */ if (cfi->addr_unlock1) { - pr_debug( "reset unlock called %x %x \n", + pr_debug("reset unlock called %x %x\n", cfi->addr_unlock1,cfi->addr_unlock2); cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); @@ -1985,7 +1985,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in num_erase_regions = jedec_table[index].nr_regions; - cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); + cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL); if (!cfi->cfiq) { //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); return 0; diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index f2bd1984609c..59a901549257 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -263,7 +263,7 @@ static 
int build_maps(partition_t *part) /* Set up virtual page map */ blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize; - part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t))); + part->VirtualBlockMap = vmalloc_array(blocks, sizeof(uint32_t)); if (!part->VirtualBlockMap) goto out_XferInfo; diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index 82a1e7b7e4d8..9d31464046b2 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -272,5 +272,4 @@ module_platform_driver(am654_hbmc_platform_driver); MODULE_DESCRIPTION("HBMC driver for AM654 SoC"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:hbmc-am654"); MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index 14e36ae71958..290fd0119e98 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -142,7 +142,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, if (dsr & DSR_READY_STATUS) break; if (!timeo) { - printk(KERN_ERR "%s: Flash timeout error state %d \n", + printk(KERN_ERR "%s: Flash timeout error state %d\n", map->name, chip_state); ret = -ETIME; break; @@ -186,7 +186,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, if (dsr & DSR_ERR) { /* Clear DSR*/ map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR); - printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n", + printk(KERN_WARNING"%s: Bad status on wait: 0x%x\n", map->name, dsr); print_drs_error(dsr); ret = -EIO; @@ -321,7 +321,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode) /* Resume and pretend we weren't here. */ put_chip(map, chip); printk(KERN_ERR "%s: suspend operation failed." - "State may be wrong \n", map->name); + "State may be wrong\n", map->name); return -EIO; } chip->erase_suspended = 1; @@ -468,7 +468,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip, chip->state = FL_WRITING; ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime)); if (ret) { - printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n", + printk(KERN_WARNING"%s Buffer program error: %d at %lx\n", map->name, ret, adr); goto out; } @@ -736,7 +736,7 @@ static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) ret = wait_for_ready(map, chip, 1); if (ret) { - printk(KERN_ERR "%s: block unlock error status %d \n", + printk(KERN_ERR "%s: block unlock error status %d\n", map->name, ret); goto out; } diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index 137ae5f0a19b..42281e460c62 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c @@ -55,7 +55,7 @@ static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) return minor | (major << bankwidth); } } - printk(KERN_ERR"%s qinfo id string is wrong! 
\n", map->name); + printk(KERN_ERR"%s qinfo id string is wrong!\n", map->name); BUG(); return -1; } @@ -112,7 +112,7 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr) return 1; /* "PFOW" is found */ out: - printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n", + printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found\n", map->name, map->pfow_base); return 0; } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5ba9a741f5ac..64808493b4f5 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -384,14 +384,64 @@ EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode); static struct dentry *dfs_dir_mtd; +static int mtd_ooblayout_show(struct seq_file *s, void *p, + int (*iter)(struct mtd_info *, int section, + struct mtd_oob_region *region)) +{ + struct mtd_info *mtd = s->private; + int section; + + for (section = 0;; section++) { + struct mtd_oob_region region; + int err; + + err = iter(mtd, section, ®ion); + if (err) { + if (err == -ERANGE) + break; + + return err; + } + + seq_printf(s, "%-3d %4u %4u\n", section, region.offset, + region.length); + } + + return 0; +} + +static int mtd_ooblayout_ecc_show(struct seq_file *s, void *p) +{ + return mtd_ooblayout_show(s, p, mtd_ooblayout_ecc); +} +DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_ecc); + +static int mtd_ooblayout_free_show(struct seq_file *s, void *p) +{ + return mtd_ooblayout_show(s, p, mtd_ooblayout_free); +} +DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_free); + static void mtd_debugfs_populate(struct mtd_info *mtd) { struct device *dev = &mtd->dev; + struct mtd_oob_region region; if (IS_ERR_OR_NULL(dfs_dir_mtd)) return; mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); + if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) + return; + + /* Create ooblayout files only if at least one region is present. 
*/ + if (mtd_ooblayout_ecc(mtd, 0, ®ion) == 0) + debugfs_create_file("ooblayout_ecc", 0444, mtd->dbg.dfs_dir, + mtd, &mtd_ooblayout_ecc_fops); + + if (mtd_ooblayout_free(mtd, 0, ®ion) == 0) + debugfs_create_file("ooblayout_free", 0444, mtd->dbg.dfs_dir, + mtd, &mtd_ooblayout_free_fops); } #ifndef CONFIG_MMU @@ -2339,6 +2389,7 @@ EXPORT_SYMBOL_GPL(mtd_block_isbad); int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_info *master = mtd_get_master(mtd); + loff_t moffs; int ret; if (!master->_block_markbad) @@ -2351,7 +2402,15 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; - ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); + moffs = mtd_get_master_ofs(mtd, ofs); + + if (master->_block_isbad) { + ret = master->_block_isbad(master, moffs); + if (ret > 0) + return 0; + } + + ret = master->_block_markbad(master, moffs); if (ret) return ret; diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 7bf3777e1f13..b88083751a0c 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -356,9 +356,8 @@ static void mtdoops_notify_add(struct mtd_info *mtd) /* oops_page_used is a bit field */ cxt->oops_page_used = - vmalloc(array_size(sizeof(unsigned long), - DIV_ROUND_UP(mtdoops_pages, - BITS_PER_LONG))); + vmalloc_array(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG), + sizeof(unsigned long)); if (!cxt->oops_page_used) { pr_err("could not allocate page array\n"); return; diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index 680366616da2..d8f2e5be2d31 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -1285,11 +1285,11 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks, for (i = 0; i < MTDSWAP_TREE_CNT; i++) d->trees[i].root = RB_ROOT; - d->page_data = vmalloc(array_size(pages, sizeof(int))); + d->page_data = vmalloc_array(pages, sizeof(int)); if (!d->page_data) goto page_data_fail; - d->revmap = vmalloc(array_size(blocks, sizeof(int))); + d->revmap = vmalloc_array(blocks, sizeof(int)); if (!d->revmap) goto revmap_fail; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b0c2c95f10c..4a17271076bc 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -61,6 +61,14 @@ config MTD_NAND_ECC_MEDIATEK help This enables support for the hardware ECC engine from Mediatek. +config MTD_NAND_ECC_REALTEK + tristate "Realtek RTL93xx hardware ECC engine" + depends on HAS_IOMEM + depends on MACH_REALTEK_RTL || COMPILE_TEST + select MTD_NAND_ECC + help + This enables support for the hardware ECC engine from Realtek. 
+ endmenu endmenu diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 44913ff1bf12..2e0e56267718 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -3,6 +3,7 @@ nandcore-objs := core.o bbt.o obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o +obj-$(CONFIG_MTD_NAND_ECC_REALTEK) += ecc-realtek.o obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o obj-y += onenand/ diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index 7737b1a4a177..3e76d127715f 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -13,6 +13,137 @@ #include <linux/mtd/nand.h> /** + * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data + * @buf: buffer to test + * @len: buffer length + * @bitflips_threshold: maximum number of bitflips + * + * Check if a buffer contains only 0xff, which means the underlying region + * has been erased and is ready to be programmed. + * The bitflips_threshold specify the maximum number of bitflips before + * considering the region is not erased. + * Note: The logic of this function has been extracted from the memweight + * implementation, except that nand_check_erased_buf function exit before + * testing the whole buffer if the number of bitflips exceed the + * bitflips_threshold value. + * + * Returns a positive number of bitflips less than or equal to + * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the + * threshold. + */ +static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) +{ + const unsigned char *bitmap = buf; + int bitflips = 0; + int weight; + + for (; len && ((uintptr_t)bitmap) % sizeof(long); + len--, bitmap++) { + weight = hweight8(*bitmap); + bitflips += BITS_PER_BYTE - weight; + if (unlikely(bitflips > bitflips_threshold)) + return -EBADMSG; + } + + for (; len >= sizeof(long); + len -= sizeof(long), bitmap += sizeof(long)) { + unsigned long d = *((unsigned long *)bitmap); + if (d == ~0UL) + continue; + weight = hweight_long(d); + bitflips += BITS_PER_LONG - weight; + if (unlikely(bitflips > bitflips_threshold)) + return -EBADMSG; + } + + for (; len > 0; len--, bitmap++) { + weight = hweight8(*bitmap); + bitflips += BITS_PER_BYTE - weight; + if (unlikely(bitflips > bitflips_threshold)) + return -EBADMSG; + } + + return bitflips; +} + +/** + * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only + * 0xff data + * @data: data buffer to test + * @datalen: data length + * @ecc: ECC buffer + * @ecclen: ECC length + * @extraoob: extra OOB buffer + * @extraooblen: extra OOB length + * @bitflips_threshold: maximum number of bitflips + * + * Check if a data buffer and its associated ECC and OOB data contains only + * 0xff pattern, which means the underlying region has been erased and is + * ready to be programmed. + * The bitflips_threshold specify the maximum number of bitflips before + * considering the region as not erased. + * + * Note: + * 1/ ECC algorithms are working on pre-defined block sizes which are usually + * different from the NAND page size. When fixing bitflips, ECC engines will + * report the number of errors per chunk, and the NAND core infrastructure + * expect you to return the maximum number of bitflips for the whole page. + * This is why you should always use this function on a single chunk and + * not on the whole page. After checking each chunk you should update your + * max_bitflips value accordingly. 
+ * 2/ When checking for bitflips in erased pages you should not only check + * the payload data but also their associated ECC data, because a user might + * have programmed almost all bits to 1 but a few. In this case, we + * shouldn't consider the chunk as erased, and checking ECC bytes prevent + * this case. + * 3/ The extraoob argument is optional, and should be used if some of your OOB + * data are protected by the ECC engine. + * It could also be used if you support subpages and want to attach some + * extra OOB data to an ECC chunk. + * + * Returns a positive number of bitflips less than or equal to + * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the + * threshold. In case of success, the passed buffers are filled with 0xff. + */ +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int bitflips_threshold) +{ + int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; + + data_bitflips = nand_check_erased_buf(data, datalen, + bitflips_threshold); + if (data_bitflips < 0) + return data_bitflips; + + bitflips_threshold -= data_bitflips; + + ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); + if (ecc_bitflips < 0) + return ecc_bitflips; + + bitflips_threshold -= ecc_bitflips; + + extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, + bitflips_threshold); + if (extraoob_bitflips < 0) + return extraoob_bitflips; + + if (data_bitflips) + memset(data, 0xff, datalen); + + if (ecc_bitflips) + memset(ecc, 0xff, ecclen); + + if (extraoob_bitflips) + memset(extraoob, 0xff, extraooblen); + + return data_bitflips + ecc_bitflips + extraoob_bitflips; +} +EXPORT_SYMBOL(nand_check_erased_ecc_chunk); + +/** * nanddev_isbad() - Check if a block is bad * @nand: NAND device * @pos: position pointing to the block we want to check diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 1bf9a5a64b87..60cdcb4175ef 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -322,14 +322,14 @@ static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev) sg_init_table(ctx->sg, 2); /* Configuration dump and sanity checks */ - dev_err(dev, "DPE version number: %d\n", + dev_dbg(dev, "DPE version number: %d\n", readl(mxic->regs + DP_VER) >> DP_VER_OFFSET); - dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE)); - dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE)); - dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg)); - dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg)); - dev_err(dev, "Parity size: %d\n", ctx->parity_sz); - dev_err(dev, "Meta size: %d\n", ctx->meta_sz); + dev_dbg(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE)); + dev_dbg(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE)); + dev_dbg(dev, "Spare size: %d\n", SPARE_SZ(spare_reg)); + dev_dbg(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg)); + dev_dbg(dev, "Parity size: %d\n", ctx->parity_sz); + dev_dbg(dev, "Meta size: %d\n", ctx->meta_sz); if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) != SPARE_SZ(spare_reg)) { diff --git a/drivers/mtd/nand/ecc-realtek.c b/drivers/mtd/nand/ecc-realtek.c new file mode 100644 index 000000000000..7d718934c909 --- /dev/null +++ b/drivers/mtd/nand/ecc-realtek.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for Realtek hardware ECC engine in RTL93xx SoCs + */ + +#include <linux/bitfield.h> +#include <linux/dma-mapping.h> +#include <linux/mtd/nand.h> +#include <linux/mutex.h> +#include 
<linux/platform_device.h> +#include <linux/regmap.h> + +/* + * The Realtek ECC engine has two operation modes. + * + * - BCH6 : Generate 10 ECC bytes from 512 data bytes plus 6 free bytes + * - BCH12 : Generate 20 ECC bytes from 512 data bytes plus 6 free bytes + * + * It can run for arbitrary NAND flash chips with different block and OOB sizes. Currently there + * are only two known devices in the wild that have NAND flash and make use of this ECC engine + * (Linksys LGS328C & LGS352C). To keep compatibility with vendor firmware, new modes can only + * be added when new data layouts have been analyzed. For now allow BCH6 on flash with 2048 byte + * blocks and 64 bytes oob. + * + * This driver aligns with kernel ECC naming conventions. Neverthless a short notice on the + * Realtek naming conventions for the different structures in the OOB area. + * + * - BBI : Bad block indicator. The first two bytes of OOB. Protected by ECC! + * - tag : 6 User/free bytes. First tag "contains" 2 bytes BBI. Protected by ECC! + * - syndrome : ECC/parity bytes + * + * Altogether this gives currently the following block layout. + * + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ + * | 512 | 512 | 512 | 512 | 2 | 4 | 6 | 6 | 6 | 10 | 10 | 10 | 10 | + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ + * | data | data | data | data | BBI | free | free | free | free | ECC | ECC | ECC | ECC | + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ + */ + +#define RTL_ECC_ALLOWED_PAGE_SIZE 2048 +#define RTL_ECC_ALLOWED_OOB_SIZE 64 +#define RTL_ECC_ALLOWED_STRENGTH 6 + +#define RTL_ECC_BLOCK_SIZE 512 +#define RTL_ECC_FREE_SIZE 6 +#define RTL_ECC_PARITY_SIZE_BCH6 10 +#define RTL_ECC_PARITY_SIZE_BCH12 20 + +/* + * The engine is fed with two DMA regions. One for data (always 512 bytes) and one for free bytes + * and parity (either 16 bytes for BCH6 or 26 bytes for BCH12). Start and length of each must be + * aligned to a multiple of 4. 
+ */ + +#define RTL_ECC_DMA_FREE_PARITY_SIZE ALIGN(RTL_ECC_FREE_SIZE + RTL_ECC_PARITY_SIZE_BCH12, 4) +#define RTL_ECC_DMA_SIZE (RTL_ECC_BLOCK_SIZE + RTL_ECC_DMA_FREE_PARITY_SIZE) + +#define RTL_ECC_CFG 0x00 +#define RTL_ECC_BCH6 0 +#define RTL_ECC_BCH12 BIT(28) +#define RTL_ECC_DMA_PRECISE BIT(12) +#define RTL_ECC_BURST_128 GENMASK(1, 0) +#define RTL_ECC_DMA_TRIGGER 0x08 +#define RTL_ECC_OP_DECODE 0 +#define RTL_ECC_OP_ENCODE BIT(0) +#define RTL_ECC_DMA_START 0x0c +#define RTL_ECC_DMA_TAG 0x10 +#define RTL_ECC_STATUS 0x14 +#define RTL_ECC_CORR_COUNT GENMASK(19, 12) +#define RTL_ECC_RESULT BIT(8) +#define RTL_ECC_ALL_ONE BIT(4) +#define RTL_ECC_OP_STATUS BIT(0) + +struct rtl_ecc_engine { + struct device *dev; + struct nand_ecc_engine engine; + struct mutex lock; + char *buf; + dma_addr_t buf_dma; + struct regmap *regmap; +}; + +struct rtl_ecc_ctx { + struct rtl_ecc_engine * rtlc; + struct nand_ecc_req_tweak_ctx req_ctx; + int steps; + int bch_mode; + int strength; + int parity_size; +}; + +static const struct regmap_config rtl_ecc_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static inline void *nand_to_ctx(struct nand_device *nand) +{ + return nand->ecc.ctx.priv; +} + +static inline struct rtl_ecc_engine *nand_to_rtlc(struct nand_device *nand) +{ + struct nand_ecc_engine *eng = nand->ecc.engine; + + return container_of(eng, struct rtl_ecc_engine, engine); +} + +static int rtl_ecc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); + + if (section < 0 || section >= ctx->steps) + return -ERANGE; + + oobregion->offset = ctx->steps * RTL_ECC_FREE_SIZE + section * ctx->parity_size; + oobregion->length = ctx->parity_size; + + return 0; +} + +static int rtl_ecc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); + int bbm; + + if (section < 0 || section >= ctx->steps) + return -ERANGE; + + /* reserve 2 BBM bytes in first block */ + bbm = section ? 0 : 2; + oobregion->offset = section * RTL_ECC_FREE_SIZE + bbm; + oobregion->length = RTL_ECC_FREE_SIZE - bbm; + + return 0; +} + +static const struct mtd_ooblayout_ops rtl_ecc_ooblayout_ops = { + .ecc = rtl_ecc_ooblayout_ecc, + .free = rtl_ecc_ooblayout_free, +}; + +static void rtl_ecc_kick_engine(struct rtl_ecc_ctx *ctx, int operation) +{ + struct rtl_ecc_engine *rtlc = ctx->rtlc; + + regmap_write(rtlc->regmap, RTL_ECC_CFG, + ctx->bch_mode | RTL_ECC_BURST_128 | RTL_ECC_DMA_PRECISE); + + regmap_write(rtlc->regmap, RTL_ECC_DMA_START, rtlc->buf_dma); + regmap_write(rtlc->regmap, RTL_ECC_DMA_TAG, rtlc->buf_dma + RTL_ECC_BLOCK_SIZE); + regmap_write(rtlc->regmap, RTL_ECC_DMA_TRIGGER, operation); +} + +static int rtl_ecc_wait_for_engine(struct rtl_ecc_ctx *ctx) +{ + struct rtl_ecc_engine *rtlc = ctx->rtlc; + int ret, status, bitflips; + bool all_one; + + /* + * The ECC engine needs 6-8 us to encode/decode a BCH6 syndrome for 512 bytes of data + * and 6 free bytes. In case the NAND area has been erased and all data and oob is + * set to 0xff, decoding takes 30us (reason unknown). Although the engine can trigger + * interrupts when finished, use active polling for now. 12 us maximum wait time has + * proven to be a good tradeoff between performance and overhead. 
+ */ + + ret = regmap_read_poll_timeout(rtlc->regmap, RTL_ECC_STATUS, status, + !(status & RTL_ECC_OP_STATUS), 12, 1000000); + if (ret) + return ret; + + ret = FIELD_GET(RTL_ECC_RESULT, status); + all_one = FIELD_GET(RTL_ECC_ALL_ONE, status); + bitflips = FIELD_GET(RTL_ECC_CORR_COUNT, status); + + /* For erased blocks (all bits one) error status can be ignored */ + if (all_one) + ret = 0; + + return ret ? -EBADMSG : bitflips; +} + +static int rtl_ecc_run_engine(struct rtl_ecc_ctx *ctx, char *data, char *free, + char *parity, int operation) +{ + struct rtl_ecc_engine *rtlc = ctx->rtlc; + char *buf_parity = rtlc->buf + RTL_ECC_BLOCK_SIZE + RTL_ECC_FREE_SIZE; + char *buf_free = rtlc->buf + RTL_ECC_BLOCK_SIZE; + char *buf_data = rtlc->buf; + int ret; + + mutex_lock(&rtlc->lock); + + memcpy(buf_data, data, RTL_ECC_BLOCK_SIZE); + memcpy(buf_free, free, RTL_ECC_FREE_SIZE); + memcpy(buf_parity, parity, ctx->parity_size); + + dma_sync_single_for_device(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_TO_DEVICE); + rtl_ecc_kick_engine(ctx, operation); + ret = rtl_ecc_wait_for_engine(ctx); + dma_sync_single_for_cpu(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_FROM_DEVICE); + + if (ret >= 0) { + memcpy(data, buf_data, RTL_ECC_BLOCK_SIZE); + memcpy(free, buf_free, RTL_ECC_FREE_SIZE); + memcpy(parity, buf_parity, ctx->parity_size); + } + + mutex_unlock(&rtlc->lock); + + return ret; +} + +static int rtl_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req) +{ + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); + char *data, *free, *parity; + int ret = 0; + + if (req->mode == MTD_OPS_RAW) + return 0; + + nand_ecc_tweak_req(&ctx->req_ctx, req); + + if (req->type == NAND_PAGE_READ) + return 0; + + free = req->oobbuf.in; + data = req->databuf.in; + parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE; + + for (int i = 0; i < ctx->steps; i++) { + ret |= rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_ENCODE); + + free += RTL_ECC_FREE_SIZE; + data += RTL_ECC_BLOCK_SIZE; + parity += ctx->parity_size; + } + + if (unlikely(ret)) + dev_dbg(rtlc->dev, "ECC calculation failed\n"); + + return ret ? 
-EBADMSG : 0; +} + +static int rtl_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req) +{ + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + char *data, *free, *parity; + bool failure = false; + int bitflips = 0; + + if (req->mode == MTD_OPS_RAW) + return 0; + + if (req->type == NAND_PAGE_WRITE) { + nand_ecc_restore_req(&ctx->req_ctx, req); + return 0; + } + + free = req->oobbuf.in; + data = req->databuf.in; + parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE; + + for (int i = 0 ; i < ctx->steps; i++) { + int ret = rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_DECODE); + + if (unlikely(ret < 0)) + /* ECC totally fails for bitflips in erased blocks */ + ret = nand_check_erased_ecc_chunk(data, RTL_ECC_BLOCK_SIZE, + parity, ctx->parity_size, + free, RTL_ECC_FREE_SIZE, + ctx->strength); + if (unlikely(ret < 0)) { + failure = true; + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + bitflips = max_t(unsigned int, bitflips, ret); + } + + free += RTL_ECC_FREE_SIZE; + data += RTL_ECC_BLOCK_SIZE; + parity += ctx->parity_size; + } + + nand_ecc_restore_req(&ctx->req_ctx, req); + + if (unlikely(failure)) + dev_dbg(rtlc->dev, "ECC correction failed\n"); + else if (unlikely(bitflips > 2)) + dev_dbg(rtlc->dev, "%d bitflips detected\n", bitflips); + + return failure ? -EBADMSG : bitflips; +} + +static int rtl_ecc_check_support(struct nand_device *nand) +{ + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct device *dev = nand->ecc.engine->dev; + + if (mtd->oobsize != RTL_ECC_ALLOWED_OOB_SIZE || + mtd->writesize != RTL_ECC_ALLOWED_PAGE_SIZE) { + dev_err(dev, "only flash geometry data=%d, oob=%d supported\n", + RTL_ECC_ALLOWED_PAGE_SIZE, RTL_ECC_ALLOWED_OOB_SIZE); + return -EINVAL; + } + + if (nand->ecc.user_conf.algo != NAND_ECC_ALGO_BCH || + nand->ecc.user_conf.strength != RTL_ECC_ALLOWED_STRENGTH || + nand->ecc.user_conf.placement != NAND_ECC_PLACEMENT_OOB || + nand->ecc.user_conf.step_size != RTL_ECC_BLOCK_SIZE) { + dev_err(dev, "only algo=bch, strength=%d, placement=oob, step=%d supported\n", + RTL_ECC_ALLOWED_STRENGTH, RTL_ECC_BLOCK_SIZE); + return -EINVAL; + } + + return 0; +} + +static int rtl_ecc_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + int strength = nand->ecc.user_conf.strength; + struct device *dev = nand->ecc.engine->dev; + struct rtl_ecc_ctx *ctx; + int ret; + + ret = rtl_ecc_check_support(nand); + if (ret) + return ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + nand->ecc.ctx.priv = ctx; + mtd_set_ooblayout(mtd, &rtl_ecc_ooblayout_ops); + + conf->algo = NAND_ECC_ALGO_BCH; + conf->strength = strength; + conf->step_size = RTL_ECC_BLOCK_SIZE; + conf->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + + ctx->rtlc = rtlc; + ctx->steps = mtd->writesize / RTL_ECC_BLOCK_SIZE; + ctx->strength = strength; + ctx->bch_mode = strength == 6 ? RTL_ECC_BCH6 : RTL_ECC_BCH12; + ctx->parity_size = strength == 6 ? 
RTL_ECC_PARITY_SIZE_BCH6 : RTL_ECC_PARITY_SIZE_BCH12; + + ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand); + if (ret) + return ret; + + dev_dbg(dev, "using bch%d with geometry data=%dx%d, free=%dx6, parity=%dx%d", + conf->strength, ctx->steps, conf->step_size, + ctx->steps, ctx->steps, ctx->parity_size); + + return 0; +} + +static void rtl_ecc_cleanup_ctx(struct nand_device *nand) +{ + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); + + if (ctx) + nand_ecc_cleanup_req_tweaking(&ctx->req_ctx); +} + +static struct nand_ecc_engine_ops rtl_ecc_engine_ops = { + .init_ctx = rtl_ecc_init_ctx, + .cleanup_ctx = rtl_ecc_cleanup_ctx, + .prepare_io_req = rtl_ecc_prepare_io_req, + .finish_io_req = rtl_ecc_finish_io_req, +}; + +static int rtl_ecc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rtl_ecc_engine *rtlc; + void __iomem *base; + int ret; + + rtlc = devm_kzalloc(dev, sizeof(*rtlc), GFP_KERNEL); + if (!rtlc) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + ret = devm_mutex_init(dev, &rtlc->lock); + if (ret) + return ret; + + rtlc->regmap = devm_regmap_init_mmio(dev, base, &rtl_ecc_regmap_config); + if (IS_ERR(rtlc->regmap)) + return PTR_ERR(rtlc->regmap); + + /* + * Focus on simplicity and use a preallocated DMA buffer for data exchange with the + * engine. For now make it a noncoherent memory model as invalidating/flushing caches + * is faster than reading/writing uncached memory on the known architectures. + */ + + rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma, + DMA_BIDIRECTIONAL, GFP_KERNEL); + if (IS_ERR(rtlc->buf)) + return PTR_ERR(rtlc->buf); + + rtlc->dev = dev; + rtlc->engine.dev = dev; + rtlc->engine.ops = &rtl_ecc_engine_ops; + rtlc->engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL; + + nand_ecc_register_on_host_hw_engine(&rtlc->engine); + + platform_set_drvdata(pdev, rtlc); + + return 0; +} + +static void rtl_ecc_remove(struct platform_device *pdev) +{ + struct rtl_ecc_engine *rtlc = platform_get_drvdata(pdev); + + nand_ecc_unregister_on_host_hw_engine(&rtlc->engine); + dma_free_noncoherent(rtlc->dev, RTL_ECC_DMA_SIZE, rtlc->buf, rtlc->buf_dma, + DMA_BIDIRECTIONAL); +} + +static const struct of_device_id rtl_ecc_of_ids[] = { + { + .compatible = "realtek,rtl9301-ecc", + }, + { /* sentinel */ }, +}; + +static struct platform_driver rtl_ecc_driver = { + .driver = { + .name = "rtl-nand-ecc-engine", + .of_match_table = rtl_ecc_of_ids, + }, + .probe = rtl_ecc_probe, + .remove = rtl_ecc_remove, +}; +module_platform_driver(rtl_ecc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Markus Stockhausen <markus.stockhausen@gmx.de>"); +MODULE_DESCRIPTION("Realtek NAND hardware ECC controller"); diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c index 8f996e8d61b8..6ccdff3fc913 100644 --- a/drivers/mtd/nand/ecc.c +++ b/drivers/mtd/nand/ecc.c @@ -552,7 +552,7 @@ void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx, memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size); } - /* Copy the data that must be writen in the bounce buffers, if needed */ + /* Copy the data that must be written in the bounce buffers, if needed */ if (orig->type == NAND_PAGE_WRITE) { if (ctx->bounce_data) memcpy((void *)tweak->databuf.out + orig->dataoffs, diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index f9a386b69050..0793251ada3b 100644 --- a/drivers/mtd/nand/onenand/onenand_omap2.c +++ 
b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -603,7 +603,6 @@ static struct platform_driver omap2_onenand_driver = { module_platform_driver(omap2_onenand_driver); -MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c index 8e604cc22ca3..db6c46a6fe01 100644 --- a/drivers/mtd/nand/qpic_common.c +++ b/drivers/mtd/nand/qpic_common.c @@ -89,10 +89,8 @@ void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc) memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions)); bam_txn->last_data_desc = NULL; - sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * - QPIC_PER_CW_CMD_SGL); - sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * - QPIC_PER_CW_DATA_SGL); + sg_init_table(bam_txn->cmd_sgl, bam_txn->cmd_sgl_nitems); + sg_init_table(bam_txn->data_sgl, bam_txn->data_sgl_nitems); reinit_completion(&bam_txn->txn_done); } diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 4b99d9c422c3..7408f34f0c68 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -77,32 +77,6 @@ config MTD_NAND_NDFC help NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs -config MTD_NAND_S3C2410 - tristate "Samsung S3C NAND controller" - depends on ARCH_S3C64XX - help - This enables the NAND flash controller on the S3C24xx and S3C64xx - SoCs - - No board specific support is done by this driver, each board - must advertise a platform_device for the driver to attach. - -config MTD_NAND_S3C2410_DEBUG - bool "Samsung S3C NAND controller debug" - depends on MTD_NAND_S3C2410 - help - Enable debugging of the S3C NAND driver - -config MTD_NAND_S3C2410_CLKSTOP - bool "Samsung S3C NAND IDLE clock stop" - depends on MTD_NAND_S3C2410 - default n - help - Stop the clock to the NAND controller when there is no chip - selected to save power. This will mean there is a small delay - when the is NAND chip selected or released, but will save - approximately 5mA of power when there is nothing happening. - config MTD_NAND_SHARPSL tristate "Sharp SL Series (C7xx + others) NAND controller" depends on ARCH_PXA || COMPILE_TEST @@ -462,12 +436,12 @@ config MTD_NAND_NUVOTON_MA35 Enables support for the NAND controller found on the Nuvoton MA35 series SoCs. -config MTD_NAND_LOONGSON1 - tristate "Loongson1 NAND controller" - depends on LOONGSON1_APB_DMA || COMPILE_TEST +config MTD_NAND_LOONGSON + tristate "Loongson NAND controller" + depends on LOONGSON1_APB_DMA || LOONGSON2_APB_DMA || COMPILE_TEST select REGMAP_MMIO help - Enables support for NAND controller on Loongson1 SoCs. + Enables support for NAND controller on Loongson family chips. 
comment "Misc" diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 711d043ad4f8..619760138d32 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -9,7 +9,6 @@ obj-$(CONFIG_MTD_NAND_DENALI) += denali.o obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o -obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o @@ -59,7 +58,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o -obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o +obj-$(CONFIG_MTD_NAND_LOONGSON) += loongson-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index db94d14a3807..83ba4ebd02d4 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1240,7 +1240,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, const struct nand_interface_config *conf, struct atmel_smc_cs_conf *smcconf) { - u32 ncycles, totalcycles, timeps, mckperiodps; + u32 ncycles, totalcycles, timeps, mckperiodps, pulse; struct atmel_nand_controller *nc; int ret; @@ -1366,11 +1366,16 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, ATMEL_SMC_MODE_TDFMODE_OPTIMIZED; /* - * Read pulse timing directly matches tRP: + * Read pulse timing would directly match tRP, + * but some NAND flash chips (S34ML01G2 and W29N02KVxxAF) + * do not work properly in timing mode 3. + * The workaround is to extend the SMC NRD pulse to meet tREA + * timing. 
* - * NRD_PULSE = tRP + * NRD_PULSE = max(tRP, tREA) */ - ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps); + pulse = max(conf->timings.sdr.tRP_min, conf->timings.sdr.tREA_max); + ncycles = DIV_ROUND_UP(pulse, mckperiodps); totalcycles += ncycles; ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles); @@ -1858,7 +1863,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc) static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) { - struct device_node *np, *nand_np; + struct device_node *np; struct device *dev = nc->dev; int ret, reg_cells; u32 val; @@ -1885,7 +1890,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) reg_cells += val; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { struct atmel_nand *nand; nand = atmel_nand_create(nc, nand_np, reg_cells); diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 0b402823b619..1d0e93e4edb1 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -1010,4 +1010,3 @@ module_platform_driver(atmel_pmecc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); MODULE_DESCRIPTION("PMECC engine driver"); -MODULE_ALIAS("platform:atmel_pmecc"); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index df61db8ce466..b13b2b0c3f30 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { nand->options |= NAND_BUSWIDTH_16; - } else if (val != 1) { + } else if (val == 1) { + nand->options |= NAND_BUSWIDTH_AUTO; + } else { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } + } else { + nand->options |= NAND_BUSWIDTH_AUTO; } if (of_property_read_bool(np, "nand-skip-bbtscan")) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index f4e68008ea03..a750f5839e34 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -145,6 +145,9 @@ err_clk: return ret; } +#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) +#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) + static int gpmi_init(struct gpmi_nand_data *this) { struct resources *r = &this->resources; @@ -2765,6 +2768,11 @@ static int gpmi_nand_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); +#ifndef CONFIG_PM + ret = gpmi_enable_clk(this); + if (ret) + goto exit_acquire_resources; +#endif ret = gpmi_init(this); if (ret) @@ -2800,6 +2808,9 @@ static void gpmi_nand_remove(struct platform_device *pdev) release_resources(this); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); +#ifndef CONFIG_PM + gpmi_disable_clk(this); +#endif } static int gpmi_pm_suspend(struct device *dev) @@ -2846,9 +2857,6 @@ static int gpmi_pm_resume(struct device *dev) return 0; } -#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) -#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) - static int gpmi_runtime_suspend(struct device *dev) { struct gpmi_nand_data *this = dev_get_drvdata(dev); diff --git a/drivers/mtd/nand/raw/loongson-nand-controller.c b/drivers/mtd/nand/raw/loongson-nand-controller.c new file 
mode 100644 index 000000000000..8490412d5be1 --- /dev/null +++ b/drivers/mtd/nand/raw/loongson-nand-controller.c @@ -0,0 +1,1024 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NAND Controller Driver for Loongson family chips + * + * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com> + * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/sizes.h> + +/* Loongson NAND Controller Registers */ +#define LOONGSON_NAND_CMD 0x0 +#define LOONGSON_NAND_ADDR1 0x4 +#define LOONGSON_NAND_ADDR2 0x8 +#define LOONGSON_NAND_TIMING 0xc +#define LOONGSON_NAND_IDL 0x10 +#define LOONGSON_NAND_IDH_STATUS 0x14 +#define LOONGSON_NAND_PARAM 0x18 +#define LOONGSON_NAND_OP_NUM 0x1c +#define LOONGSON_NAND_CS_RDY_MAP 0x20 + +/* Bitfields of nand command register */ +#define LOONGSON_NAND_CMD_OP_DONE BIT(10) +#define LOONGSON_NAND_CMD_OP_SPARE BIT(9) +#define LOONGSON_NAND_CMD_OP_MAIN BIT(8) +#define LOONGSON_NAND_CMD_STATUS BIT(7) +#define LOONGSON_NAND_CMD_RESET BIT(6) +#define LOONGSON_NAND_CMD_READID BIT(5) +#define LOONGSON_NAND_CMD_BLOCKS_ERASE BIT(4) +#define LOONGSON_NAND_CMD_ERASE BIT(3) +#define LOONGSON_NAND_CMD_WRITE BIT(2) +#define LOONGSON_NAND_CMD_READ BIT(1) +#define LOONGSON_NAND_CMD_VALID BIT(0) + +/* Bitfields of nand cs/rdy map register */ +#define LOONGSON_NAND_MAP_CS1_SEL GENMASK(11, 8) +#define LOONGSON_NAND_MAP_RDY1_SEL GENMASK(15, 12) +#define LOONGSON_NAND_MAP_CS2_SEL GENMASK(19, 16) +#define LOONGSON_NAND_MAP_RDY2_SEL GENMASK(23, 20) +#define LOONGSON_NAND_MAP_CS3_SEL GENMASK(27, 24) +#define LOONGSON_NAND_MAP_RDY3_SEL GENMASK(31, 28) + +#define LOONGSON_NAND_CS_SEL0 BIT(0) +#define LOONGSON_NAND_CS_SEL1 BIT(1) +#define LOONGSON_NAND_CS_SEL2 BIT(2) +#define LOONGSON_NAND_CS_SEL3 BIT(3) +#define LOONGSON_NAND_CS_RDY0 BIT(0) +#define LOONGSON_NAND_CS_RDY1 BIT(1) +#define LOONGSON_NAND_CS_RDY2 BIT(2) +#define LOONGSON_NAND_CS_RDY3 BIT(3) + +/* Bitfields of nand timing register */ +#define LOONGSON_NAND_WAIT_CYCLE_MASK GENMASK(7, 0) +#define LOONGSON_NAND_HOLD_CYCLE_MASK GENMASK(15, 8) + +/* Bitfields of nand parameter register */ +#define LOONGSON_NAND_CELL_SIZE_MASK GENMASK(11, 8) + +#define LOONGSON_NAND_COL_ADDR_CYC 2U +#define LOONGSON_NAND_MAX_ADDR_CYC 5U + +#define LOONGSON_NAND_READ_ID_SLEEP_US 1000 +#define LOONGSON_NAND_READ_ID_TIMEOUT_US 5000 + +#define BITS_PER_WORD (4 * BITS_PER_BYTE) + +/* Loongson-2K1000 NAND DMA routing register */ +#define LS2K1000_NAND_DMA_MASK GENMASK(2, 0) +#define LS2K1000_DMA0_CONF 0x0 +#define LS2K1000_DMA1_CONF 0x1 +#define LS2K1000_DMA2_CONF 0x2 +#define LS2K1000_DMA3_CONF 0x3 +#define LS2K1000_DMA4_CONF 0x4 + +struct loongson_nand_host; + +struct loongson_nand_op { + char addrs[LOONGSON_NAND_MAX_ADDR_CYC]; + unsigned int naddrs; + unsigned int addrs_offset; + unsigned int aligned_offset; + unsigned int cmd_reg; + unsigned int row_start; + unsigned int rdy_timeout_ms; + unsigned int orig_len; + bool is_readid; + bool is_erase; + bool is_write; + bool is_read; + bool is_change_column; + size_t len; + char *buf; +}; + +struct loongson_nand_data { + unsigned int max_id_cycle; + unsigned int id_cycle_field; + unsigned int status_field; + unsigned int op_scope_field; + unsigned int hold_cycle; + unsigned int wait_cycle; + 
unsigned int nand_cs; + unsigned int dma_bits; + int (*dma_config)(struct device *dev); + void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op); +}; + +struct loongson_nand_host { + struct device *dev; + struct nand_chip chip; + struct nand_controller controller; + const struct loongson_nand_data *data; + unsigned int addr_cs_field; + void __iomem *reg_base; + struct regmap *regmap; + /* DMA Engine stuff */ + dma_addr_t dma_base; + struct dma_chan *dma_chan; + dma_cookie_t dma_cookie; + struct completion dma_complete; +}; + +static const struct regmap_config loongson_nand_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op, + u8 opcode) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + + op->row_start = chip->page_shift + 1; + + /* The controller abstracts the following NAND operations. */ + switch (opcode) { + case NAND_CMD_STATUS: + op->cmd_reg = LOONGSON_NAND_CMD_STATUS; + break; + case NAND_CMD_RESET: + op->cmd_reg = LOONGSON_NAND_CMD_RESET; + break; + case NAND_CMD_READID: + op->is_readid = true; + op->cmd_reg = LOONGSON_NAND_CMD_READID; + break; + case NAND_CMD_ERASE1: + op->is_erase = true; + op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC; + break; + case NAND_CMD_ERASE2: + if (!op->is_erase) + return -EOPNOTSUPP; + /* During erasing, row_start differs from the default value. */ + op->row_start = chip->page_shift; + op->cmd_reg = LOONGSON_NAND_CMD_ERASE; + break; + case NAND_CMD_SEQIN: + op->is_write = true; + break; + case NAND_CMD_PAGEPROG: + if (!op->is_write) + return -EOPNOTSUPP; + op->cmd_reg = LOONGSON_NAND_CMD_WRITE; + break; + case NAND_CMD_READ0: + op->is_read = true; + break; + case NAND_CMD_READSTART: + if (!op->is_read) + return -EOPNOTSUPP; + op->cmd_reg = LOONGSON_NAND_CMD_READ; + break; + case NAND_CMD_RNDOUT: + op->is_change_column = true; + break; + case NAND_CMD_RNDOUTSTART: + if (!op->is_change_column) + return -EOPNOTSUPP; + op->cmd_reg = LOONGSON_NAND_CMD_READ; + break; + default: + dev_dbg(host->dev, "unsupported opcode: %u\n", opcode); + return -EOPNOTSUPP; + } + + return 0; +} + +static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop, + struct loongson_nand_op *op) +{ + unsigned int op_id; + int ret; + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + const struct nand_op_instr *instr = &subop->instrs[op_id]; + unsigned int offset, naddrs; + const u8 *addrs; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode); + if (ret < 0) + return ret; + + break; + case NAND_OP_ADDR_INSTR: + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC) + return -EOPNOTSUPP; + op->naddrs = naddrs; + offset = nand_subop_get_addr_start_off(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + memcpy(op->addrs + op->addrs_offset, addrs, naddrs); + break; + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + offset = nand_subop_get_data_start_off(subop, op_id); + op->orig_len = nand_subop_get_data_len(subop, op_id); + if (instr->type == NAND_OP_DATA_IN_INSTR) + op->buf = instr->ctx.data.buf.in + offset; + else if (instr->type == NAND_OP_DATA_OUT_INSTR) + op->buf = (void *)instr->ctx.data.buf.out + offset; + + break; + case NAND_OP_WAITRDY_INSTR: + op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + break; + default: + break; + } + } + + return 0; 
+} + +static void loongson_nand_set_addr_cs(struct loongson_nand_host *host) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + + if (!host->data->nand_cs) + return; + + /* + * The Manufacturer/Chip ID read operation precedes attach_chip, at which point + * information such as NAND chip selection and capacity is unknown. As a + * workaround, we use 128MB cellsize (2KB pagesize) as a fallback. + */ + if (!mtd->writesize) + host->addr_cs_field = GENMASK(17, 16); + + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field, + host->data->nand_cs << __ffs(host->addr_cs_field)); +} + +static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + int i; + + for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LOONGSON_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + mask &= GENMASK(chip->page_shift, 0); + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val); + + if (i == 4) { + mask = (u32)0xff >> (BITS_PER_WORD - shift); + val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift); + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val); + } + } + } +} + +static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op) +{ + int i; + + for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LOONGSON_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val); + } + } + + loongson_nand_set_addr_cs(host); +} + +static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int col0 = op->addrs[0]; + short col; + + if (!IS_ALIGNED(col0, chip->buf_align)) { + col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align); + op->aligned_offset = op->addrs[0] - col0; + op->addrs[0] = col0; + } + + if (host->data->set_addr) + host->data->set_addr(host, op); + + /* set operation length */ + if (op->is_write || op->is_read || op->is_change_column) + op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align); + else if (op->is_erase) + op->len = 1; + else + op->len = op->orig_len; + + writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM); + + /* set operation area and scope */ + col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0]; + if (op->orig_len && !op->is_readid) { + unsigned int op_scope = 0; + + if (col < mtd->writesize) { + op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN; + op_scope = mtd->writesize; + } + + op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE; + op_scope += mtd->oobsize; + + op_scope <<= __ffs(host->data->op_scope_field); + regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, + host->data->op_scope_field, op_scope); + } + + /* set command */ + writel(op->cmd_reg, host->reg_base + 
LOONGSON_NAND_CMD); + + /* trigger operation */ + regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID, + LOONGSON_NAND_CMD_VALID); +} + +static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host, + struct loongson_nand_op *op) +{ + unsigned int val; + int ret = 0; + + if (op->rdy_timeout_ms) { + ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD, + val, val & LOONGSON_NAND_CMD_OP_DONE, + 0, op->rdy_timeout_ms * MSEC_PER_SEC); + if (ret) + dev_err(host->dev, "operation failed\n"); + } + + return ret; +} + +static void loongson_nand_dma_callback(void *data) +{ + struct loongson_nand_host *host = (struct loongson_nand_host *)data; + struct dma_chan *chan = host->dma_chan; + struct device *dev = chan->device->dev; + enum dma_status status; + + status = dmaengine_tx_status(chan, host->dma_cookie, NULL); + if (likely(status == DMA_COMPLETE)) { + dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie); + complete(&host->dma_complete); + } else { + dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie); + } +} + +static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct dma_chan *chan = host->dma_chan; + struct device *dev = chan->device->dev; + struct dma_async_tx_descriptor *desc; + enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + void *buf = op->buf; + char *dma_buf = NULL; + dma_addr_t dma_addr; + int ret; + + if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) && + IS_ALIGNED(op->orig_len, chip->buf_align)) { + dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "failed to map DMA buffer\n"); + return -ENXIO; + } + } else if (!op->is_write) { + dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + } else { + dev_err(dev, "subpage writing not supported\n"); + return -EOPNOTSUPP; + } + + desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "failed to prepare DMA descriptor\n"); + ret = -ENOMEM; + goto err; + } + desc->callback = loongson_nand_dma_callback; + desc->callback_param = host; + + host->dma_cookie = dmaengine_submit(desc); + ret = dma_submit_error(host->dma_cookie); + if (ret) { + dev_err(dev, "failed to submit DMA descriptor\n"); + goto err; + } + + dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie); + dma_async_issue_pending(chan); + + if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) { + dmaengine_terminate_sync(chan); + reinit_completion(&host->dma_complete); + ret = -ETIMEDOUT; + goto err; + } + + if (dma_buf) + memcpy(buf, dma_buf + op->aligned_offset, op->orig_len); +err: + if (dma_buf) + dma_free_coherent(dev, op->len, dma_buf, dma_addr); + else + dma_unmap_single(dev, dma_addr, op->orig_len, data_dir); + + return ret; +} + +static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + struct loongson_nand_op op = {}; + int ret; + + ret = loongson_nand_parse_instructions(chip, subop, &op); + if (ret) + return ret; + + loongson_nand_trigger_op(host, &op); + + ret = loongson_nand_dma_transfer(host, &op); + if (ret) + return ret; + + return loongson_nand_wait_for_op_done(host, &op); 
+} + +static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop, + struct loongson_nand_op *op) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + int ret; + + ret = loongson_nand_parse_instructions(chip, subop, op); + if (ret) + return ret; + + loongson_nand_trigger_op(host, op); + + return loongson_nand_wait_for_op_done(host, op); +} + +static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct loongson_nand_op op = {}; + + return loongson_nand_misc_type_exec(chip, subop, &op); +} + +static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + struct loongson_nand_op op = {}; + int i, ret; + union { + char ids[6]; + struct { + int idl; + u16 idh; + }; + } nand_id; + + ret = loongson_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl, + LOONGSON_NAND_READ_ID_SLEEP_US, + LOONGSON_NAND_READ_ID_TIMEOUT_US); + if (ret) + return ret; + + nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS); + + for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++) + op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i]; + + return ret; +} + +static int loongson_nand_read_status_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + struct loongson_nand_op op = {}; + int val, ret; + + ret = loongson_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS); + val &= ~host->data->status_field; + op.buf[0] = val << ffs(host->data->status_field); + + return ret; +} + +static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + loongson_nand_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + loongson_nand_read_status_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + loongson_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + loongson_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + loongson_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)), + NAND_OP_PARSER_PATTERN( + loongson_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), + ); + +static int loongson_nand_is_valid_cmd(u8 opcode) +{ + if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID) + return 0; + + return -EOPNOTSUPP; +} + +static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2) +{ + if (opcode1 == 
NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART) + return 0; + + if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART) + return 0; + + if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2) + return 0; + + if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG) + return 0; + + return -EOPNOTSUPP; +} + +static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op) +{ + const struct nand_op_instr *instr1 = NULL, *instr2 = NULL; + int op_id; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + const struct nand_op_instr *instr = &op->instrs[op_id]; + + if (instr->type == NAND_OP_CMD_INSTR) { + if (!instr1) + instr1 = instr; + else if (!instr2) + instr2 = instr; + else + break; + } + } + + if (!instr1) + return -EOPNOTSUPP; + + if (!instr2) + return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode); + + return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode); +} + +static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op, + bool check_only) +{ + if (check_only) + return loongson_nand_check_op(chip, op); + + return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only); +} + +static int loongson_nand_get_chip_capacity(struct nand_chip *chip) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + u64 chipsize = nanddev_target_size(&chip->base); + struct mtd_info *mtd = nand_to_mtd(chip); + + switch (mtd->writesize) { + case SZ_512: + switch (chipsize) { + case SZ_8M: + host->addr_cs_field = GENMASK(15, 14); + return 0x9; + case SZ_16M: + host->addr_cs_field = GENMASK(16, 15); + return 0xa; + case SZ_32M: + host->addr_cs_field = GENMASK(17, 16); + return 0xb; + case SZ_64M: + host->addr_cs_field = GENMASK(18, 17); + return 0xc; + case SZ_128M: + host->addr_cs_field = GENMASK(19, 18); + return 0xd; + } + break; + case SZ_2K: + switch (chipsize) { + case SZ_128M: + host->addr_cs_field = GENMASK(17, 16); + return 0x0; + case SZ_256M: + host->addr_cs_field = GENMASK(18, 17); + return 0x1; + case SZ_512M: + host->addr_cs_field = GENMASK(19, 18); + return 0x2; + case SZ_1G: + host->addr_cs_field = GENMASK(20, 19); + return 0x3; + } + break; + case SZ_4K: + if (chipsize == SZ_2G) { + host->addr_cs_field = GENMASK(20, 19); + return 0x4; + } + break; + case SZ_8K: + switch (chipsize) { + case SZ_4G: + host->addr_cs_field = GENMASK(20, 19); + return 0x5; + case SZ_8G: + host->addr_cs_field = GENMASK(21, 20); + return 0x6; + case SZ_16G: + host->addr_cs_field = GENMASK(22, 21); + return 0x7; + } + break; + } + + dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n", + chipsize, mtd->writesize); + return -EINVAL; +} + +static int loongson_nand_attach_chip(struct nand_chip *chip) +{ + struct loongson_nand_host *host = nand_get_controller_data(chip); + int cell_size = loongson_nand_get_chip_capacity(chip); + + if (cell_size < 0) + return cell_size; + + switch (chip->ecc.engine_type) { + case NAND_ECC_ENGINE_TYPE_NONE: + break; + case NAND_ECC_ENGINE_TYPE_SOFT: + break; + default: + return -EINVAL; + } + + /* set cell size */ + regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK, + FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size)); + + regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK, + FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle)); + + regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK, + 
FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle)); + + chip->ecc.read_page_raw = nand_monolithic_read_page_raw; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + + return 0; +} + +static const struct nand_controller_ops loongson_nand_controller_ops = { + .exec_op = loongson_nand_exec_op, + .attach_chip = loongson_nand_attach_chip, +}; + +static void loongson_nand_controller_cleanup(struct loongson_nand_host *host) +{ + if (host->dma_chan) + dma_release_channel(host->dma_chan); +} + +static int ls2k1000_nand_apbdma_config(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + void __iomem *regs; + int val; + + regs = devm_platform_ioremap_resource_byname(pdev, "dma-config"); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + val = readl(regs); + val |= FIELD_PREP(LS2K1000_NAND_DMA_MASK, LS2K1000_DMA0_CONF); + writel(val, regs); + + return 0; +} + +static int loongson_nand_controller_init(struct loongson_nand_host *host) +{ + struct device *dev = host->dev; + struct dma_chan *chan; + struct dma_slave_config cfg = {}; + int ret, val; + + host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config); + if (IS_ERR(host->regmap)) + return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n"); + + if (host->data->id_cycle_field) + regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field, + host->data->max_id_cycle << __ffs(host->data->id_cycle_field)); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits)); + if (ret) + return dev_err_probe(dev, ret, "failed to set DMA mask\n"); + + val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) | + FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) | + FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) | + FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) | + FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) | + FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3); + + regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val); + + if (host->data->dma_config) { + ret = host->data->dma_config(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to config DMA routing\n"); + } + + chan = dma_request_chan(dev, "rxtx"); + if (IS_ERR(chan)) + return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n"); + host->dma_chan = chan; + + cfg.src_addr = host->dma_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr = host->dma_base; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ret = dmaengine_slave_config(host->dma_chan, &cfg); + if (ret) + return dev_err_probe(dev, ret, "failed to config DMA channel\n"); + + init_completion(&host->dma_complete); + + return 0; +} + +static int loongson_nand_chip_init(struct loongson_nand_host *host) +{ + struct device *dev = host->dev; + int nchips = of_get_child_count(dev->of_node); + struct device_node *chip_np; + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + if (nchips != 1) + return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n"); + + chip_np = of_get_next_child(dev->of_node, NULL); + if (!chip_np) + return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n"); + + nand_set_flash_node(chip, chip_np); + of_node_put(chip_np); + if (!mtd->name) + return dev_err_probe(dev, -EINVAL, "Missing MTD label\n"); + + nand_set_controller_data(chip, host); + chip->controller = &host->controller; + 
chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD; + chip->buf_align = 16; + mtd->dev.parent = dev; + mtd->owner = THIS_MODULE; + + ret = nand_scan(chip, 1); + if (ret) + return dev_err_probe(dev, ret, "failed to scan NAND chip\n"); + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return dev_err_probe(dev, ret, "failed to register MTD device\n"); + } + + return 0; +} + +static int loongson_nand_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct loongson_nand_data *data; + struct loongson_nand_host *host; + struct resource *res; + int ret; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->reg_base)) + return PTR_ERR(host->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma"); + if (!res) + return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n"); + + host->dma_base = dma_map_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, host->dma_base)) + return -ENXIO; + + host->dev = dev; + host->data = data; + host->controller.ops = &loongson_nand_controller_ops; + + nand_controller_init(&host->controller); + + ret = loongson_nand_controller_init(host); + if (ret) + goto err; + + ret = loongson_nand_chip_init(host); + if (ret) + goto err; + + platform_set_drvdata(pdev, host); + + return 0; +err: + loongson_nand_controller_cleanup(host); + + return ret; +} + +static void loongson_nand_remove(struct platform_device *pdev) +{ + struct loongson_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + loongson_nand_controller_cleanup(host); +} + +static const struct loongson_nand_data ls1b_nand_data = { + .max_id_cycle = 5, + .status_field = GENMASK(15, 8), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .dma_bits = 32, + .set_addr = ls1b_nand_set_addr, +}; + +static const struct loongson_nand_data ls1c_nand_data = { + .max_id_cycle = 6, + .id_cycle_field = GENMASK(14, 12), + .status_field = GENMASK(23, 16), + .op_scope_field = GENMASK(29, 16), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .dma_bits = 32, + .set_addr = ls1c_nand_set_addr, +}; + +static const struct loongson_nand_data ls2k0500_nand_data = { + .max_id_cycle = 6, + .id_cycle_field = GENMASK(14, 12), + .status_field = GENMASK(23, 16), + .op_scope_field = GENMASK(29, 16), + .hold_cycle = 0x4, + .wait_cycle = 0x12, + .dma_bits = 64, + .set_addr = ls1c_nand_set_addr, +}; + +static const struct loongson_nand_data ls2k1000_nand_data = { + .max_id_cycle = 6, + .id_cycle_field = GENMASK(14, 12), + .status_field = GENMASK(23, 16), + .op_scope_field = GENMASK(29, 16), + .hold_cycle = 0x4, + .wait_cycle = 0x12, + .nand_cs = 0x2, + .dma_bits = 64, + .dma_config = ls2k1000_nand_apbdma_config, + .set_addr = ls1c_nand_set_addr, +}; + +static const struct of_device_id loongson_nand_match[] = { + { + .compatible = "loongson,ls1b-nand-controller", + .data = &ls1b_nand_data, + }, + { + .compatible = "loongson,ls1c-nand-controller", + .data = &ls1c_nand_data, + }, + { + .compatible = "loongson,ls2k0500-nand-controller", + .data = &ls2k0500_nand_data, + }, + { + .compatible = "loongson,ls2k1000-nand-controller", + .data = &ls2k1000_nand_data, + }, + { /* 
sentinel */ } +}; +MODULE_DEVICE_TABLE(of, loongson_nand_match); + +static struct platform_driver loongson_nand_driver = { + .probe = loongson_nand_probe, + .remove = loongson_nand_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = loongson_nand_match, + }, +}; + +module_platform_driver(loongson_nand_driver); + +MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); +MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>"); +MODULE_DESCRIPTION("Loongson NAND Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c deleted file mode 100644 index ef8e4f9ce287..000000000000 --- a/drivers/mtd/nand/raw/loongson1-nand-controller.c +++ /dev/null @@ -1,836 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * NAND Controller Driver for Loongson-1 SoC - * - * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> -#include <linux/iopoll.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> -#include <linux/sizes.h> - -/* Loongson-1 NAND Controller Registers */ -#define LS1X_NAND_CMD 0x0 -#define LS1X_NAND_ADDR1 0x4 -#define LS1X_NAND_ADDR2 0x8 -#define LS1X_NAND_TIMING 0xc -#define LS1X_NAND_IDL 0x10 -#define LS1X_NAND_IDH_STATUS 0x14 -#define LS1X_NAND_PARAM 0x18 -#define LS1X_NAND_OP_NUM 0x1c - -/* NAND Command Register Bits */ -#define LS1X_NAND_CMD_OP_DONE BIT(10) -#define LS1X_NAND_CMD_OP_SPARE BIT(9) -#define LS1X_NAND_CMD_OP_MAIN BIT(8) -#define LS1X_NAND_CMD_STATUS BIT(7) -#define LS1X_NAND_CMD_RESET BIT(6) -#define LS1X_NAND_CMD_READID BIT(5) -#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4) -#define LS1X_NAND_CMD_ERASE BIT(3) -#define LS1X_NAND_CMD_WRITE BIT(2) -#define LS1X_NAND_CMD_READ BIT(1) -#define LS1X_NAND_CMD_VALID BIT(0) - -#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0) -#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8) -#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8) - -#define LS1X_NAND_COL_ADDR_CYC 2U -#define LS1X_NAND_MAX_ADDR_CYC 5U - -#define BITS_PER_WORD (4 * BITS_PER_BYTE) - -struct ls1x_nand_host; - -struct ls1x_nand_op { - char addrs[LS1X_NAND_MAX_ADDR_CYC]; - unsigned int naddrs; - unsigned int addrs_offset; - unsigned int aligned_offset; - unsigned int cmd_reg; - unsigned int row_start; - unsigned int rdy_timeout_ms; - unsigned int orig_len; - bool is_readid; - bool is_erase; - bool is_write; - bool is_read; - bool is_change_column; - size_t len; - char *buf; -}; - -struct ls1x_nand_data { - unsigned int status_field; - unsigned int op_scope_field; - unsigned int hold_cycle; - unsigned int wait_cycle; - void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op); -}; - -struct ls1x_nand_host { - struct device *dev; - struct nand_chip chip; - struct nand_controller controller; - const struct ls1x_nand_data *data; - void __iomem *reg_base; - struct regmap *regmap; - /* DMA Engine stuff */ - dma_addr_t dma_base; - struct dma_chan *dma_chan; - dma_cookie_t dma_cookie; - struct completion dma_complete; -}; - -static const struct regmap_config ls1x_nand_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, -}; - -static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - - op->row_start = 
chip->page_shift + 1; - - /* The controller abstracts the following NAND operations. */ - switch (opcode) { - case NAND_CMD_STATUS: - op->cmd_reg = LS1X_NAND_CMD_STATUS; - break; - case NAND_CMD_RESET: - op->cmd_reg = LS1X_NAND_CMD_RESET; - break; - case NAND_CMD_READID: - op->is_readid = true; - op->cmd_reg = LS1X_NAND_CMD_READID; - break; - case NAND_CMD_ERASE1: - op->is_erase = true; - op->addrs_offset = LS1X_NAND_COL_ADDR_CYC; - break; - case NAND_CMD_ERASE2: - if (!op->is_erase) - return -EOPNOTSUPP; - /* During erasing, row_start differs from the default value. */ - op->row_start = chip->page_shift; - op->cmd_reg = LS1X_NAND_CMD_ERASE; - break; - case NAND_CMD_SEQIN: - op->is_write = true; - break; - case NAND_CMD_PAGEPROG: - if (!op->is_write) - return -EOPNOTSUPP; - op->cmd_reg = LS1X_NAND_CMD_WRITE; - break; - case NAND_CMD_READ0: - op->is_read = true; - break; - case NAND_CMD_READSTART: - if (!op->is_read) - return -EOPNOTSUPP; - op->cmd_reg = LS1X_NAND_CMD_READ; - break; - case NAND_CMD_RNDOUT: - op->is_change_column = true; - break; - case NAND_CMD_RNDOUTSTART: - if (!op->is_change_column) - return -EOPNOTSUPP; - op->cmd_reg = LS1X_NAND_CMD_READ; - break; - default: - dev_dbg(host->dev, "unsupported opcode: %u\n", opcode); - return -EOPNOTSUPP; - } - - return 0; -} - -static int ls1x_nand_parse_instructions(struct nand_chip *chip, - const struct nand_subop *subop, struct ls1x_nand_op *op) -{ - unsigned int op_id; - int ret; - - for (op_id = 0; op_id < subop->ninstrs; op_id++) { - const struct nand_op_instr *instr = &subop->instrs[op_id]; - unsigned int offset, naddrs; - const u8 *addrs; - - switch (instr->type) { - case NAND_OP_CMD_INSTR: - ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode); - if (ret < 0) - return ret; - - break; - case NAND_OP_ADDR_INSTR: - naddrs = nand_subop_get_num_addr_cyc(subop, op_id); - if (naddrs > LS1X_NAND_MAX_ADDR_CYC) - return -EOPNOTSUPP; - op->naddrs = naddrs; - offset = nand_subop_get_addr_start_off(subop, op_id); - addrs = &instr->ctx.addr.addrs[offset]; - memcpy(op->addrs + op->addrs_offset, addrs, naddrs); - break; - case NAND_OP_DATA_IN_INSTR: - case NAND_OP_DATA_OUT_INSTR: - offset = nand_subop_get_data_start_off(subop, op_id); - op->orig_len = nand_subop_get_data_len(subop, op_id); - if (instr->type == NAND_OP_DATA_IN_INSTR) - op->buf = instr->ctx.data.buf.in + offset; - else if (instr->type == NAND_OP_DATA_OUT_INSTR) - op->buf = (void *)instr->ctx.data.buf.out + offset; - - break; - case NAND_OP_WAITRDY_INSTR: - op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; - break; - default: - break; - } - } - - return 0; -} - -static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) -{ - struct nand_chip *chip = &host->chip; - int i; - - for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { - int shift, mask, val; - - if (i < LS1X_NAND_COL_ADDR_CYC) { - shift = i * BITS_PER_BYTE; - mask = (u32)0xff << shift; - mask &= GENMASK(chip->page_shift, 0); - val = (u32)op->addrs[i] << shift; - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); - } else if (!op->is_change_column) { - shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; - mask = (u32)0xff << shift; - val = (u32)op->addrs[i] << shift; - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); - - if (i == 4) { - mask = (u32)0xff >> (BITS_PER_WORD - shift); - val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift); - regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); - } - } - } -} - -static void 
ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) -{ - int i; - - for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { - int shift, mask, val; - - if (i < LS1X_NAND_COL_ADDR_CYC) { - shift = i * BITS_PER_BYTE; - mask = (u32)0xff << shift; - val = (u32)op->addrs[i] << shift; - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); - } else if (!op->is_change_column) { - shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; - mask = (u32)0xff << shift; - val = (u32)op->addrs[i] << shift; - regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); - } - } -} - -static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op) -{ - struct nand_chip *chip = &host->chip; - struct mtd_info *mtd = nand_to_mtd(chip); - int col0 = op->addrs[0]; - short col; - - if (!IS_ALIGNED(col0, chip->buf_align)) { - col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align); - op->aligned_offset = op->addrs[0] - col0; - op->addrs[0] = col0; - } - - if (host->data->set_addr) - host->data->set_addr(host, op); - - /* set operation length */ - if (op->is_write || op->is_read || op->is_change_column) - op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align); - else if (op->is_erase) - op->len = 1; - else - op->len = op->orig_len; - - writel(op->len, host->reg_base + LS1X_NAND_OP_NUM); - - /* set operation area and scope */ - col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0]; - if (op->orig_len && !op->is_readid) { - unsigned int op_scope = 0; - - if (col < mtd->writesize) { - op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN; - op_scope = mtd->writesize; - } - - op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE; - op_scope += mtd->oobsize; - - op_scope <<= __ffs(host->data->op_scope_field); - regmap_update_bits(host->regmap, LS1X_NAND_PARAM, - host->data->op_scope_field, op_scope); - } - - /* set command */ - writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD); - - /* trigger operation */ - regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID); -} - -static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op) -{ - unsigned int val; - int ret = 0; - - if (op->rdy_timeout_ms) { - ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD, - val, val & LS1X_NAND_CMD_OP_DONE, - 0, op->rdy_timeout_ms * MSEC_PER_SEC); - if (ret) - dev_err(host->dev, "operation failed\n"); - } - - return ret; -} - -static void ls1x_nand_dma_callback(void *data) -{ - struct ls1x_nand_host *host = (struct ls1x_nand_host *)data; - struct dma_chan *chan = host->dma_chan; - struct device *dev = chan->device->dev; - enum dma_status status; - - status = dmaengine_tx_status(chan, host->dma_cookie, NULL); - if (likely(status == DMA_COMPLETE)) { - dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie); - complete(&host->dma_complete); - } else { - dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie); - } -} - -static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op) -{ - struct nand_chip *chip = &host->chip; - struct dma_chan *chan = host->dma_chan; - struct device *dev = chan->device->dev; - struct dma_async_tx_descriptor *desc; - enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - enum dma_transfer_direction xfer_dir = op->is_write ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; - void *buf = op->buf; - char *dma_buf = NULL; - dma_addr_t dma_addr; - int ret; - - if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) && - IS_ALIGNED(op->orig_len, chip->buf_align)) { - dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir); - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "failed to map DMA buffer\n"); - return -ENXIO; - } - } else if (!op->is_write) { - dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL); - if (!dma_buf) - return -ENOMEM; - } else { - dev_err(dev, "subpage writing not supported\n"); - return -EOPNOTSUPP; - } - - desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT); - if (!desc) { - dev_err(dev, "failed to prepare DMA descriptor\n"); - ret = -ENOMEM; - goto err; - } - desc->callback = ls1x_nand_dma_callback; - desc->callback_param = host; - - host->dma_cookie = dmaengine_submit(desc); - ret = dma_submit_error(host->dma_cookie); - if (ret) { - dev_err(dev, "failed to submit DMA descriptor\n"); - goto err; - } - - dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie); - dma_async_issue_pending(chan); - - if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) { - dmaengine_terminate_sync(chan); - reinit_completion(&host->dma_complete); - ret = -ETIMEDOUT; - goto err; - } - - if (dma_buf) - memcpy(buf, dma_buf + op->aligned_offset, op->orig_len); -err: - if (dma_buf) - dma_free_coherent(dev, op->len, dma_buf, dma_addr); - else - dma_unmap_single(dev, dma_addr, op->orig_len, data_dir); - - return ret; -} - -static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - struct ls1x_nand_op op = {}; - int ret; - - ret = ls1x_nand_parse_instructions(chip, subop, &op); - if (ret) - return ret; - - ls1x_nand_trigger_op(host, &op); - - ret = ls1x_nand_dma_transfer(host, &op); - if (ret) - return ret; - - return ls1x_nand_wait_for_op_done(host, &op); -} - -static int ls1x_nand_misc_type_exec(struct nand_chip *chip, - const struct nand_subop *subop, struct ls1x_nand_op *op) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - int ret; - - ret = ls1x_nand_parse_instructions(chip, subop, op); - if (ret) - return ret; - - ls1x_nand_trigger_op(host, op); - - return ls1x_nand_wait_for_op_done(host, op); -} - -static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop) -{ - struct ls1x_nand_op op = {}; - - return ls1x_nand_misc_type_exec(chip, subop, &op); -} - -static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - struct ls1x_nand_op op = {}; - int i, ret; - union { - char ids[5]; - struct { - int idl; - char idh; - }; - } nand_id; - - ret = ls1x_nand_misc_type_exec(chip, subop, &op); - if (ret) - return ret; - - nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL); - nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS); - - for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++) - op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i]; - - return ret; -} - -static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - struct ls1x_nand_op op = {}; - int val, ret; - - ret = ls1x_nand_misc_type_exec(chip, subop, &op); - if (ret) - return ret; - - val = readl(host->reg_base + 
LS1X_NAND_IDH_STATUS); - val &= ~host->data->status_field; - op.buf[0] = val << ffs(host->data->status_field); - - return ret; -} - -static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER( - NAND_OP_PARSER_PATTERN( - ls1x_nand_read_id_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), - NAND_OP_PARSER_PATTERN( - ls1x_nand_read_status_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), - NAND_OP_PARSER_PATTERN( - ls1x_nand_zerolen_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), - NAND_OP_PARSER_PATTERN( - ls1x_nand_zerolen_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), - NAND_OP_PARSER_PATTERN( - ls1x_nand_data_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)), - NAND_OP_PARSER_PATTERN( - ls1x_nand_data_type_exec, - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), - NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0), - NAND_OP_PARSER_PAT_CMD_ELEM(false), - NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), - ); - -static int ls1x_nand_is_valid_cmd(u8 opcode) -{ - if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID) - return 0; - - return -EOPNOTSUPP; -} - -static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2) -{ - if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART) - return 0; - - if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART) - return 0; - - if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2) - return 0; - - if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG) - return 0; - - return -EOPNOTSUPP; -} - -static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op) -{ - const struct nand_op_instr *instr1 = NULL, *instr2 = NULL; - int op_id; - - for (op_id = 0; op_id < op->ninstrs; op_id++) { - const struct nand_op_instr *instr = &op->instrs[op_id]; - - if (instr->type == NAND_OP_CMD_INSTR) { - if (!instr1) - instr1 = instr; - else if (!instr2) - instr2 = instr; - else - break; - } - } - - if (!instr1) - return -EOPNOTSUPP; - - if (!instr2) - return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode); - - return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode); -} - -static int ls1x_nand_exec_op(struct nand_chip *chip, - const struct nand_operation *op, bool check_only) -{ - if (check_only) - return ls1x_nand_check_op(chip, op); - - return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only); -} - -static int ls1x_nand_attach_chip(struct nand_chip *chip) -{ - struct ls1x_nand_host *host = nand_get_controller_data(chip); - u64 chipsize = nanddev_target_size(&chip->base); - int cell_size = 0; - - switch (chipsize) { - case SZ_128M: - cell_size = 0x0; - break; - case SZ_256M: - cell_size = 0x1; - break; - case SZ_512M: - cell_size = 0x2; - break; - case SZ_1G: - cell_size = 0x3; - break; - case SZ_2G: - cell_size = 0x4; - break; - case SZ_4G: - cell_size = 0x5; - break; - case SZ_8G: - cell_size = 0x6; - break; - case SZ_16G: - cell_size = 0x7; - break; - default: - 
dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize); - return -EINVAL; - } - - switch (chip->ecc.engine_type) { - case NAND_ECC_ENGINE_TYPE_NONE: - break; - case NAND_ECC_ENGINE_TYPE_SOFT: - break; - default: - return -EINVAL; - } - - /* set cell size */ - regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK, - FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size)); - - regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK, - FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle)); - - regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK, - FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle)); - - chip->ecc.read_page_raw = nand_monolithic_read_page_raw; - chip->ecc.write_page_raw = nand_monolithic_write_page_raw; - - return 0; -} - -static const struct nand_controller_ops ls1x_nand_controller_ops = { - .exec_op = ls1x_nand_exec_op, - .attach_chip = ls1x_nand_attach_chip, -}; - -static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host) -{ - if (host->dma_chan) - dma_release_channel(host->dma_chan); -} - -static int ls1x_nand_controller_init(struct ls1x_nand_host *host) -{ - struct device *dev = host->dev; - struct dma_chan *chan; - struct dma_slave_config cfg = {}; - int ret; - - host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config); - if (IS_ERR(host->regmap)) - return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n"); - - chan = dma_request_chan(dev, "rxtx"); - if (IS_ERR(chan)) - return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n"); - host->dma_chan = chan; - - cfg.src_addr = host->dma_base; - cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - cfg.dst_addr = host->dma_base; - cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - ret = dmaengine_slave_config(host->dma_chan, &cfg); - if (ret) - return dev_err_probe(dev, ret, "failed to config DMA channel\n"); - - init_completion(&host->dma_complete); - - return 0; -} - -static int ls1x_nand_chip_init(struct ls1x_nand_host *host) -{ - struct device *dev = host->dev; - int nchips = of_get_child_count(dev->of_node); - struct device_node *chip_np; - struct nand_chip *chip = &host->chip; - struct mtd_info *mtd = nand_to_mtd(chip); - int ret; - - if (nchips != 1) - return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n"); - - chip_np = of_get_next_child(dev->of_node, NULL); - if (!chip_np) - return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n"); - - nand_set_flash_node(chip, chip_np); - of_node_put(chip_np); - if (!mtd->name) - return dev_err_probe(dev, -EINVAL, "Missing MTD label\n"); - - nand_set_controller_data(chip, host); - chip->controller = &host->controller; - chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD; - chip->buf_align = 16; - mtd->dev.parent = dev; - mtd->owner = THIS_MODULE; - - ret = nand_scan(chip, 1); - if (ret) - return dev_err_probe(dev, ret, "failed to scan NAND chip\n"); - - ret = mtd_device_register(mtd, NULL, 0); - if (ret) { - nand_cleanup(chip); - return dev_err_probe(dev, ret, "failed to register MTD device\n"); - } - - return 0; -} - -static int ls1x_nand_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - const struct ls1x_nand_data *data; - struct ls1x_nand_host *host; - struct resource *res; - int ret; - - data = of_device_get_match_data(dev); - if (!data) - return -ENODEV; - - host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); - if (!host) - return 
-ENOMEM; - - host->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->reg_base)) - return PTR_ERR(host->reg_base); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma"); - if (!res) - return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n"); - - host->dma_base = dma_map_resource(dev, res->start, resource_size(res), - DMA_BIDIRECTIONAL, 0); - if (dma_mapping_error(dev, host->dma_base)) - return -ENXIO; - - host->dev = dev; - host->data = data; - host->controller.ops = &ls1x_nand_controller_ops; - - nand_controller_init(&host->controller); - - ret = ls1x_nand_controller_init(host); - if (ret) - goto err; - - ret = ls1x_nand_chip_init(host); - if (ret) - goto err; - - platform_set_drvdata(pdev, host); - - return 0; -err: - ls1x_nand_controller_cleanup(host); - - return ret; -} - -static void ls1x_nand_remove(struct platform_device *pdev) -{ - struct ls1x_nand_host *host = platform_get_drvdata(pdev); - struct nand_chip *chip = &host->chip; - int ret; - - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - ls1x_nand_controller_cleanup(host); -} - -static const struct ls1x_nand_data ls1b_nand_data = { - .status_field = GENMASK(15, 8), - .hold_cycle = 0x2, - .wait_cycle = 0xc, - .set_addr = ls1b_nand_set_addr, -}; - -static const struct ls1x_nand_data ls1c_nand_data = { - .status_field = GENMASK(23, 16), - .op_scope_field = GENMASK(29, 16), - .hold_cycle = 0x2, - .wait_cycle = 0xc, - .set_addr = ls1c_nand_set_addr, -}; - -static const struct of_device_id ls1x_nand_match[] = { - { - .compatible = "loongson,ls1b-nand-controller", - .data = &ls1b_nand_data, - }, - { - .compatible = "loongson,ls1c-nand-controller", - .data = &ls1c_nand_data, - }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, ls1x_nand_match); - -static struct platform_driver ls1x_nand_driver = { - .probe = ls1x_nand_probe, - .remove = ls1x_nand_remove, - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = ls1x_nand_match, - }, -}; - -module_platform_driver(ls1x_nand_driver); - -MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); -MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 13e4060bd1b6..c7d9501f646b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -2784,137 +2784,6 @@ int nand_set_features(struct nand_chip *chip, int addr, } /** - * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data - * @buf: buffer to test - * @len: buffer length - * @bitflips_threshold: maximum number of bitflips - * - * Check if a buffer contains only 0xff, which means the underlying region - * has been erased and is ready to be programmed. - * The bitflips_threshold specify the maximum number of bitflips before - * considering the region is not erased. - * Note: The logic of this function has been extracted from the memweight - * implementation, except that nand_check_erased_buf function exit before - * testing the whole buffer if the number of bitflips exceed the - * bitflips_threshold value. - * - * Returns a positive number of bitflips less than or equal to - * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the - * threshold. 
- */ -static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) -{ - const unsigned char *bitmap = buf; - int bitflips = 0; - int weight; - - for (; len && ((uintptr_t)bitmap) % sizeof(long); - len--, bitmap++) { - weight = hweight8(*bitmap); - bitflips += BITS_PER_BYTE - weight; - if (unlikely(bitflips > bitflips_threshold)) - return -EBADMSG; - } - - for (; len >= sizeof(long); - len -= sizeof(long), bitmap += sizeof(long)) { - unsigned long d = *((unsigned long *)bitmap); - if (d == ~0UL) - continue; - weight = hweight_long(d); - bitflips += BITS_PER_LONG - weight; - if (unlikely(bitflips > bitflips_threshold)) - return -EBADMSG; - } - - for (; len > 0; len--, bitmap++) { - weight = hweight8(*bitmap); - bitflips += BITS_PER_BYTE - weight; - if (unlikely(bitflips > bitflips_threshold)) - return -EBADMSG; - } - - return bitflips; -} - -/** - * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only - * 0xff data - * @data: data buffer to test - * @datalen: data length - * @ecc: ECC buffer - * @ecclen: ECC length - * @extraoob: extra OOB buffer - * @extraooblen: extra OOB length - * @bitflips_threshold: maximum number of bitflips - * - * Check if a data buffer and its associated ECC and OOB data contains only - * 0xff pattern, which means the underlying region has been erased and is - * ready to be programmed. - * The bitflips_threshold specify the maximum number of bitflips before - * considering the region as not erased. - * - * Note: - * 1/ ECC algorithms are working on pre-defined block sizes which are usually - * different from the NAND page size. When fixing bitflips, ECC engines will - * report the number of errors per chunk, and the NAND core infrastructure - * expect you to return the maximum number of bitflips for the whole page. - * This is why you should always use this function on a single chunk and - * not on the whole page. After checking each chunk you should update your - * max_bitflips value accordingly. - * 2/ When checking for bitflips in erased pages you should not only check - * the payload data but also their associated ECC data, because a user might - * have programmed almost all bits to 1 but a few. In this case, we - * shouldn't consider the chunk as erased, and checking ECC bytes prevent - * this case. - * 3/ The extraoob argument is optional, and should be used if some of your OOB - * data are protected by the ECC engine. - * It could also be used if you support subpages and want to attach some - * extra OOB data to an ECC chunk. - * - * Returns a positive number of bitflips less than or equal to - * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the - * threshold. In case of success, the passed buffers are filled with 0xff. 
- */ -int nand_check_erased_ecc_chunk(void *data, int datalen, - void *ecc, int ecclen, - void *extraoob, int extraooblen, - int bitflips_threshold) -{ - int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; - - data_bitflips = nand_check_erased_buf(data, datalen, - bitflips_threshold); - if (data_bitflips < 0) - return data_bitflips; - - bitflips_threshold -= data_bitflips; - - ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); - if (ecc_bitflips < 0) - return ecc_bitflips; - - bitflips_threshold -= ecc_bitflips; - - extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, - bitflips_threshold); - if (extraoob_bitflips < 0) - return extraoob_bitflips; - - if (data_bitflips) - memset(data, 0xff, datalen); - - if (ecc_bitflips) - memset(ecc, 0xff, ecclen); - - if (extraoob_bitflips) - memset(extraoob, 0xff, extraooblen); - - return data_bitflips + ecc_bitflips + extraoob_bitflips; -} -EXPORT_SYMBOL(nand_check_erased_ecc_chunk); - -/** * nand_read_page_raw_notsupp - dummy read raw page function * @chip: nand chip info structure * @buf: buffer to store read data diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index df48b7d01d16..84942e7e528f 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -552,9 +552,8 @@ static int __init ns_alloc_device(struct nandsim *ns) err = -EINVAL; goto err_close_filp; } - ns->pages_written = - vzalloc(array_size(sizeof(unsigned long), - BITS_TO_LONGS(ns->geom.pgnum))); + ns->pages_written = vcalloc(BITS_TO_LONGS(ns->geom.pgnum), + sizeof(unsigned long)); if (!ns->pages_written) { NS_ERR("alloc_device: unable to allocate pages written array\n"); err = -ENOMEM; @@ -578,7 +577,7 @@ err_close_filp: return err; } - ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum)); + ns->pages = vmalloc_array(ns->geom.pgnum, sizeof(union ns_mem)); if (!ns->pages) { NS_ERR("alloc_device: unable to allocate page array\n"); return -ENOMEM; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index b8af3a3533fc..39e297486721 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -1979,7 +1979,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) err = rawnand_sw_bch_init(chip); if (err) { dev_err(dev, "Unable to use BCH library\n"); - return err; + goto err_put_elm_dev; } break; @@ -2016,7 +2016,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) err = rawnand_sw_bch_init(chip); if (err) { dev_err(dev, "unable to use BCH library\n"); - return err; + goto err_put_elm_dev; } break; @@ -2054,7 +2054,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) break; default: dev_err(dev, "Invalid or unsupported ECC scheme\n"); - return -EINVAL; + err = -EINVAL; + goto err_put_elm_dev; } if (elm_bch_strength >= 0) { @@ -2073,7 +2074,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) info->nsteps_per_eccpg, chip->ecc.size, chip->ecc.bytes); if (err < 0) - return err; + goto err_put_elm_dev; } /* Check if NAND device's OOB is enough to store ECC signatures */ @@ -2083,10 +2084,24 @@ static int omap_nand_attach_chip(struct nand_chip *chip) dev_err(dev, "Not enough OOB bytes: required = %d, available=%d\n", min_oobbytes, mtd->oobsize); - return -EINVAL; + err = -EINVAL; + goto err_put_elm_dev; } return 0; + +err_put_elm_dev: + put_device(info->elm_dev); + + return err; +} + +static void omap_nand_detach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct omap_nand_info 
*info = mtd_to_omap(mtd); + + put_device(info->elm_dev); } static void omap_nand_data_in(struct nand_chip *chip, void *buf, @@ -2187,6 +2202,7 @@ static int omap_nand_exec_op(struct nand_chip *chip, static const struct nand_controller_ops omap_nand_controller_ops = { .attach_chip = omap_nand_attach_chip, + .detach_chip = omap_nand_detach_chip, .exec_op = omap_nand_exec_op, }; @@ -2316,6 +2332,5 @@ static struct platform_driver omap_nand_driver = { module_platform_driver(omap_nand_driver); -MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c index 09440ed4652e..11bd90e3f18c 100644 --- a/drivers/mtd/nand/raw/pl35x-nand-controller.c +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -1137,7 +1137,7 @@ static int pl35x_nand_probe(struct platform_device *pdev) struct device *smc_dev = pdev->dev.parent; struct amba_device *smc_amba = to_amba_device(smc_dev); struct pl35x_nandc *nfc; - u32 ret; + int ret; nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL); if (!nfc) @@ -1193,6 +1193,5 @@ static struct platform_driver pl35x_nandc_driver = { module_platform_driver(pl35x_nandc_driver); MODULE_AUTHOR("Xilinx, Inc."); -MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME); MODULE_DESCRIPTION("ARM PL35X NAND controller driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index c5d7cd8a6cab..9444ba02696d 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -1505,4 +1505,3 @@ module_platform_driver(rk_nfc_driver); MODULE_LICENSE("Dual MIT/GPL"); MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver"); -MODULE_ALIAS("platform:rockchip-nand-controller"); diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c deleted file mode 100644 index 229f2e87d56e..000000000000 --- a/drivers/mtd/nand/raw/s3c2410.c +++ /dev/null @@ -1,1230 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright © 2004-2008 Simtec Electronics - * http://armlinux.simtec.co.uk/ - * Ben Dooks <ben@simtec.co.uk> - * - * Samsung S3C2410/S3C2440/S3C2412 NAND driver -*/ - -#define pr_fmt(fmt) "nand-s3c2410: " fmt - -#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG -#define DEBUG -#endif - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/io.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/cpufreq.h> -#include <linux/of.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/partitions.h> - -#include <linux/platform_data/mtd-nand-s3c2410.h> - -#define S3C2410_NFREG(x) (x) - -#define S3C2410_NFCONF S3C2410_NFREG(0x00) -#define S3C2410_NFCMD S3C2410_NFREG(0x04) -#define S3C2410_NFADDR S3C2410_NFREG(0x08) -#define S3C2410_NFDATA S3C2410_NFREG(0x0C) -#define S3C2410_NFSTAT S3C2410_NFREG(0x10) -#define S3C2410_NFECC S3C2410_NFREG(0x14) -#define S3C2440_NFCONT S3C2410_NFREG(0x04) -#define S3C2440_NFCMD S3C2410_NFREG(0x08) -#define S3C2440_NFADDR S3C2410_NFREG(0x0C) -#define S3C2440_NFDATA S3C2410_NFREG(0x10) -#define S3C2440_NFSTAT S3C2410_NFREG(0x20) -#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C) 
-#define S3C2412_NFSTAT S3C2410_NFREG(0x28) -#define S3C2412_NFMECC0 S3C2410_NFREG(0x34) -#define S3C2410_NFCONF_EN (1<<15) -#define S3C2410_NFCONF_INITECC (1<<12) -#define S3C2410_NFCONF_nFCE (1<<11) -#define S3C2410_NFCONF_TACLS(x) ((x)<<8) -#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4) -#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0) -#define S3C2410_NFSTAT_BUSY (1<<0) -#define S3C2440_NFCONF_TACLS(x) ((x)<<12) -#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8) -#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4) -#define S3C2440_NFCONT_INITECC (1<<4) -#define S3C2440_NFCONT_nFCE (1<<1) -#define S3C2440_NFCONT_ENABLE (1<<0) -#define S3C2440_NFSTAT_READY (1<<0) -#define S3C2412_NFCONF_NANDBOOT (1<<31) -#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5) -#define S3C2412_NFCONT_nFCE0 (1<<1) -#define S3C2412_NFSTAT_READY (1<<0) - -/* new oob placement block for use with hardware ecc generation - */ -static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - if (section) - return -ERANGE; - - oobregion->offset = 0; - oobregion->length = 3; - - return 0; -} - -static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - if (section) - return -ERANGE; - - oobregion->offset = 8; - oobregion->length = 8; - - return 0; -} - -static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = { - .ecc = s3c2410_ooblayout_ecc, - .free = s3c2410_ooblayout_free, -}; - -/* controller and mtd information */ - -struct s3c2410_nand_info; - -/** - * struct s3c2410_nand_mtd - driver MTD structure - * @chip: The NAND chip information. - * @set: The platform information supplied for this set of NAND chips. - * @info: Link back to the hardware information. -*/ -struct s3c2410_nand_mtd { - struct nand_chip chip; - struct s3c2410_nand_set *set; - struct s3c2410_nand_info *info; -}; - -enum s3c_cpu_type { - TYPE_S3C2410, - TYPE_S3C2412, - TYPE_S3C2440, -}; - -enum s3c_nand_clk_state { - CLOCK_DISABLE = 0, - CLOCK_ENABLE, - CLOCK_SUSPEND, -}; - -/* overview of the s3c2410 nand state */ - -/** - * struct s3c2410_nand_info - NAND controller state. - * @controller: Base controller structure. - * @mtds: An array of MTD instances on this controller. - * @platform: The platform data for this board. - * @device: The platform device we bound to. - * @clk: The clock resource for this controller. - * @regs: The area mapped for the hardware registers. - * @sel_reg: Pointer to the register controlling the NAND selection. - * @sel_bit: The bit in @sel_reg to select the NAND chip. - * @mtd_count: The number of MTDs created from this controller. - * @save_sel: The contents of @sel_reg to be saved over suspend. - * @clk_rate: The clock rate from @clk. - * @clk_state: The current clock state. - * @cpu_type: The exact type of this controller. 
- */ -struct s3c2410_nand_info { - /* mtd info */ - struct nand_controller controller; - struct s3c2410_nand_mtd *mtds; - struct s3c2410_platform_nand *platform; - - /* device info */ - struct device *device; - struct clk *clk; - void __iomem *regs; - void __iomem *sel_reg; - int sel_bit; - int mtd_count; - unsigned long save_sel; - unsigned long clk_rate; - enum s3c_nand_clk_state clk_state; - - enum s3c_cpu_type cpu_type; -}; - -struct s3c24XX_nand_devtype_data { - enum s3c_cpu_type type; -}; - -static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = { - .type = TYPE_S3C2410, -}; - -static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = { - .type = TYPE_S3C2412, -}; - -static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = { - .type = TYPE_S3C2440, -}; - -/* conversion functions */ - -static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd) -{ - return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd, - chip); -} - -static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd) -{ - return s3c2410_nand_mtd_toours(mtd)->info; -} - -static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev) -{ - return platform_get_drvdata(dev); -} - -static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) -{ - return dev_get_platdata(&dev->dev); -} - -static inline int allow_clk_suspend(struct s3c2410_nand_info *info) -{ -#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP - return 1; -#else - return 0; -#endif -} - -/** - * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock. - * @info: The controller instance. - * @new_state: State to which clock should be set. - */ -static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, - enum s3c_nand_clk_state new_state) -{ - if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND) - return; - - if (info->clk_state == CLOCK_ENABLE) { - if (new_state != CLOCK_ENABLE) - clk_disable_unprepare(info->clk); - } else { - if (new_state == CLOCK_ENABLE) - clk_prepare_enable(info->clk); - } - - info->clk_state = new_state; -} - -/* timing calculations */ - -#define NS_IN_KHZ 1000000 - -/** - * s3c_nand_calc_rate - calculate timing data. - * @wanted: The cycle time in nanoseconds. - * @clk: The clock rate in kHz. - * @max: The maximum divider value. - * - * Calculate the timing value from the given parameters. - */ -static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) -{ - int result; - - result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ); - - pr_debug("result %d from %ld, %d\n", result, clk, wanted); - - if (result > max) { - pr_err("%d ns is too big for current clock rate %ld\n", - wanted, clk); - return -1; - } - - if (result < 1) - result = 1; - - return result; -} - -#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) - -/* controller setup */ - -/** - * s3c2410_nand_setrate - setup controller timing information. - * @info: The controller instance. - * - * Given the information supplied by the platform, calculate and set - * the necessary timing registers in the hardware to generate the - * necessary timing cycles to the hardware. - */ -static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) -{ - struct s3c2410_platform_nand *plat = info->platform; - int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 
8 : 4; - int tacls, twrph0, twrph1; - unsigned long clkrate = clk_get_rate(info->clk); - unsigned long set, cfg, mask; - unsigned long flags; - - /* calculate the timing information for the controller */ - - info->clk_rate = clkrate; - clkrate /= 1000; /* turn clock into kHz for ease of use */ - - if (plat != NULL) { - tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); - twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); - twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); - } else { - /* default timings */ - tacls = tacls_max; - twrph0 = 8; - twrph1 = 8; - } - - if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { - dev_err(info->device, "cannot get suitable timings\n"); - return -EINVAL; - } - - dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", - tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), - twrph1, to_ns(twrph1, clkrate)); - - switch (info->cpu_type) { - case TYPE_S3C2410: - mask = (S3C2410_NFCONF_TACLS(3) | - S3C2410_NFCONF_TWRPH0(7) | - S3C2410_NFCONF_TWRPH1(7)); - set = S3C2410_NFCONF_EN; - set |= S3C2410_NFCONF_TACLS(tacls - 1); - set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); - set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); - break; - - case TYPE_S3C2440: - case TYPE_S3C2412: - mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) | - S3C2440_NFCONF_TWRPH0(7) | - S3C2440_NFCONF_TWRPH1(7)); - - set = S3C2440_NFCONF_TACLS(tacls - 1); - set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); - set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); - break; - - default: - BUG(); - } - - local_irq_save(flags); - - cfg = readl(info->regs + S3C2410_NFCONF); - cfg &= ~mask; - cfg |= set; - writel(cfg, info->regs + S3C2410_NFCONF); - - local_irq_restore(flags); - - dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg); - - return 0; -} - -/** - * s3c2410_nand_inithw - basic hardware initialisation - * @info: The hardware state. - * - * Do the basic initialisation of the hardware, using s3c2410_nand_setrate() - * to setup the hardware access speeds and set the controller to be enabled. -*/ -static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) -{ - int ret; - - ret = s3c2410_nand_setrate(info); - if (ret < 0) - return ret; - - switch (info->cpu_type) { - case TYPE_S3C2410: - default: - break; - - case TYPE_S3C2440: - case TYPE_S3C2412: - /* enable the controller and de-assert nFCE */ - - writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); - } - - return 0; -} - -/** - * s3c2410_nand_select_chip - select the given nand chip - * @this: NAND chip object. - * @chip: The chip number. - * - * This is called by the MTD layer to either select a given chip for the - * @mtd instance, or to indicate that the access has finished and the - * chip can be de-selected. - * - * The routine ensures that the nFCE line is correctly setup, and any - * platform specific selection code is called to route nFCE to the specific - * chip. 
- */ -static void s3c2410_nand_select_chip(struct nand_chip *this, int chip) -{ - struct s3c2410_nand_info *info; - struct s3c2410_nand_mtd *nmtd; - unsigned long cur; - - nmtd = nand_get_controller_data(this); - info = nmtd->info; - - if (chip != -1) - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); - - cur = readl(info->sel_reg); - - if (chip == -1) { - cur |= info->sel_bit; - } else { - if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { - dev_err(info->device, "invalid chip %d\n", chip); - return; - } - - if (info->platform != NULL) { - if (info->platform->select_chip != NULL) - (info->platform->select_chip) (nmtd->set, chip); - } - - cur &= ~info->sel_bit; - } - - writel(cur, info->sel_reg); - - if (chip == -1) - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); -} - -/* s3c2410_nand_hwcontrol - * - * Issue command and address cycles to the chip -*/ - -static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, info->regs + S3C2410_NFCMD); - else - writeb(cmd, info->regs + S3C2410_NFADDR); -} - -/* command and control functions */ - -static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, info->regs + S3C2440_NFCMD); - else - writeb(cmd, info->regs + S3C2440_NFADDR); -} - -/* s3c2410_nand_devready() - * - * returns 0 if the nand is busy, 1 if it is ready -*/ - -static int s3c2410_nand_devready(struct nand_chip *chip) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; -} - -static int s3c2440_nand_devready(struct nand_chip *chip) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; -} - -static int s3c2412_nand_devready(struct nand_chip *chip) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; -} - -/* ECC handling functions */ - -static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat, - u_char *read_ecc, u_char *calc_ecc) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - unsigned int diff0, diff1, diff2; - unsigned int bit, byte; - - pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc); - - diff0 = read_ecc[0] ^ calc_ecc[0]; - diff1 = read_ecc[1] ^ calc_ecc[1]; - diff2 = read_ecc[2] ^ calc_ecc[2]; - - pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n", - __func__, 3, read_ecc, 3, calc_ecc, - diff0, diff1, diff2); - - if (diff0 == 0 && diff1 == 0 && diff2 == 0) - return 0; /* ECC is ok */ - - /* sometimes people do not think about using the ECC, so check - * to see if we have an 0xff,0xff,0xff read ECC and then ignore - * the error, on the assumption that this is an un-eccd page. - */ - if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff - && info->platform->ignore_unset_ecc) - return 0; - - /* Can we correct this ECC (ie, one row and column change). 
- * Note, this is similar to the 256 error code on smartmedia */ - - if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 && - ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 && - ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { - /* calculate the bit position of the error */ - - bit = ((diff2 >> 3) & 1) | - ((diff2 >> 4) & 2) | - ((diff2 >> 5) & 4); - - /* calculate the byte position of the error */ - - byte = ((diff2 << 7) & 0x100) | - ((diff1 << 0) & 0x80) | - ((diff1 << 1) & 0x40) | - ((diff1 << 2) & 0x20) | - ((diff1 << 3) & 0x10) | - ((diff0 >> 4) & 0x08) | - ((diff0 >> 3) & 0x04) | - ((diff0 >> 2) & 0x02) | - ((diff0 >> 1) & 0x01); - - dev_dbg(info->device, "correcting error bit %d, byte %d\n", - bit, byte); - - dat[byte] ^= (1 << bit); - return 1; - } - - /* if there is only one bit difference in the ECC, then - * one of only a row or column parity has changed, which - * means the error is most probably in the ECC itself */ - - diff0 |= (diff1 << 8); - diff0 |= (diff2 << 16); - - /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */ - if ((diff0 & (diff0 - 1)) == 0) - return 1; - - return -1; -} - -/* ECC functions - * - * These allow the s3c2410 and s3c2440 to use the controller's ECC - * generator block to ECC the data as it passes through] -*/ - -static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode) -{ - struct s3c2410_nand_info *info; - unsigned long ctrl; - - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); - ctrl = readl(info->regs + S3C2410_NFCONF); - ctrl |= S3C2410_NFCONF_INITECC; - writel(ctrl, info->regs + S3C2410_NFCONF); -} - -static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode) -{ - struct s3c2410_nand_info *info; - unsigned long ctrl; - - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); - ctrl = readl(info->regs + S3C2440_NFCONT); - writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, - info->regs + S3C2440_NFCONT); -} - -static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode) -{ - struct s3c2410_nand_info *info; - unsigned long ctrl; - - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); - ctrl = readl(info->regs + S3C2440_NFCONT); - writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); -} - -static int s3c2410_nand_calculate_ecc(struct nand_chip *chip, - const u_char *dat, u_char *ecc_code) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0); - ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); - ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); - - pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); - - return 0; -} - -static int s3c2412_nand_calculate_ecc(struct nand_chip *chip, - const u_char *dat, u_char *ecc_code) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); - - ecc_code[0] = ecc; - ecc_code[1] = ecc >> 8; - ecc_code[2] = ecc >> 16; - - pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); - - return 0; -} - -static int s3c2440_nand_calculate_ecc(struct nand_chip *chip, - const u_char *dat, u_char *ecc_code) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); - - ecc_code[0] = ecc; - ecc_code[1] = ecc >> 8; - ecc_code[2] = ecc >> 16; - - pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff); - - return 0; -} - -/* over-ride the standard 
functions for a little more speed. We can - * use read/write block to move the data buffers to/from the controller -*/ - -static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len) -{ - readsb(this->legacy.IO_ADDR_R, buf, len); -} - -static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - readsl(info->regs + S3C2440_NFDATA, buf, len >> 2); - - /* cleanup if we've got less than a word to do */ - if (len & 3) { - buf += len & ~3; - - for (; len & 3; len--) - *buf++ = readb(info->regs + S3C2440_NFDATA); - } -} - -static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf, - int len) -{ - writesb(this->legacy.IO_ADDR_W, buf, len); -} - -static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf, - int len) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - writesl(info->regs + S3C2440_NFDATA, buf, len >> 2); - - /* cleanup any fractional write */ - if (len & 3) { - buf += len & ~3; - - for (; len & 3; len--, buf++) - writeb(*buf, info->regs + S3C2440_NFDATA); - } -} - -/* device management functions */ - -static void s3c24xx_nand_remove(struct platform_device *pdev) -{ - struct s3c2410_nand_info *info = to_nand_info(pdev); - - if (info == NULL) - return; - - /* Release all our mtds and their partitions, then go through - * freeing the resources used - */ - - if (info->mtds != NULL) { - struct s3c2410_nand_mtd *ptr = info->mtds; - int mtdno; - - for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { - pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); - WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip))); - nand_cleanup(&ptr->chip); - } - } - - /* free the common resources */ - - if (!IS_ERR(info->clk)) - s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); -} - -static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, - struct s3c2410_nand_mtd *mtd, - struct s3c2410_nand_set *set) -{ - if (set) { - struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip); - - mtdinfo->name = set->name; - - return mtd_device_register(mtdinfo, set->partitions, - set->nr_partitions); - } - - return -ENODEV; -} - -static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, - const struct nand_interface_config *conf) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - struct s3c2410_platform_nand *pdata = info->platform; - const struct nand_sdr_timings *timings; - int tacls; - - timings = nand_get_sdr_timings(conf); - if (IS_ERR(timings)) - return -ENOTSUPP; - - tacls = timings->tCLS_min - timings->tWP_min; - if (tacls < 0) - tacls = 0; - - pdata->tacls = DIV_ROUND_UP(tacls, 1000); - pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000); - pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000); - - return s3c2410_nand_setrate(info); -} - -/** - * s3c2410_nand_init_chip - initialise a single instance of an chip - * @info: The base NAND controller the chip is on. - * @nmtd: The new controller MTD instance to fill in. - * @set: The information passed from the board specific platform data. - * - * Initialise the given @nmtd from the information in @info and @set. This - * readies the structure for use with the MTD layer functions by ensuring - * all pointers are setup and the necessary control routines selected. 
- */ -static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, - struct s3c2410_nand_mtd *nmtd, - struct s3c2410_nand_set *set) -{ - struct device_node *np = info->device->of_node; - struct nand_chip *chip = &nmtd->chip; - void __iomem *regs = info->regs; - - nand_set_flash_node(chip, set->of_node); - - chip->legacy.write_buf = s3c2410_nand_write_buf; - chip->legacy.read_buf = s3c2410_nand_read_buf; - chip->legacy.select_chip = s3c2410_nand_select_chip; - chip->legacy.chip_delay = 50; - nand_set_controller_data(chip, nmtd); - chip->options = set->options; - chip->controller = &info->controller; - - /* - * let's keep behavior unchanged for legacy boards booting via pdata and - * auto-detect timings only when booting with a device tree. - */ - if (!np) - chip->options |= NAND_KEEP_TIMINGS; - - switch (info->cpu_type) { - case TYPE_S3C2410: - chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA; - info->sel_reg = regs + S3C2410_NFCONF; - info->sel_bit = S3C2410_NFCONF_nFCE; - chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol; - chip->legacy.dev_ready = s3c2410_nand_devready; - break; - - case TYPE_S3C2440: - chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA; - info->sel_reg = regs + S3C2440_NFCONT; - info->sel_bit = S3C2440_NFCONT_nFCE; - chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol; - chip->legacy.dev_ready = s3c2440_nand_devready; - chip->legacy.read_buf = s3c2440_nand_read_buf; - chip->legacy.write_buf = s3c2440_nand_write_buf; - break; - - case TYPE_S3C2412: - chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA; - info->sel_reg = regs + S3C2440_NFCONT; - info->sel_bit = S3C2412_NFCONT_nFCE0; - chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol; - chip->legacy.dev_ready = s3c2412_nand_devready; - - if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) - dev_info(info->device, "System booted from NAND\n"); - - break; - } - - chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W; - - nmtd->info = info; - nmtd->set = set; - - chip->ecc.engine_type = info->platform->engine_type; - - /* - * If you use u-boot BBT creation code, specifying this flag will - * let the kernel fish out the BBT from the NAND. - */ - if (set->flash_bbt) - chip->bbt_options |= NAND_BBT_USE_FLASH; -} - -/** - * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan - * @chip: The NAND chip - * - * This hook is called by the core after the identification of the NAND chip, - * once the relevant per-chip information is up to date.. This call ensure that - * we update the internal state accordingly. - * - * The internal state is currently limited to the ECC state information. -*/ -static int s3c2410_nand_attach_chip(struct nand_chip *chip) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); - - switch (chip->ecc.engine_type) { - - case NAND_ECC_ENGINE_TYPE_NONE: - dev_info(info->device, "ECC disabled\n"); - break; - - case NAND_ECC_ENGINE_TYPE_SOFT: - /* - * This driver expects Hamming based ECC when engine_type is set - * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to - * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field - * to s3c2410_platform_nand. 
- */ - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; - dev_info(info->device, "soft ECC\n"); - break; - - case NAND_ECC_ENGINE_TYPE_ON_HOST: - chip->ecc.calculate = s3c2410_nand_calculate_ecc; - chip->ecc.correct = s3c2410_nand_correct_data; - chip->ecc.strength = 1; - - switch (info->cpu_type) { - case TYPE_S3C2410: - chip->ecc.hwctl = s3c2410_nand_enable_hwecc; - chip->ecc.calculate = s3c2410_nand_calculate_ecc; - break; - - case TYPE_S3C2412: - chip->ecc.hwctl = s3c2412_nand_enable_hwecc; - chip->ecc.calculate = s3c2412_nand_calculate_ecc; - break; - - case TYPE_S3C2440: - chip->ecc.hwctl = s3c2440_nand_enable_hwecc; - chip->ecc.calculate = s3c2440_nand_calculate_ecc; - break; - } - - dev_dbg(info->device, "chip %p => page shift %d\n", - chip, chip->page_shift); - - /* change the behaviour depending on whether we are using - * the large or small page nand device */ - if (chip->page_shift > 10) { - chip->ecc.size = 256; - chip->ecc.bytes = 3; - } else { - chip->ecc.size = 512; - chip->ecc.bytes = 3; - mtd_set_ooblayout(nand_to_mtd(chip), - &s3c2410_ooblayout_ops); - } - - dev_info(info->device, "hardware ECC\n"); - break; - - default: - dev_err(info->device, "invalid ECC mode!\n"); - return -EINVAL; - } - - if (chip->bbt_options & NAND_BBT_USE_FLASH) - chip->options |= NAND_SKIP_BBTSCAN; - - return 0; -} - -static const struct nand_controller_ops s3c24xx_nand_controller_ops = { - .attach_chip = s3c2410_nand_attach_chip, - .setup_interface = s3c2410_nand_setup_interface, -}; - -static const struct of_device_id s3c24xx_nand_dt_ids[] = { - { - .compatible = "samsung,s3c2410-nand", - .data = &s3c2410_nand_devtype_data, - }, { - /* also compatible with s3c6400 */ - .compatible = "samsung,s3c2412-nand", - .data = &s3c2412_nand_devtype_data, - }, { - .compatible = "samsung,s3c2440-nand", - .data = &s3c2440_nand_devtype_data, - }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids); - -static int s3c24xx_nand_probe_dt(struct platform_device *pdev) -{ - const struct s3c24XX_nand_devtype_data *devtype_data; - struct s3c2410_platform_nand *pdata; - struct s3c2410_nand_info *info = platform_get_drvdata(pdev); - struct device_node *np = pdev->dev.of_node, *child; - struct s3c2410_nand_set *sets; - - devtype_data = of_device_get_match_data(&pdev->dev); - if (!devtype_data) - return -ENODEV; - - info->cpu_type = devtype_data->type; - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - pdev->dev.platform_data = pdata; - - pdata->nr_sets = of_get_child_count(np); - if (!pdata->nr_sets) - return 0; - - sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets), - GFP_KERNEL); - if (!sets) - return -ENOMEM; - - pdata->sets = sets; - - for_each_available_child_of_node(np, child) { - sets->name = (char *)child->name; - sets->of_node = child; - sets->nr_chips = 1; - - of_node_get(child); - - sets++; - } - - return 0; -} - -static int s3c24xx_nand_probe_pdata(struct platform_device *pdev) -{ - struct s3c2410_nand_info *info = platform_get_drvdata(pdev); - - info->cpu_type = platform_get_device_id(pdev)->driver_data; - - return 0; -} - -/* s3c24xx_nand_probe - * - * called by device layer when it finds a device matching - * one our driver can handled. 
This code checks to see if - * it can allocate all necessary resources then calls the - * nand layer to look for devices -*/ -static int s3c24xx_nand_probe(struct platform_device *pdev) -{ - struct s3c2410_platform_nand *plat; - struct s3c2410_nand_info *info; - struct s3c2410_nand_mtd *nmtd; - struct s3c2410_nand_set *sets; - struct resource *res; - int err = 0; - int size; - int nr_sets; - int setno; - - info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); - if (info == NULL) { - err = -ENOMEM; - goto exit_error; - } - - platform_set_drvdata(pdev, info); - - nand_controller_init(&info->controller); - info->controller.ops = &s3c24xx_nand_controller_ops; - - /* get the clock source and enable it */ - - info->clk = devm_clk_get(&pdev->dev, "nand"); - if (IS_ERR(info->clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - err = -ENOENT; - goto exit_error; - } - - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); - - if (pdev->dev.of_node) - err = s3c24xx_nand_probe_dt(pdev); - else - err = s3c24xx_nand_probe_pdata(pdev); - - if (err) - goto exit_error; - - plat = to_nand_plat(pdev); - - /* allocate and map the resource */ - - /* currently we assume we have the one resource */ - res = pdev->resource; - size = resource_size(res); - - info->device = &pdev->dev; - info->platform = plat; - - info->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(info->regs)) { - err = PTR_ERR(info->regs); - goto exit_error; - } - - dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs); - - if (!plat->sets || plat->nr_sets < 1) { - err = -EINVAL; - goto exit_error; - } - - sets = plat->sets; - nr_sets = plat->nr_sets; - - info->mtd_count = nr_sets; - - /* allocate our information */ - - size = nr_sets * sizeof(*info->mtds); - info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (info->mtds == NULL) { - err = -ENOMEM; - goto exit_error; - } - - /* initialise all possible chips */ - - nmtd = info->mtds; - - for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) { - struct mtd_info *mtd = nand_to_mtd(&nmtd->chip); - - pr_debug("initialising set %d (%p, info %p)\n", - setno, nmtd, info); - - mtd->dev.parent = &pdev->dev; - s3c2410_nand_init_chip(info, nmtd, sets); - - err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1); - if (err) - goto exit_error; - - s3c2410_nand_add_partition(info, nmtd, sets); - } - - /* initialise the hardware */ - err = s3c2410_nand_inithw(info); - if (err != 0) - goto exit_error; - - if (allow_clk_suspend(info)) { - dev_info(&pdev->dev, "clock idle support enabled\n"); - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); - } - - return 0; - - exit_error: - s3c24xx_nand_remove(pdev); - - if (err == 0) - err = -EINVAL; - return err; -} - -/* PM Support */ -#ifdef CONFIG_PM - -static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm) -{ - struct s3c2410_nand_info *info = platform_get_drvdata(dev); - - if (info) { - info->save_sel = readl(info->sel_reg); - - /* For the moment, we must ensure nFCE is high during - * the time we are suspended. This really should be - * handled by suspending the MTDs we are using, but - * that is currently not the case. 
*/ - - writel(info->save_sel | info->sel_bit, info->sel_reg); - - s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); - } - - return 0; -} - -static int s3c24xx_nand_resume(struct platform_device *dev) -{ - struct s3c2410_nand_info *info = platform_get_drvdata(dev); - unsigned long sel; - - if (info) { - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); - s3c2410_nand_inithw(info); - - /* Restore the state of the nFCE line. */ - - sel = readl(info->sel_reg); - sel &= ~info->sel_bit; - sel |= info->save_sel & info->sel_bit; - writel(sel, info->sel_reg); - - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); - } - - return 0; -} - -#else -#define s3c24xx_nand_suspend NULL -#define s3c24xx_nand_resume NULL -#endif - -/* driver device registration */ - -static const struct platform_device_id s3c24xx_driver_ids[] = { - { - .name = "s3c2410-nand", - .driver_data = TYPE_S3C2410, - }, { - .name = "s3c2440-nand", - .driver_data = TYPE_S3C2440, - }, { - .name = "s3c2412-nand", - .driver_data = TYPE_S3C2412, - }, { - .name = "s3c6400-nand", - .driver_data = TYPE_S3C2412, /* compatible with 2412 */ - }, - { } -}; - -MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); - -static struct platform_driver s3c24xx_nand_driver = { - .probe = s3c24xx_nand_probe, - .remove = s3c24xx_nand_remove, - .suspend = s3c24xx_nand_suspend, - .resume = s3c24xx_nand_resume, - .id_table = s3c24xx_driver_ids, - .driver = { - .name = "s3c24xx-nand", - .of_match_table = s3c24xx_nand_dt_ids, - }, -}; - -module_platform_driver(s3c24xx_nand_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); -MODULE_DESCRIPTION("S3C24XX MTD NAND driver"); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index d957327fb4fa..c08d6b176372 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -2158,7 +2158,6 @@ static struct platform_driver stm32_fmc2_nfc_driver = { }; module_platform_driver(stm32_fmc2_nfc_driver); -MODULE_ALIAS("platform:stm32_fmc2_nfc"); MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>"); MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 162cd5f4f234..f6a8e8ae819d 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2205,4 +2205,3 @@ module_platform_driver(sunxi_nfc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Boris BREZILLON"); MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver"); -MODULE_ALIAS("platform:sunxi_nand"); diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile index 258da42451a4..6d3d203df048 100644 --- a/drivers/mtd/nand/spi/Makefile +++ b/drivers/mtd/nand/spi/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 spinand-objs := core.o otp.o -spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o +spinand-objs += alliancememory.o ato.o esmt.o fmsh.o foresee.o gigadevice.o macronix.o spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o obj-$(CONFIG_MTD_SPI_NAND) += spinand.o diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index b0898990b2a5..f92133b8e1a6 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -430,8 +430,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, * Dirmap accesses are allowed to toggle the CS. * Toggling the CS during a continuous read is forbidden. 
*/ - if (nbytes && req->continuous) - return -EIO; + if (nbytes && req->continuous) { + /* + * Spi controller with broken support of continuous + * reading was detected. Disable future use of + * continuous reading and return -EAGAIN to retry + * reading within regular mode. + */ + spinand->cont_read_possible = false; + return -EAGAIN; + } } if (req->datalen) @@ -899,10 +907,19 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, old_stats = mtd->ecc_stats; - if (spinand_use_cont_read(mtd, from, ops)) + if (spinand_use_cont_read(mtd, from, ops)) { ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips); - else + if (ret == -EAGAIN && !spinand->cont_read_possible) { + /* + * Spi controller with broken support of continuous + * reading was detected (see spinand_read_from_cache_op()), + * repeat reading in regular mode. + */ + ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); + } + } else { ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); + } if (ops->stats) { ops->stats->uncorrectable_errors += @@ -1093,22 +1110,50 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs) return ret; } +static struct spi_mem_dirmap_desc *spinand_create_rdesc( + struct spinand_device *spinand, + struct spi_mem_dirmap_info *info) +{ + struct nand_device *nand = spinand_to_nand(spinand); + struct spi_mem_dirmap_desc *desc = NULL; + + if (spinand->cont_read_possible) { + /* + * spi controller may return an error if info->length is + * too large + */ + info->length = nanddev_eraseblock_size(nand); + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, + spinand->spimem, info); + } + + if (IS_ERR_OR_NULL(desc)) { + /* + * continuous reading is not supported by flash or + * its spi controller, use regular reading + */ + spinand->cont_read_possible = false; + + info->length = nanddev_page_size(nand) + + nanddev_per_page_oobsize(nand); + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, + spinand->spimem, info); + } + + return desc; +} + static int spinand_create_dirmap(struct spinand_device *spinand, unsigned int plane) { struct nand_device *nand = spinand_to_nand(spinand); - struct spi_mem_dirmap_info info = { - .length = nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand), - }; + struct spi_mem_dirmap_info info = { 0 }; struct spi_mem_dirmap_desc *desc; - if (spinand->cont_read_possible) - info.length = nanddev_eraseblock_size(nand); - /* The plane number is passed in MSB just above the column address */ info.offset = plane << fls(nand->memorg.pagesize); + info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); info.op_tmpl = *spinand->op_templates.update_cache; desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, spinand->spimem, &info); @@ -1118,8 +1163,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand, spinand->dirmaps[plane].wdesc = desc; info.op_tmpl = *spinand->op_templates.read_cache; - desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, - spinand->spimem, &info); + desc = spinand_create_rdesc(spinand, &info); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -1132,6 +1176,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand, return 0; } + info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); info.op_tmpl = *spinand->op_templates.update_cache; info.op_tmpl.data.ecc = true; desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, @@ -1143,8 +1188,7 @@ static int spinand_create_dirmap(struct spinand_device 
*spinand, info.op_tmpl = *spinand->op_templates.read_cache; info.op_tmpl.data.ecc = true; - desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, - spinand->spimem, &info); + desc = spinand_create_rdesc(spinand, &info); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -1184,6 +1228,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = { &alliancememory_spinand_manufacturer, &ato_spinand_manufacturer, &esmt_c8_spinand_manufacturer, + &fmsh_spinand_manufacturer, &foresee_spinand_manufacturer, &gigadevice_spinand_manufacturer, ¯onix_spinand_manufacturer, diff --git a/drivers/mtd/nand/spi/fmsh.c b/drivers/mtd/nand/spi/fmsh.c new file mode 100644 index 000000000000..8b2097bfc771 --- /dev/null +++ b/drivers/mtd/nand/spi/fmsh.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd. + * + * Author: Dingqiang Lin <jon.lin@rock-chips.com> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/mtd/spinand.h> + +#define SPINAND_MFR_FMSH 0xA1 + +static SPINAND_OP_VARIANTS(read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); + +static SPINAND_OP_VARIANTS(write_cache_variants, + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(update_cache_variants, + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); + +static int fm25s01a_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + return -ERANGE; +} + +static int fm25s01a_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + region->offset = 2; + region->length = 62; + + return 0; +} + +static const struct mtd_ooblayout_ops fm25s01a_ooblayout = { + .ecc = fm25s01a_ooblayout_ecc, + .free = fm25s01a_ooblayout_free, +}; + +static const struct spinand_info fmsh_spinand_table[] = { + SPINAND_INFO("FM25S01A", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)), +}; + +static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = { +}; + +const struct spinand_manufacturer fmsh_spinand_manufacturer = { + .id = SPINAND_MFR_FMSH, + .name = "Fudan Micro", + .chips = fmsh_spinand_table, + .nchips = ARRAY_SIZE(fmsh_spinand_table), + .ops = &fmsh_spinand_manuf_ops, +}; diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index 93e40431dbe2..72ad36c9a126 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -4,6 +4,7 @@ * Chuanhong Guo <gch981213@gmail.com> */ +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/mtd/spinand.h> @@ -23,6 +24,18 @@ #define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4) #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) +/* Feature bit definitions */ +#define GD_FEATURE_NR BIT(3) /* Normal 
Read(1=normal,0=continuous) */ +#define GD_FEATURE_CRDC BIT(2) /* Continuous Read Dummy */ + +/* ECC status extraction helpers */ +#define GD_ECCSR_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr) +#define GD_ECCSR_ACCUMULATED(eccsr) FIELD_GET(GENMASK(7, 4), eccsr) + +struct gigadevice_priv { + bool continuous_read; +}; + static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0), SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), @@ -63,6 +76,74 @@ static SPINAND_OP_VARIANTS(update_cache_variants, SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); +static int gd5fxgm9_get_eccsr(struct spinand_device *spinand, u8 *eccsr) +{ + struct gigadevice_priv *priv = spinand->priv; + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_DUMMY(1, 1), + SPI_MEM_OP_DATA_IN(1, eccsr, 1)); + int ret; + + ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + if (priv->continuous_read) + *eccsr = GD_ECCSR_ACCUMULATED(*eccsr); + else + *eccsr = GD_ECCSR_LAST_PAGE(*eccsr); + + return 0; +} + +static int gd5fxgm9_ecc_get_status(struct spinand_device *spinand, u8 status) +{ + struct nand_device *nand = spinand_to_nand(spinand); + u8 eccsr; + int ret; + + switch (status & STATUS_ECC_MASK) { + case STATUS_ECC_NO_BITFLIPS: + return 0; + + case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: + ret = gd5fxgm9_get_eccsr(spinand, spinand->scratchbuf); + if (ret) + return nanddev_get_ecc_conf(nand)->strength; + + eccsr = *spinand->scratchbuf; + if (WARN_ON(!eccsr || eccsr > nanddev_get_ecc_conf(nand)->strength)) + return nanddev_get_ecc_conf(nand)->strength; + + return eccsr; + + case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: + return 8; + + case STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: + return -EINVAL; + } +} + +static int gd5fxgm9_set_continuous_read(struct spinand_device *spinand, bool enable) +{ + struct gigadevice_priv *priv = spinand->priv; + int ret; + + ret = spinand_upd_cfg(spinand, GD_FEATURE_NR, + enable ? 
0 : GD_FEATURE_NR); + if (ret) + return ret; + + priv->continuous_read = enable; + + return 0; +} + static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { @@ -542,7 +623,8 @@ static const struct spinand_info gigadevice_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq4uexxg_ecc_get_status)), + gd5fxgm9_ecc_get_status), + SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)), SPINAND_INFO("GD5F1GM9RExxG", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -552,10 +634,31 @@ static const struct spinand_info gigadevice_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq4uexxg_ecc_get_status)), + gd5fxgm9_ecc_get_status), + SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)), }; +static int gd5fxgm9_spinand_init(struct spinand_device *spinand) +{ + struct gigadevice_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spinand->priv = priv; + + return 0; +} + +static void gd5fxgm9_spinand_cleanup(struct spinand_device *spinand) +{ + kfree(spinand->priv); +} + static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { + .init = gd5fxgm9_spinand_init, + .cleanup = gd5fxgm9_spinand_cleanup, }; const struct spinand_manufacturer gigadevice_spinand_manufacturer = { diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index c546f8c5f24d..be26cc67a1c4 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c @@ -190,8 +190,8 @@ static int scan_header(struct partition *part) if (!part->blocks) goto err; - part->sector_map = vmalloc(array_size(sizeof(u_long), - part->sector_count)); + part->sector_map = vmalloc_array(part->sector_count, + sizeof(u_long)); if (!part->sector_map) goto err; diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index ac4b960101cc..20ea80450f22 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2014,6 +2014,76 @@ static const struct flash_info *spi_nor_detect(struct spi_nor *nor) return info; } +/* + * On Octal DTR capable flashes, reads cannot start or end at an odd + * address in Octal DTR mode. Extra bytes need to be read at the start + * or end to make sure both the start address and length remain even. + */ +static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + u_char *tmp_buf; + size_t tmp_len; + loff_t start, end; + int ret, bytes_read; + + if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2)) + return spi_nor_read_data(nor, from, len, buf); + else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE) + return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE), + buf); + + tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + + start = round_down(from, 2); + end = round_up(from + len, 2); + + /* + * Avoid allocating too much memory. The requested read length might be + * quite large. Allocating a buffer just as large (slightly bigger, in + * fact) would put unnecessary memory pressure on the system. + * + * For example if the read is from 3 to 1M, then this will read from 2 + * to 4098. The reads from 4098 to 1M will then not need a temporary + * buffer so they can proceed as normal. 
+ */ + tmp_len = min_t(size_t, end - start, PAGE_SIZE); + + ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf); + if (ret == 0) { + ret = -EIO; + goto out; + } + if (ret < 0) + goto out; + + /* + * More bytes are read than actually requested, but that number can't be + * reported to the calling function or it will confuse its calculations. + * Calculate how many of the _requested_ bytes were read. + */ + bytes_read = ret; + + if (from != start) + ret -= from - start; + + /* + * Only account for extra bytes at the end if they were actually read. + * For example, if the total length was truncated because of temporary + * buffer size limit then the adjustment for the extra bytes at the end + * is not needed. + */ + if (start + bytes_read == end) + ret -= end - (from + len); + + memcpy(buf, tmp_buf + (from - start), ret); +out: + kfree(tmp_buf); + return ret; +} + static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { @@ -2031,7 +2101,11 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, while (len) { loff_t addr = from; - ret = spi_nor_read_data(nor, addr, len, buf); + if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) + ret = spi_nor_octal_dtr_read(nor, addr, len, buf); + else + ret = spi_nor_read_data(nor, addr, len, buf); + if (ret == 0) { /* We shouldn't see 0-length reads */ ret = -EIO; @@ -2055,6 +2129,68 @@ read_err: } /* + * On Octal DTR capable flashes, writes cannot start or end at an odd address + * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to + * make sure the start address and end address are even. 0xff is used because + * on NOR flashes a program operation can only flip bits from 1 to 0, not the + * other way round. 0 to 1 flip needs to happen via erases. + */ +static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len, + const u8 *buf) +{ + u8 *tmp_buf; + size_t bytes_written; + loff_t start, end; + int ret; + + if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2)) + return spi_nor_write_data(nor, to, len, buf); + + tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + + memset(tmp_buf, 0xff, nor->params->page_size); + + start = round_down(to, 2); + end = round_up(to + len, 2); + + memcpy(tmp_buf + (to - start), buf, len); + + ret = spi_nor_write_data(nor, start, end - start, tmp_buf); + if (ret == 0) { + ret = -EIO; + goto out; + } + if (ret < 0) + goto out; + + /* + * More bytes are written than actually requested, but that number can't + * be reported to the calling function or it will confuse its + * calculations. Calculate how many of the _requested_ bytes were + * written. + */ + bytes_written = ret; + + if (to != start) + ret -= to - start; + + /* + * Only account for extra bytes at the end if they were actually + * written. For example, if for some reason the controller could only + * complete a partial write then the adjustment for the extra bytes at + * the end is not needed. + */ + if (start + bytes_written == end) + ret -= end - (to + len); + +out: + kfree(tmp_buf); + return ret; +} + +/* * Write an address range to the nor chip. Data must be written in * FLASH_PAGESIZE chunks. The address range may be any size provided * it is within the physical boundaries. 
@@ -2090,7 +2226,12 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, goto write_err; } - ret = spi_nor_write_data(nor, addr, page_remain, buf + i); + if (nor->write_proto == SNOR_PROTO_8_8_8_DTR) + ret = spi_nor_octal_dtr_write(nor, addr, page_remain, + buf + i); + else + ret = spi_nor_write_data(nor, addr, page_remain, + buf + i); spi_nor_unlock_device(nor); if (ret < 0) goto write_err; diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 288ef765a44e..75b0b2abc880 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/unaligned.h> +#include <asm/barrier.h> struct device_node; struct module; diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h index 4e694b1aabbd..e8201d1b7cf9 100644 --- a/include/linux/mtd/nand-qpic-common.h +++ b/include/linux/mtd/nand-qpic-common.h @@ -71,14 +71,10 @@ /* NAND_DEVn_CFG0 bits */ #define DISABLE_STATUS_AFTER_WRITE BIT(4) -#define CW_PER_PAGE 6 #define CW_PER_PAGE_MASK GENMASK(8, 6) -#define UD_SIZE_BYTES 9 #define UD_SIZE_BYTES_MASK GENMASK(18, 9) #define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19) -#define SPARE_SIZE_BYTES 23 #define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) -#define NUM_ADDR_CYCLES 27 #define NUM_ADDR_CYCLES_MASK GENMASK(29, 27) #define STATUS_BFR_READ BIT(30) #define SET_RD_MODE_AFTER_STATUS BIT(31) @@ -86,26 +82,20 @@ /* NAND_DEVn_CFG0 bits */ #define DEV0_CFG1_ECC_DISABLE BIT(0) #define WIDE_FLASH BIT(1) -#define NAND_RECOVERY_CYCLES 2 #define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2) #define CS_ACTIVE_BSY BIT(5) -#define BAD_BLOCK_BYTE_NUM 6 #define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6) #define BAD_BLOCK_IN_SPARE_AREA BIT(16) -#define WR_RD_BSY_GAP 17 #define WR_RD_BSY_GAP_MASK GENMASK(22, 17) #define ENABLE_BCH_ECC BIT(27) /* NAND_DEV0_ECC_CFG bits */ #define ECC_CFG_ECC_DISABLE BIT(0) #define ECC_SW_RESET BIT(1) -#define ECC_MODE 4 #define ECC_MODE_MASK GENMASK(5, 4) #define ECC_MODE_4BIT 0 #define ECC_MODE_8BIT 1 -#define ECC_PARITY_SIZE_BYTES_BCH 8 #define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8) -#define ECC_NUM_DATA_BYTES 16 #define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) #define ECC_FORCE_CLK_OPEN BIT(30) @@ -120,7 +110,6 @@ #define SEQ_READ_START_VLD BIT(4) /* NAND_EBI2_ECC_BUF_CFG bits */ -#define NUM_STEPS 0 #define NUM_STEPS_MASK GENMASK(9, 0) /* NAND_ERASED_CW_DETECT_CFG bits */ @@ -141,11 +130,8 @@ #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) /* NAND_READ_LOCATION_n bits */ -#define READ_LOCATION_OFFSET 0 #define READ_LOCATION_OFFSET_MASK GENMASK(9, 0) -#define READ_LOCATION_SIZE 16 #define READ_LOCATION_SIZE_MASK GENMASK(25, 16) -#define READ_LOCATION_LAST 31 #define READ_LOCATION_LAST_MASK BIT(31) /* Version Mask */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 07486168d104..09c8c93e4dba 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1136,4 +1136,9 @@ static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len); +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); + #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index e84522e31301..d30bdc3fcfd7 100644 --- a/include/linux/mtd/rawnand.h +++ 
b/include/linux/mtd/rawnand.h @@ -1519,11 +1519,6 @@ int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc); void rawnand_sw_bch_cleanup(struct nand_chip *chip); -int nand_check_erased_ecc_chunk(void *data, int datalen, - void *ecc, int ecclen, - void *extraoob, int extraooblen, - int threshold); - int nand_ecc_choose_conf(struct nand_chip *chip, const struct nand_ecc_caps *caps, int oobavail); diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 27a45bdab7ec..927c10d78769 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -355,6 +355,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer alliancememory_spinand_manufacturer; extern const struct spinand_manufacturer ato_spinand_manufacturer; extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; +extern const struct spinand_manufacturer fmsh_spinand_manufacturer; extern const struct spinand_manufacturer foresee_spinand_manufacturer; extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h deleted file mode 100644 index 25390fc3e795..000000000000 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ /dev/null @@ -1,70 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2004 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * S3C2410 - NAND device controller platform_device info -*/ - -#ifndef __MTD_NAND_S3C2410_H -#define __MTD_NAND_S3C2410_H - -#include <linux/mtd/rawnand.h> - -/** - * struct s3c2410_nand_set - define a set of one or more nand chips - * @flash_bbt: Openmoko u-boot can create a Bad Block Table - * Setting this flag will allow the kernel to - * look for it at boot time and also skip the NAND - * scan. - * @options: Default value to set into 'struct nand_chip' options. - * @nr_chips: Number of chips in this set - * @nr_partitions: Number of partitions pointed to by @partitions - * @name: Name of set (optional) - * @nr_map: Map for low-layer logical to physical chip numbers (option) - * @partitions: The mtd partition list - * - * define a set of one or more nand chips registered with an unique mtd. Also - * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger - * a warning at boot time. - */ -struct s3c2410_nand_set { - unsigned int flash_bbt:1; - - unsigned int options; - int nr_chips; - int nr_partitions; - char *name; - int *nr_map; - struct mtd_partition *partitions; - struct device_node *of_node; -}; - -struct s3c2410_platform_nand { - /* timing information for controller, all times in nanoseconds */ - - int tacls; /* time for active CLE/ALE to nWE/nOE */ - int twrph0; /* active time for nWE/nOE */ - int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */ - - unsigned int ignore_unset_ecc:1; - - enum nand_ecc_engine_type engine_type; - - int nr_sets; - struct s3c2410_nand_set *sets; - - void (*select_chip)(struct s3c2410_nand_set *, - int chip); -}; - -/** - * s3c_nand_set_platdata() - register NAND platform data. - * @nand: The NAND platform data to register with s3c_device_nand. - * - * This function copies the given NAND platform data, @nand and registers - * it with the s3c_device_nand. This allows @nand to be __initdata. 
-*/ -extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand); - -#endif /*__MTD_NAND_S3C2410_H */
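For reference, the even-alignment padding introduced for Octal DTR reads and writes in drivers/mtd/spi-nor/core.c boils down to rounding the requested window out to even boundaries, bouncing the transfer through a temporary buffer, and trimming the padding back out of the reported byte count. Below is a minimal, standalone C sketch of that arithmetic only; pad_window(), struct dtr_window and the main() driver are hypothetical names used for illustration, and the PAGE_SIZE cap that the kernel helpers place on the bounce buffer is omitted here.

/*
 * Standalone sketch of the even-alignment window computation mirrored from
 * the new spi_nor_octal_dtr_read()/spi_nor_octal_dtr_write() helpers.
 */
#include <stdio.h>
#include <stddef.h>

#define ROUND_DOWN(x, a)  ((x) & ~((size_t)(a) - 1))
#define ROUND_UP(x, a)    (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct dtr_window {
	size_t start;     /* even-aligned start of the padded access */
	size_t len;       /* even-aligned length of the padded access */
	size_t head_pad;  /* extra bytes transferred before the request */
	size_t tail_pad;  /* extra bytes transferred after the request */
};

/* Compute the padded window for a request that may start or end on an odd byte. */
static struct dtr_window pad_window(size_t from, size_t len)
{
	struct dtr_window w;

	w.start = ROUND_DOWN(from, 2);
	w.len = ROUND_UP(from + len, 2) - w.start;
	w.head_pad = from - w.start;
	w.tail_pad = w.len - len - w.head_pad;

	return w;
}

int main(void)
{
	/* A read of 6 bytes starting at odd offset 3 becomes an 8-byte access at 2. */
	size_t from = 3, len = 6;
	struct dtr_window w = pad_window(from, len);

	printf("request: [%zu, %zu)\n", from, from + len);
	printf("padded : [%zu, %zu) (%zu head + %zu tail pad bytes)\n",
	       w.start, w.start + w.len, w.head_pad, w.tail_pad);

	/*
	 * As in the kernel helpers, the caller copies out of the bounce buffer
	 * at offset head_pad and reports only 'len' bytes back up the stack,
	 * subtracting the head and tail padding from the transfer count.
	 */
	return 0;
}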