Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nand/for-6.18' into mtd/next

* Raw NAND:
- Add support for Loongson-2K1000 and Loongson-2K0500 NAND controllers,
including extra features, such as chip select and 6-byte NAND ID
reading support.
- Drop the s3c2410 driver.

* SPI NAND:
- Important SPI NAND continuous read improvements and fixes.
- Add support for FudanMicro FM25S01A.
- Add support for continuous reads in the GigaDevice vendor driver.

* ECC:
- Add support for the Realtek ECC engine.

This PR comes with the usual assortment of miscellaneous fixes.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>

+2033 -2424
+53 -3
Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml
··· 4 4 $id: http://devicetree.org/schemas/mtd/loongson,ls1b-nand-controller.yaml# 5 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 6 6 7 - title: Loongson-1 NAND Controller 7 + title: Loongson NAND Controller 8 8 9 9 maintainers: 10 10 - Keguang Zhang <keguang.zhang@gmail.com> 11 + - Binbin Zhou <zhoubinbin@loongson.cn> 11 12 12 13 description: 13 - The Loongson-1 NAND controller abstracts all supported operations, 14 + The Loongson NAND controller abstracts all supported operations, 14 15 meaning it does not support low-level access to raw NAND flash chips. 15 16 Moreover, the controller is paired with the DMA engine to perform 16 17 READ and PROGRAM functions. ··· 25 24 - enum: 26 25 - loongson,ls1b-nand-controller 27 26 - loongson,ls1c-nand-controller 27 + - loongson,ls2k0500-nand-controller 28 + - loongson,ls2k1000-nand-controller 28 29 - items: 29 30 - enum: 30 31 - loongson,ls1a-nand-controller 31 32 - const: loongson,ls1b-nand-controller 32 33 33 34 reg: 34 - maxItems: 2 35 + minItems: 2 36 + maxItems: 3 35 37 36 38 reg-names: 39 + minItems: 2 37 40 items: 38 41 - const: nand 39 42 - const: nand-dma 43 + - const: dma-config 40 44 41 45 dmas: 42 46 maxItems: 1 ··· 57 51 - dma-names 58 52 59 53 unevaluatedProperties: false 54 + 55 + if: 56 + properties: 57 + compatible: 58 + contains: 59 + enum: 60 + - loongson,ls2k1000-nand-controller 61 + 62 + then: 63 + properties: 64 + reg: 65 + minItems: 3 66 + reg-names: 67 + minItems: 3 68 + 69 + else: 70 + properties: 71 + reg: 72 + maxItems: 2 73 + reg-names: 74 + maxItems: 2 60 75 61 76 examples: 62 77 - | ··· 95 68 label = "ls1x-nand"; 96 69 nand-use-soft-ecc-engine; 97 70 nand-ecc-algo = "hamming"; 71 + }; 72 + }; 73 + 74 + - | 75 + nand-controller@1fe26000 { 76 + compatible = "loongson,ls2k1000-nand-controller"; 77 + reg = <0x1fe26000 0x24>, 78 + <0x1fe26040 0x4>, 79 + <0x1fe00438 0x8>; 80 + reg-names = "nand", "nand-dma", "dma-config"; 81 + dmas = <&apbdma0 0>; 82 + dma-names = "rxtx"; 83 + 84 + 
#address-cells = <1>; 85 + #size-cells = <0>; 86 + 87 + nand@0 { 88 + reg = <0>; 89 + label = "ls2k1000-nand"; 90 + nand-use-soft-ecc-engine; 91 + nand-ecc-algo = "bch"; 92 + nand-ecc-strength = <8>; 93 + nand-ecc-step-size = <512>; 98 94 }; 99 95 };
+41
Documentation/devicetree/bindings/mtd/realtek,rtl9301-ecc.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/mtd/realtek,rtl9301-ecc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Realtek SoCs NAND ECC engine 8 + 9 + maintainers: 10 + - Markus Stockhausen <markus.stockhausen@gmx.de> 11 + 12 + properties: 13 + compatible: 14 + const: realtek,rtl9301-ecc 15 + 16 + reg: 17 + maxItems: 1 18 + 19 + clocks: 20 + maxItems: 1 21 + 22 + interrupts: 23 + maxItems: 1 24 + 25 + required: 26 + - compatible 27 + - reg 28 + 29 + additionalProperties: false 30 + 31 + examples: 32 + - | 33 + soc { 34 + #address-cells = <1>; 35 + #size-cells = <1>; 36 + 37 + ecc0: ecc@1a600 { 38 + compatible = "realtek,rtl9301-ecc"; 39 + reg = <0x1a600 0x54>; 40 + }; 41 + };
-56
Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt
··· 1 - * Samsung S3C2410 and compatible NAND flash controller 2 - 3 - Required properties: 4 - - compatible : The possible values are: 5 - "samsung,s3c2410-nand" 6 - "samsung,s3c2412-nand" 7 - "samsung,s3c2440-nand" 8 - - reg : register's location and length. 9 - - #address-cells, #size-cells : see nand-controller.yaml 10 - - clocks : phandle to the nand controller clock 11 - - clock-names : must contain "nand" 12 - 13 - Optional child nodes: 14 - Child nodes representing the available nand chips. 15 - 16 - Optional child properties: 17 - - nand-ecc-mode : see nand-controller.yaml 18 - - nand-on-flash-bbt : see nand-controller.yaml 19 - 20 - Each child device node may optionally contain a 'partitions' sub-node, 21 - which further contains sub-nodes describing the flash partition mapping. 22 - See mtd.yaml for more detail. 23 - 24 - Example: 25 - 26 - nand-controller@4e000000 { 27 - compatible = "samsung,s3c2440-nand"; 28 - reg = <0x4e000000 0x40>; 29 - 30 - #address-cells = <1>; 31 - #size-cells = <0>; 32 - 33 - clocks = <&clocks HCLK_NAND>; 34 - clock-names = "nand"; 35 - 36 - nand { 37 - nand-ecc-mode = "soft"; 38 - nand-on-flash-bbt; 39 - 40 - partitions { 41 - compatible = "fixed-partitions"; 42 - #address-cells = <1>; 43 - #size-cells = <1>; 44 - 45 - partition@0 { 46 - label = "u-boot"; 47 - reg = <0 0x040000>; 48 - }; 49 - 50 - partition@40000 { 51 - label = "kernel"; 52 - reg = <0x040000 0x500000>; 53 - }; 54 - }; 55 - }; 56 - };
+1 -1
MAINTAINERS
··· 16992 16992 F: arch/mips/include/asm/mach-loongson32/ 16993 16993 F: arch/mips/loongson32/ 16994 16994 F: drivers/*/*loongson1* 16995 - F: drivers/mtd/nand/raw/loongson1-nand-controller.c 16995 + F: drivers/mtd/nand/raw/loongson-nand-controller.c 16996 16996 F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c 16997 16997 F: sound/soc/loongson/loongson1_ac97.c 16998 16998
+8
drivers/mtd/nand/Kconfig
··· 61 61 help 62 62 This enables support for the hardware ECC engine from Mediatek. 63 63 64 + config MTD_NAND_ECC_REALTEK 65 + tristate "Realtek RTL93xx hardware ECC engine" 66 + depends on HAS_IOMEM 67 + depends on MACH_REALTEK_RTL || COMPILE_TEST 68 + select MTD_NAND_ECC 69 + help 70 + This enables support for the hardware ECC engine from Realtek. 71 + 64 72 endmenu 65 73 66 74 endmenu
+1
drivers/mtd/nand/Makefile
··· 3 3 nandcore-objs := core.o bbt.o 4 4 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o 5 5 obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o 6 + obj-$(CONFIG_MTD_NAND_ECC_REALTEK) += ecc-realtek.o 6 7 obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o 7 8 obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o 8 9 obj-y += onenand/
+131
drivers/mtd/nand/core.c
··· 13 13 #include <linux/mtd/nand.h> 14 14 15 15 /** 16 + * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 17 + * @buf: buffer to test 18 + * @len: buffer length 19 + * @bitflips_threshold: maximum number of bitflips 20 + * 21 + * Check if a buffer contains only 0xff, which means the underlying region 22 + * has been erased and is ready to be programmed. 23 + * The bitflips_threshold specify the maximum number of bitflips before 24 + * considering the region is not erased. 25 + * Note: The logic of this function has been extracted from the memweight 26 + * implementation, except that nand_check_erased_buf function exit before 27 + * testing the whole buffer if the number of bitflips exceed the 28 + * bitflips_threshold value. 29 + * 30 + * Returns a positive number of bitflips less than or equal to 31 + * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 32 + * threshold. 33 + */ 34 + static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 35 + { 36 + const unsigned char *bitmap = buf; 37 + int bitflips = 0; 38 + int weight; 39 + 40 + for (; len && ((uintptr_t)bitmap) % sizeof(long); 41 + len--, bitmap++) { 42 + weight = hweight8(*bitmap); 43 + bitflips += BITS_PER_BYTE - weight; 44 + if (unlikely(bitflips > bitflips_threshold)) 45 + return -EBADMSG; 46 + } 47 + 48 + for (; len >= sizeof(long); 49 + len -= sizeof(long), bitmap += sizeof(long)) { 50 + unsigned long d = *((unsigned long *)bitmap); 51 + if (d == ~0UL) 52 + continue; 53 + weight = hweight_long(d); 54 + bitflips += BITS_PER_LONG - weight; 55 + if (unlikely(bitflips > bitflips_threshold)) 56 + return -EBADMSG; 57 + } 58 + 59 + for (; len > 0; len--, bitmap++) { 60 + weight = hweight8(*bitmap); 61 + bitflips += BITS_PER_BYTE - weight; 62 + if (unlikely(bitflips > bitflips_threshold)) 63 + return -EBADMSG; 64 + } 65 + 66 + return bitflips; 67 + } 68 + 69 + /** 70 + * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 
71 + * 0xff data 72 + * @data: data buffer to test 73 + * @datalen: data length 74 + * @ecc: ECC buffer 75 + * @ecclen: ECC length 76 + * @extraoob: extra OOB buffer 77 + * @extraooblen: extra OOB length 78 + * @bitflips_threshold: maximum number of bitflips 79 + * 80 + * Check if a data buffer and its associated ECC and OOB data contains only 81 + * 0xff pattern, which means the underlying region has been erased and is 82 + * ready to be programmed. 83 + * The bitflips_threshold specify the maximum number of bitflips before 84 + * considering the region as not erased. 85 + * 86 + * Note: 87 + * 1/ ECC algorithms are working on pre-defined block sizes which are usually 88 + * different from the NAND page size. When fixing bitflips, ECC engines will 89 + * report the number of errors per chunk, and the NAND core infrastructure 90 + * expect you to return the maximum number of bitflips for the whole page. 91 + * This is why you should always use this function on a single chunk and 92 + * not on the whole page. After checking each chunk you should update your 93 + * max_bitflips value accordingly. 94 + * 2/ When checking for bitflips in erased pages you should not only check 95 + * the payload data but also their associated ECC data, because a user might 96 + * have programmed almost all bits to 1 but a few. In this case, we 97 + * shouldn't consider the chunk as erased, and checking ECC bytes prevent 98 + * this case. 99 + * 3/ The extraoob argument is optional, and should be used if some of your OOB 100 + * data are protected by the ECC engine. 101 + * It could also be used if you support subpages and want to attach some 102 + * extra OOB data to an ECC chunk. 103 + * 104 + * Returns a positive number of bitflips less than or equal to 105 + * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 106 + * threshold. In case of success, the passed buffers are filled with 0xff. 
107 + */ 108 + int nand_check_erased_ecc_chunk(void *data, int datalen, 109 + void *ecc, int ecclen, 110 + void *extraoob, int extraooblen, 111 + int bitflips_threshold) 112 + { 113 + int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; 114 + 115 + data_bitflips = nand_check_erased_buf(data, datalen, 116 + bitflips_threshold); 117 + if (data_bitflips < 0) 118 + return data_bitflips; 119 + 120 + bitflips_threshold -= data_bitflips; 121 + 122 + ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); 123 + if (ecc_bitflips < 0) 124 + return ecc_bitflips; 125 + 126 + bitflips_threshold -= ecc_bitflips; 127 + 128 + extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, 129 + bitflips_threshold); 130 + if (extraoob_bitflips < 0) 131 + return extraoob_bitflips; 132 + 133 + if (data_bitflips) 134 + memset(data, 0xff, datalen); 135 + 136 + if (ecc_bitflips) 137 + memset(ecc, 0xff, ecclen); 138 + 139 + if (extraoob_bitflips) 140 + memset(extraoob, 0xff, extraooblen); 141 + 142 + return data_bitflips + ecc_bitflips + extraoob_bitflips; 143 + } 144 + EXPORT_SYMBOL(nand_check_erased_ecc_chunk); 145 + 146 + /** 16 147 * nanddev_isbad() - Check if a block is bad 17 148 * @nand: NAND device 18 149 * @pos: position pointing to the block we want to check
+7 -7
drivers/mtd/nand/ecc-mxic.c
··· 322 322 sg_init_table(ctx->sg, 2); 323 323 324 324 /* Configuration dump and sanity checks */ 325 - dev_err(dev, "DPE version number: %d\n", 325 + dev_dbg(dev, "DPE version number: %d\n", 326 326 readl(mxic->regs + DP_VER) >> DP_VER_OFFSET); 327 - dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE)); 328 - dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE)); 329 - dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg)); 330 - dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg)); 331 - dev_err(dev, "Parity size: %d\n", ctx->parity_sz); 332 - dev_err(dev, "Meta size: %d\n", ctx->meta_sz); 327 + dev_dbg(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE)); 328 + dev_dbg(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE)); 329 + dev_dbg(dev, "Spare size: %d\n", SPARE_SZ(spare_reg)); 330 + dev_dbg(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg)); 331 + dev_dbg(dev, "Parity size: %d\n", ctx->parity_sz); 332 + dev_dbg(dev, "Meta size: %d\n", ctx->meta_sz); 333 333 334 334 if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) != 335 335 SPARE_SZ(spare_reg)) {
+464
drivers/mtd/nand/ecc-realtek.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Support for Realtek hardware ECC engine in RTL93xx SoCs 4 + */ 5 + 6 + #include <linux/bitfield.h> 7 + #include <linux/dma-mapping.h> 8 + #include <linux/mtd/nand.h> 9 + #include <linux/mutex.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/regmap.h> 12 + 13 + /* 14 + * The Realtek ECC engine has two operation modes. 15 + * 16 + * - BCH6 : Generate 10 ECC bytes from 512 data bytes plus 6 free bytes 17 + * - BCH12 : Generate 20 ECC bytes from 512 data bytes plus 6 free bytes 18 + * 19 + * It can run for arbitrary NAND flash chips with different block and OOB sizes. Currently there 20 + * are only two known devices in the wild that have NAND flash and make use of this ECC engine 21 + * (Linksys LGS328C & LGS352C). To keep compatibility with vendor firmware, new modes can only 22 + * be added when new data layouts have been analyzed. For now allow BCH6 on flash with 2048 byte 23 + * blocks and 64 bytes oob. 24 + * 25 + * This driver aligns with kernel ECC naming conventions. Neverthless a short notice on the 26 + * Realtek naming conventions for the different structures in the OOB area. 27 + * 28 + * - BBI : Bad block indicator. The first two bytes of OOB. Protected by ECC! 29 + * - tag : 6 User/free bytes. First tag "contains" 2 bytes BBI. Protected by ECC! 30 + * - syndrome : ECC/parity bytes 31 + * 32 + * Altogether this gives currently the following block layout. 
33 + * 34 + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ 35 + * | 512 | 512 | 512 | 512 | 2 | 4 | 6 | 6 | 6 | 10 | 10 | 10 | 10 | 36 + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ 37 + * | data | data | data | data | BBI | free | free | free | free | ECC | ECC | ECC | ECC | 38 + * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+ 39 + */ 40 + 41 + #define RTL_ECC_ALLOWED_PAGE_SIZE 2048 42 + #define RTL_ECC_ALLOWED_OOB_SIZE 64 43 + #define RTL_ECC_ALLOWED_STRENGTH 6 44 + 45 + #define RTL_ECC_BLOCK_SIZE 512 46 + #define RTL_ECC_FREE_SIZE 6 47 + #define RTL_ECC_PARITY_SIZE_BCH6 10 48 + #define RTL_ECC_PARITY_SIZE_BCH12 20 49 + 50 + /* 51 + * The engine is fed with two DMA regions. One for data (always 512 bytes) and one for free bytes 52 + * and parity (either 16 bytes for BCH6 or 26 bytes for BCH12). Start and length of each must be 53 + * aligned to a multiple of 4. 
54 + */ 55 + 56 + #define RTL_ECC_DMA_FREE_PARITY_SIZE ALIGN(RTL_ECC_FREE_SIZE + RTL_ECC_PARITY_SIZE_BCH12, 4) 57 + #define RTL_ECC_DMA_SIZE (RTL_ECC_BLOCK_SIZE + RTL_ECC_DMA_FREE_PARITY_SIZE) 58 + 59 + #define RTL_ECC_CFG 0x00 60 + #define RTL_ECC_BCH6 0 61 + #define RTL_ECC_BCH12 BIT(28) 62 + #define RTL_ECC_DMA_PRECISE BIT(12) 63 + #define RTL_ECC_BURST_128 GENMASK(1, 0) 64 + #define RTL_ECC_DMA_TRIGGER 0x08 65 + #define RTL_ECC_OP_DECODE 0 66 + #define RTL_ECC_OP_ENCODE BIT(0) 67 + #define RTL_ECC_DMA_START 0x0c 68 + #define RTL_ECC_DMA_TAG 0x10 69 + #define RTL_ECC_STATUS 0x14 70 + #define RTL_ECC_CORR_COUNT GENMASK(19, 12) 71 + #define RTL_ECC_RESULT BIT(8) 72 + #define RTL_ECC_ALL_ONE BIT(4) 73 + #define RTL_ECC_OP_STATUS BIT(0) 74 + 75 + struct rtl_ecc_engine { 76 + struct device *dev; 77 + struct nand_ecc_engine engine; 78 + struct mutex lock; 79 + char *buf; 80 + dma_addr_t buf_dma; 81 + struct regmap *regmap; 82 + }; 83 + 84 + struct rtl_ecc_ctx { 85 + struct rtl_ecc_engine * rtlc; 86 + struct nand_ecc_req_tweak_ctx req_ctx; 87 + int steps; 88 + int bch_mode; 89 + int strength; 90 + int parity_size; 91 + }; 92 + 93 + static const struct regmap_config rtl_ecc_regmap_config = { 94 + .reg_bits = 32, 95 + .val_bits = 32, 96 + .reg_stride = 4, 97 + }; 98 + 99 + static inline void *nand_to_ctx(struct nand_device *nand) 100 + { 101 + return nand->ecc.ctx.priv; 102 + } 103 + 104 + static inline struct rtl_ecc_engine *nand_to_rtlc(struct nand_device *nand) 105 + { 106 + struct nand_ecc_engine *eng = nand->ecc.engine; 107 + 108 + return container_of(eng, struct rtl_ecc_engine, engine); 109 + } 110 + 111 + static int rtl_ecc_ooblayout_ecc(struct mtd_info *mtd, int section, 112 + struct mtd_oob_region *oobregion) 113 + { 114 + struct nand_device *nand = mtd_to_nanddev(mtd); 115 + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); 116 + 117 + if (section < 0 || section >= ctx->steps) 118 + return -ERANGE; 119 + 120 + oobregion->offset = ctx->steps * RTL_ECC_FREE_SIZE + 
section * ctx->parity_size; 121 + oobregion->length = ctx->parity_size; 122 + 123 + return 0; 124 + } 125 + 126 + static int rtl_ecc_ooblayout_free(struct mtd_info *mtd, int section, 127 + struct mtd_oob_region *oobregion) 128 + { 129 + struct nand_device *nand = mtd_to_nanddev(mtd); 130 + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); 131 + int bbm; 132 + 133 + if (section < 0 || section >= ctx->steps) 134 + return -ERANGE; 135 + 136 + /* reserve 2 BBM bytes in first block */ 137 + bbm = section ? 0 : 2; 138 + oobregion->offset = section * RTL_ECC_FREE_SIZE + bbm; 139 + oobregion->length = RTL_ECC_FREE_SIZE - bbm; 140 + 141 + return 0; 142 + } 143 + 144 + static const struct mtd_ooblayout_ops rtl_ecc_ooblayout_ops = { 145 + .ecc = rtl_ecc_ooblayout_ecc, 146 + .free = rtl_ecc_ooblayout_free, 147 + }; 148 + 149 + static void rtl_ecc_kick_engine(struct rtl_ecc_ctx *ctx, int operation) 150 + { 151 + struct rtl_ecc_engine *rtlc = ctx->rtlc; 152 + 153 + regmap_write(rtlc->regmap, RTL_ECC_CFG, 154 + ctx->bch_mode | RTL_ECC_BURST_128 | RTL_ECC_DMA_PRECISE); 155 + 156 + regmap_write(rtlc->regmap, RTL_ECC_DMA_START, rtlc->buf_dma); 157 + regmap_write(rtlc->regmap, RTL_ECC_DMA_TAG, rtlc->buf_dma + RTL_ECC_BLOCK_SIZE); 158 + regmap_write(rtlc->regmap, RTL_ECC_DMA_TRIGGER, operation); 159 + } 160 + 161 + static int rtl_ecc_wait_for_engine(struct rtl_ecc_ctx *ctx) 162 + { 163 + struct rtl_ecc_engine *rtlc = ctx->rtlc; 164 + int ret, status, bitflips; 165 + bool all_one; 166 + 167 + /* 168 + * The ECC engine needs 6-8 us to encode/decode a BCH6 syndrome for 512 bytes of data 169 + * and 6 free bytes. In case the NAND area has been erased and all data and oob is 170 + * set to 0xff, decoding takes 30us (reason unknown). Although the engine can trigger 171 + * interrupts when finished, use active polling for now. 12 us maximum wait time has 172 + * proven to be a good tradeoff between performance and overhead. 
173 + */ 174 + 175 + ret = regmap_read_poll_timeout(rtlc->regmap, RTL_ECC_STATUS, status, 176 + !(status & RTL_ECC_OP_STATUS), 12, 1000000); 177 + if (ret) 178 + return ret; 179 + 180 + ret = FIELD_GET(RTL_ECC_RESULT, status); 181 + all_one = FIELD_GET(RTL_ECC_ALL_ONE, status); 182 + bitflips = FIELD_GET(RTL_ECC_CORR_COUNT, status); 183 + 184 + /* For erased blocks (all bits one) error status can be ignored */ 185 + if (all_one) 186 + ret = 0; 187 + 188 + return ret ? -EBADMSG : bitflips; 189 + } 190 + 191 + static int rtl_ecc_run_engine(struct rtl_ecc_ctx *ctx, char *data, char *free, 192 + char *parity, int operation) 193 + { 194 + struct rtl_ecc_engine *rtlc = ctx->rtlc; 195 + char *buf_parity = rtlc->buf + RTL_ECC_BLOCK_SIZE + RTL_ECC_FREE_SIZE; 196 + char *buf_free = rtlc->buf + RTL_ECC_BLOCK_SIZE; 197 + char *buf_data = rtlc->buf; 198 + int ret; 199 + 200 + mutex_lock(&rtlc->lock); 201 + 202 + memcpy(buf_data, data, RTL_ECC_BLOCK_SIZE); 203 + memcpy(buf_free, free, RTL_ECC_FREE_SIZE); 204 + memcpy(buf_parity, parity, ctx->parity_size); 205 + 206 + dma_sync_single_for_device(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_TO_DEVICE); 207 + rtl_ecc_kick_engine(ctx, operation); 208 + ret = rtl_ecc_wait_for_engine(ctx); 209 + dma_sync_single_for_cpu(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_FROM_DEVICE); 210 + 211 + if (ret >= 0) { 212 + memcpy(data, buf_data, RTL_ECC_BLOCK_SIZE); 213 + memcpy(free, buf_free, RTL_ECC_FREE_SIZE); 214 + memcpy(parity, buf_parity, ctx->parity_size); 215 + } 216 + 217 + mutex_unlock(&rtlc->lock); 218 + 219 + return ret; 220 + } 221 + 222 + static int rtl_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req) 223 + { 224 + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); 225 + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); 226 + char *data, *free, *parity; 227 + int ret = 0; 228 + 229 + if (req->mode == MTD_OPS_RAW) 230 + return 0; 231 + 232 + nand_ecc_tweak_req(&ctx->req_ctx, req); 233 + 234 + if (req->type 
== NAND_PAGE_READ) 235 + return 0; 236 + 237 + free = req->oobbuf.in; 238 + data = req->databuf.in; 239 + parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE; 240 + 241 + for (int i = 0; i < ctx->steps; i++) { 242 + ret |= rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_ENCODE); 243 + 244 + free += RTL_ECC_FREE_SIZE; 245 + data += RTL_ECC_BLOCK_SIZE; 246 + parity += ctx->parity_size; 247 + } 248 + 249 + if (unlikely(ret)) 250 + dev_dbg(rtlc->dev, "ECC calculation failed\n"); 251 + 252 + return ret ? -EBADMSG : 0; 253 + } 254 + 255 + static int rtl_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req) 256 + { 257 + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); 258 + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); 259 + struct mtd_info *mtd = nanddev_to_mtd(nand); 260 + char *data, *free, *parity; 261 + bool failure = false; 262 + int bitflips = 0; 263 + 264 + if (req->mode == MTD_OPS_RAW) 265 + return 0; 266 + 267 + if (req->type == NAND_PAGE_WRITE) { 268 + nand_ecc_restore_req(&ctx->req_ctx, req); 269 + return 0; 270 + } 271 + 272 + free = req->oobbuf.in; 273 + data = req->databuf.in; 274 + parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE; 275 + 276 + for (int i = 0 ; i < ctx->steps; i++) { 277 + int ret = rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_DECODE); 278 + 279 + if (unlikely(ret < 0)) 280 + /* ECC totally fails for bitflips in erased blocks */ 281 + ret = nand_check_erased_ecc_chunk(data, RTL_ECC_BLOCK_SIZE, 282 + parity, ctx->parity_size, 283 + free, RTL_ECC_FREE_SIZE, 284 + ctx->strength); 285 + if (unlikely(ret < 0)) { 286 + failure = true; 287 + mtd->ecc_stats.failed++; 288 + } else { 289 + mtd->ecc_stats.corrected += ret; 290 + bitflips = max_t(unsigned int, bitflips, ret); 291 + } 292 + 293 + free += RTL_ECC_FREE_SIZE; 294 + data += RTL_ECC_BLOCK_SIZE; 295 + parity += ctx->parity_size; 296 + } 297 + 298 + nand_ecc_restore_req(&ctx->req_ctx, req); 299 + 300 + if (unlikely(failure)) 301 + 
dev_dbg(rtlc->dev, "ECC correction failed\n"); 302 + else if (unlikely(bitflips > 2)) 303 + dev_dbg(rtlc->dev, "%d bitflips detected\n", bitflips); 304 + 305 + return failure ? -EBADMSG : bitflips; 306 + } 307 + 308 + static int rtl_ecc_check_support(struct nand_device *nand) 309 + { 310 + struct mtd_info *mtd = nanddev_to_mtd(nand); 311 + struct device *dev = nand->ecc.engine->dev; 312 + 313 + if (mtd->oobsize != RTL_ECC_ALLOWED_OOB_SIZE || 314 + mtd->writesize != RTL_ECC_ALLOWED_PAGE_SIZE) { 315 + dev_err(dev, "only flash geometry data=%d, oob=%d supported\n", 316 + RTL_ECC_ALLOWED_PAGE_SIZE, RTL_ECC_ALLOWED_OOB_SIZE); 317 + return -EINVAL; 318 + } 319 + 320 + if (nand->ecc.user_conf.algo != NAND_ECC_ALGO_BCH || 321 + nand->ecc.user_conf.strength != RTL_ECC_ALLOWED_STRENGTH || 322 + nand->ecc.user_conf.placement != NAND_ECC_PLACEMENT_OOB || 323 + nand->ecc.user_conf.step_size != RTL_ECC_BLOCK_SIZE) { 324 + dev_err(dev, "only algo=bch, strength=%d, placement=oob, step=%d supported\n", 325 + RTL_ECC_ALLOWED_STRENGTH, RTL_ECC_BLOCK_SIZE); 326 + return -EINVAL; 327 + } 328 + 329 + return 0; 330 + } 331 + 332 + static int rtl_ecc_init_ctx(struct nand_device *nand) 333 + { 334 + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; 335 + struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand); 336 + struct mtd_info *mtd = nanddev_to_mtd(nand); 337 + int strength = nand->ecc.user_conf.strength; 338 + struct device *dev = nand->ecc.engine->dev; 339 + struct rtl_ecc_ctx *ctx; 340 + int ret; 341 + 342 + ret = rtl_ecc_check_support(nand); 343 + if (ret) 344 + return ret; 345 + 346 + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 347 + if (!ctx) 348 + return -ENOMEM; 349 + 350 + nand->ecc.ctx.priv = ctx; 351 + mtd_set_ooblayout(mtd, &rtl_ecc_ooblayout_ops); 352 + 353 + conf->algo = NAND_ECC_ALGO_BCH; 354 + conf->strength = strength; 355 + conf->step_size = RTL_ECC_BLOCK_SIZE; 356 + conf->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; 357 + 358 + ctx->rtlc = rtlc; 359 + ctx->steps = 
mtd->writesize / RTL_ECC_BLOCK_SIZE; 360 + ctx->strength = strength; 361 + ctx->bch_mode = strength == 6 ? RTL_ECC_BCH6 : RTL_ECC_BCH12; 362 + ctx->parity_size = strength == 6 ? RTL_ECC_PARITY_SIZE_BCH6 : RTL_ECC_PARITY_SIZE_BCH12; 363 + 364 + ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand); 365 + if (ret) 366 + return ret; 367 + 368 + dev_dbg(dev, "using bch%d with geometry data=%dx%d, free=%dx6, parity=%dx%d", 369 + conf->strength, ctx->steps, conf->step_size, 370 + ctx->steps, ctx->steps, ctx->parity_size); 371 + 372 + return 0; 373 + } 374 + 375 + static void rtl_ecc_cleanup_ctx(struct nand_device *nand) 376 + { 377 + struct rtl_ecc_ctx *ctx = nand_to_ctx(nand); 378 + 379 + if (ctx) 380 + nand_ecc_cleanup_req_tweaking(&ctx->req_ctx); 381 + } 382 + 383 + static struct nand_ecc_engine_ops rtl_ecc_engine_ops = { 384 + .init_ctx = rtl_ecc_init_ctx, 385 + .cleanup_ctx = rtl_ecc_cleanup_ctx, 386 + .prepare_io_req = rtl_ecc_prepare_io_req, 387 + .finish_io_req = rtl_ecc_finish_io_req, 388 + }; 389 + 390 + static int rtl_ecc_probe(struct platform_device *pdev) 391 + { 392 + struct device *dev = &pdev->dev; 393 + struct rtl_ecc_engine *rtlc; 394 + void __iomem *base; 395 + int ret; 396 + 397 + rtlc = devm_kzalloc(dev, sizeof(*rtlc), GFP_KERNEL); 398 + if (!rtlc) 399 + return -ENOMEM; 400 + 401 + base = devm_platform_ioremap_resource(pdev, 0); 402 + if (IS_ERR(base)) 403 + return PTR_ERR(base); 404 + 405 + ret = devm_mutex_init(dev, &rtlc->lock); 406 + if (ret) 407 + return ret; 408 + 409 + rtlc->regmap = devm_regmap_init_mmio(dev, base, &rtl_ecc_regmap_config); 410 + if (IS_ERR(rtlc->regmap)) 411 + return PTR_ERR(rtlc->regmap); 412 + 413 + /* 414 + * Focus on simplicity and use a preallocated DMA buffer for data exchange with the 415 + * engine. For now make it a noncoherent memory model as invalidating/flushing caches 416 + * is faster than reading/writing uncached memory on the known architectures. 
417 + */ 418 + 419 + rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma, 420 + DMA_BIDIRECTIONAL, GFP_KERNEL); 421 + if (IS_ERR(rtlc->buf)) 422 + return PTR_ERR(rtlc->buf); 423 + 424 + rtlc->dev = dev; 425 + rtlc->engine.dev = dev; 426 + rtlc->engine.ops = &rtl_ecc_engine_ops; 427 + rtlc->engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL; 428 + 429 + nand_ecc_register_on_host_hw_engine(&rtlc->engine); 430 + 431 + platform_set_drvdata(pdev, rtlc); 432 + 433 + return 0; 434 + } 435 + 436 + static void rtl_ecc_remove(struct platform_device *pdev) 437 + { 438 + struct rtl_ecc_engine *rtlc = platform_get_drvdata(pdev); 439 + 440 + nand_ecc_unregister_on_host_hw_engine(&rtlc->engine); 441 + dma_free_noncoherent(rtlc->dev, RTL_ECC_DMA_SIZE, rtlc->buf, rtlc->buf_dma, 442 + DMA_BIDIRECTIONAL); 443 + } 444 + 445 + static const struct of_device_id rtl_ecc_of_ids[] = { 446 + { 447 + .compatible = "realtek,rtl9301-ecc", 448 + }, 449 + { /* sentinel */ }, 450 + }; 451 + 452 + static struct platform_driver rtl_ecc_driver = { 453 + .driver = { 454 + .name = "rtl-nand-ecc-engine", 455 + .of_match_table = rtl_ecc_of_ids, 456 + }, 457 + .probe = rtl_ecc_probe, 458 + .remove = rtl_ecc_remove, 459 + }; 460 + module_platform_driver(rtl_ecc_driver); 461 + 462 + MODULE_LICENSE("GPL"); 463 + MODULE_AUTHOR("Markus Stockhausen <markus.stockhausen@gmx.de>"); 464 + MODULE_DESCRIPTION("Realtek NAND hardware ECC controller");
+1 -1
drivers/mtd/nand/ecc.c
··· 552 552 memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size); 553 553 } 554 554 555 - /* Copy the data that must be writen in the bounce buffers, if needed */ 555 + /* Copy the data that must be written in the bounce buffers, if needed */ 556 556 if (orig->type == NAND_PAGE_WRITE) { 557 557 if (ctx->bounce_data) 558 558 memcpy((void *)tweak->databuf.out + orig->dataoffs,
+2 -4
drivers/mtd/nand/qpic_common.c
··· 89 89 memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions)); 90 90 bam_txn->last_data_desc = NULL; 91 91 92 - sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * 93 - QPIC_PER_CW_CMD_SGL); 94 - sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * 95 - QPIC_PER_CW_DATA_SGL); 92 + sg_init_table(bam_txn->cmd_sgl, bam_txn->cmd_sgl_nitems); 93 + sg_init_table(bam_txn->data_sgl, bam_txn->data_sgl_nitems); 96 94 97 95 reinit_completion(&bam_txn->txn_done); 98 96 }
+4 -30
drivers/mtd/nand/raw/Kconfig
··· 77 77 help 78 78 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs 79 79 80 - config MTD_NAND_S3C2410 81 - tristate "Samsung S3C NAND controller" 82 - depends on ARCH_S3C64XX 83 - help 84 - This enables the NAND flash controller on the S3C24xx and S3C64xx 85 - SoCs 86 - 87 - No board specific support is done by this driver, each board 88 - must advertise a platform_device for the driver to attach. 89 - 90 - config MTD_NAND_S3C2410_DEBUG 91 - bool "Samsung S3C NAND controller debug" 92 - depends on MTD_NAND_S3C2410 93 - help 94 - Enable debugging of the S3C NAND driver 95 - 96 - config MTD_NAND_S3C2410_CLKSTOP 97 - bool "Samsung S3C NAND IDLE clock stop" 98 - depends on MTD_NAND_S3C2410 99 - default n 100 - help 101 - Stop the clock to the NAND controller when there is no chip 102 - selected to save power. This will mean there is a small delay 103 - when the is NAND chip selected or released, but will save 104 - approximately 5mA of power when there is nothing happening. 105 - 106 80 config MTD_NAND_SHARPSL 107 81 tristate "Sharp SL Series (C7xx + others) NAND controller" 108 82 depends on ARCH_PXA || COMPILE_TEST ··· 436 462 Enables support for the NAND controller found on 437 463 the Nuvoton MA35 series SoCs. 438 464 439 - config MTD_NAND_LOONGSON1 440 - tristate "Loongson1 NAND controller" 441 - depends on LOONGSON1_APB_DMA || COMPILE_TEST 465 + config MTD_NAND_LOONGSON 466 + tristate "Loongson NAND controller" 467 + depends on LOONGSON1_APB_DMA || LOONGSON2_APB_DMA || COMPILE_TEST 442 468 select REGMAP_MMIO 443 469 help 444 - Enables support for NAND controller on Loongson1 SoCs. 470 + Enables support for NAND controller on Loongson family chips. 445 471 446 472 comment "Misc" 447 473
+1 -2
drivers/mtd/nand/raw/Makefile
··· 9 9 obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o 10 10 obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o 11 11 obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 12 - obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 13 12 obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o 14 13 obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 15 14 obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o ··· 58 59 obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o 59 60 obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o 60 61 obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o 61 - obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o 62 + obj-$(CONFIG_MTD_NAND_LOONGSON) += loongson-nand-controller.o 62 63 63 64 nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o 64 65 nand-objs += nand_onfi.o
+11 -6
drivers/mtd/nand/raw/atmel/nand-controller.c
··· 1240 1240 const struct nand_interface_config *conf, 1241 1241 struct atmel_smc_cs_conf *smcconf) 1242 1242 { 1243 - u32 ncycles, totalcycles, timeps, mckperiodps; 1243 + u32 ncycles, totalcycles, timeps, mckperiodps, pulse; 1244 1244 struct atmel_nand_controller *nc; 1245 1245 int ret; 1246 1246 ··· 1366 1366 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED; 1367 1367 1368 1368 /* 1369 - * Read pulse timing directly matches tRP: 1369 + * Read pulse timing would directly match tRP, 1370 + * but some NAND flash chips (S34ML01G2 and W29N02KVxxAF) 1371 + * do not work properly in timing mode 3. 1372 + * The workaround is to extend the SMC NRD pulse to meet tREA 1373 + * timing. 1370 1374 * 1371 - * NRD_PULSE = tRP 1375 + * NRD_PULSE = max(tRP, tREA) 1372 1376 */ 1373 - ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps); 1377 + pulse = max(conf->timings.sdr.tRP_min, conf->timings.sdr.tREA_max); 1378 + ncycles = DIV_ROUND_UP(pulse, mckperiodps); 1374 1379 totalcycles += ncycles; 1375 1380 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT, 1376 1381 ncycles); ··· 1853 1848 1854 1849 static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) 1855 1850 { 1856 - struct device_node *np, *nand_np; 1851 + struct device_node *np; 1857 1852 struct device *dev = nc->dev; 1858 1853 int ret, reg_cells; 1859 1854 u32 val; ··· 1880 1875 1881 1876 reg_cells += val; 1882 1877 1883 - for_each_child_of_node(np, nand_np) { 1878 + for_each_child_of_node_scoped(np, nand_np) { 1884 1879 struct atmel_nand *nand; 1885 1880 1886 1881 nand = atmel_nand_create(nc, nand_np, reg_cells);
+5 -1
drivers/mtd/nand/raw/fsmc_nand.c
··· 876 876 if (!of_property_read_u32(np, "bank-width", &val)) { 877 877 if (val == 2) { 878 878 nand->options |= NAND_BUSWIDTH_16; 879 - } else if (val != 1) { 879 + } else if (val == 1) { 880 + nand->options |= NAND_BUSWIDTH_AUTO; 881 + } else { 880 882 dev_err(&pdev->dev, "invalid bank-width %u\n", val); 881 883 return -EINVAL; 882 884 } 885 + } else { 886 + nand->options |= NAND_BUSWIDTH_AUTO; 883 887 } 884 888 885 889 if (of_property_read_bool(np, "nand-skip-bbtscan"))
+11 -3
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
··· 145 145 return ret; 146 146 } 147 147 148 + #define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) 149 + #define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) 150 + 148 151 static int gpmi_init(struct gpmi_nand_data *this) 149 152 { 150 153 struct resources *r = &this->resources; ··· 2768 2765 pm_runtime_enable(&pdev->dev); 2769 2766 pm_runtime_set_autosuspend_delay(&pdev->dev, 500); 2770 2767 pm_runtime_use_autosuspend(&pdev->dev); 2768 + #ifndef CONFIG_PM 2769 + ret = gpmi_enable_clk(this); 2770 + if (ret) 2771 + goto exit_acquire_resources; 2772 + #endif 2771 2773 2772 2774 ret = gpmi_init(this); 2773 2775 if (ret) ··· 2808 2800 release_resources(this); 2809 2801 pm_runtime_dont_use_autosuspend(&pdev->dev); 2810 2802 pm_runtime_disable(&pdev->dev); 2803 + #ifndef CONFIG_PM 2804 + gpmi_disable_clk(this); 2805 + #endif 2811 2806 } 2812 2807 2813 2808 static int gpmi_pm_suspend(struct device *dev) ··· 2856 2845 2857 2846 return 0; 2858 2847 } 2859 - 2860 - #define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) 2861 - #define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) 2862 2848 2863 2849 static int gpmi_runtime_suspend(struct device *dev) 2864 2850 {
+1024
drivers/mtd/nand/raw/loongson-nand-controller.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NAND Controller Driver for Loongson family chips
 *
 * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
 * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
 */

/*
 * NOTE(review): FIELD_PREP()/GENMASK() are used below but <linux/bitfield.h>
 * is not included directly — presumably pulled in transitively; confirm.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

/* Loongson NAND Controller Registers */
#define LOONGSON_NAND_CMD		0x0
#define LOONGSON_NAND_ADDR1		0x4
#define LOONGSON_NAND_ADDR2		0x8
#define LOONGSON_NAND_TIMING		0xc
#define LOONGSON_NAND_IDL		0x10
#define LOONGSON_NAND_IDH_STATUS	0x14
#define LOONGSON_NAND_PARAM		0x18
#define LOONGSON_NAND_OP_NUM		0x1c
#define LOONGSON_NAND_CS_RDY_MAP	0x20

/* Bitfields of nand command register */
#define LOONGSON_NAND_CMD_OP_DONE	BIT(10)
#define LOONGSON_NAND_CMD_OP_SPARE	BIT(9)
#define LOONGSON_NAND_CMD_OP_MAIN	BIT(8)
#define LOONGSON_NAND_CMD_STATUS	BIT(7)
#define LOONGSON_NAND_CMD_RESET		BIT(6)
#define LOONGSON_NAND_CMD_READID	BIT(5)
#define LOONGSON_NAND_CMD_BLOCKS_ERASE	BIT(4)
#define LOONGSON_NAND_CMD_ERASE		BIT(3)
#define LOONGSON_NAND_CMD_WRITE		BIT(2)
#define LOONGSON_NAND_CMD_READ		BIT(1)
#define LOONGSON_NAND_CMD_VALID		BIT(0)

/* Bitfields of nand cs/rdy map register */
#define LOONGSON_NAND_MAP_CS1_SEL	GENMASK(11, 8)
#define LOONGSON_NAND_MAP_RDY1_SEL	GENMASK(15, 12)
#define LOONGSON_NAND_MAP_CS2_SEL	GENMASK(19, 16)
#define LOONGSON_NAND_MAP_RDY2_SEL	GENMASK(23, 20)
#define LOONGSON_NAND_MAP_CS3_SEL	GENMASK(27, 24)
#define LOONGSON_NAND_MAP_RDY3_SEL	GENMASK(31, 28)

#define LOONGSON_NAND_CS_SEL0		BIT(0)
#define LOONGSON_NAND_CS_SEL1		BIT(1)
#define LOONGSON_NAND_CS_SEL2		BIT(2)
#define LOONGSON_NAND_CS_SEL3		BIT(3)
#define LOONGSON_NAND_CS_RDY0		BIT(0)
#define LOONGSON_NAND_CS_RDY1		BIT(1)
#define LOONGSON_NAND_CS_RDY2		BIT(2)
#define LOONGSON_NAND_CS_RDY3		BIT(3)

/* Bitfields of nand timing register */
#define LOONGSON_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
#define LOONGSON_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)

/* Bitfields of nand parameter register */
#define LOONGSON_NAND_CELL_SIZE_MASK	GENMASK(11, 8)

#define LOONGSON_NAND_COL_ADDR_CYC	2U
#define LOONGSON_NAND_MAX_ADDR_CYC	5U

#define LOONGSON_NAND_READ_ID_SLEEP_US	1000
#define LOONGSON_NAND_READ_ID_TIMEOUT_US	5000

/* Width of one 32-bit controller register, in bits */
#define BITS_PER_WORD		(4 * BITS_PER_BYTE)

/* Loongson-2K1000 NAND DMA routing register */
#define LS2K1000_NAND_DMA_MASK	GENMASK(2, 0)
#define LS2K1000_DMA0_CONF	0x0
#define LS2K1000_DMA1_CONF	0x1
#define LS2K1000_DMA2_CONF	0x2
#define LS2K1000_DMA3_CONF	0x3
#define LS2K1000_DMA4_CONF	0x4

struct loongson_nand_host;

/*
 * State for one abstracted controller operation, built up while walking a
 * nand_subop and then programmed into the hardware.
 */
struct loongson_nand_op {
	char addrs[LOONGSON_NAND_MAX_ADDR_CYC];	/* raw address cycles from the subop */
	unsigned int naddrs;			/* number of address cycles parsed */
	unsigned int addrs_offset;		/* where parsed bytes land in addrs[] (erase skips the column cycles) */
	unsigned int aligned_offset;		/* bytes dropped when aligning the column to chip->buf_align */
	unsigned int cmd_reg;			/* accumulated LOONGSON_NAND_CMD value */
	unsigned int row_start;			/* bit position of the first row-address bit in ADDR1 */
	unsigned int rdy_timeout_ms;		/* WAITRDY timeout from the subop, 0 if none */
	unsigned int orig_len;			/* requested data length, before alignment */
	bool is_readid;
	bool is_erase;
	bool is_write;
	bool is_read;
	bool is_change_column;
	size_t len;				/* DMA length actually programmed (aligned) */
	char *buf;				/* caller's data buffer, NULL for data-less ops */
};

/* Per-SoC configuration and hooks */
struct loongson_nand_data {
	unsigned int max_id_cycle;	/* how many ID bytes the controller can latch */
	unsigned int id_cycle_field;	/* PARAM field selecting the ID cycle count (0 if fixed) */
	unsigned int status_field;	/* bits of IDH_STATUS holding the flash status byte */
	unsigned int op_scope_field;	/* PARAM field holding the operation scope (0 on ls1b) */
	unsigned int hold_cycle;	/* TIMING register hold cycles */
	unsigned int wait_cycle;	/* TIMING register wait cycles */
	unsigned int nand_cs;		/* chip select encoded into ADDR2 (0 = not used) */
	unsigned int dma_bits;		/* DMA address width for dma_set_mask_and_coherent() */
	int (*dma_config)(struct device *dev);	/* optional DMA-routing setup */
	void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
};

struct loongson_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	const struct loongson_nand_data *data;
	unsigned int addr_cs_field;	/* ADDR2 bits carrying the CS, depends on chip geometry */
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;		/* bus address of the controller's data FIFO window */
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};

static const struct regmap_config loongson_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/*
 * Translate a raw NAND opcode into the controller's abstracted command bits.
 * Two-phase commands (READ0/READSTART, ERASE1/ERASE2, ...) are validated by
 * flagging the first phase and checking it on the second.
 */
static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
					u8 opcode)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);

	op->row_start = chip->page_shift + 1;

	/* The controller abstracts the following NAND operations. */
	switch (opcode) {
	case NAND_CMD_STATUS:
		op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
		break;
	case NAND_CMD_RESET:
		op->cmd_reg = LOONGSON_NAND_CMD_RESET;
		break;
	case NAND_CMD_READID:
		op->is_readid = true;
		op->cmd_reg = LOONGSON_NAND_CMD_READID;
		break;
	case NAND_CMD_ERASE1:
		op->is_erase = true;
		/* Erase carries only row address cycles; skip the column slots. */
		op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
		break;
	case NAND_CMD_ERASE2:
		if (!op->is_erase)
			return -EOPNOTSUPP;
		/* During erasing, row_start differs from the default value. */
		op->row_start = chip->page_shift;
		op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
		break;
	case NAND_CMD_SEQIN:
		op->is_write = true;
		break;
	case NAND_CMD_PAGEPROG:
		if (!op->is_write)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
		break;
	case NAND_CMD_READ0:
		op->is_read = true;
		break;
	case NAND_CMD_READSTART:
		if (!op->is_read)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	case NAND_CMD_RNDOUT:
		op->is_change_column = true;
		break;
	case NAND_CMD_RNDOUTSTART:
		if (!op->is_change_column)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	default:
		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * Walk every instruction of a subop and fill @op: command bits, address
 * cycles, data buffer/length and the ready timeout.
 */
static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
					    struct loongson_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}

/*
 * Program the chip-select bits into ADDR2 on SoCs that need it (nand_cs != 0).
 */
static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!host->data->nand_cs)
		return;

	/*
	 * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
	 * information such as NAND chip selection and capacity is unknown. As a
	 * workaround, we use 128MB cellsize (2KB pagesize) as a fallback.
	 */
	if (!mtd->writesize)
		host->addr_cs_field = GENMASK(17, 16);

	regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
			   host->data->nand_cs << __ffs(host->addr_cs_field));
}

/*
 * ls1b layout: both column and row address live in ADDR1; the row part starts
 * at op->row_start, and the fifth address byte may spill into ADDR2.
 */
static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			/* Column bits must stay below the page boundary. */
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);

			/* The 5th cycle overflows ADDR1; put the high bits into ADDR2. */
			if (i == 4) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
			}
		}
	}
}

/*
 * ls1c/ls2k layout: column address in ADDR1, row address in ADDR2 starting at
 * bit 0, plus optional chip-select bits in ADDR2.
 */
static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
		}
	}

	loongson_nand_set_addr_cs(host);
}

/*
 * Program address, length, operation scope and command, then kick the
 * controller by setting the VALID bit. The column address is aligned down to
 * chip->buf_align; the dropped bytes are remembered in op->aligned_offset so
 * the DMA path can compensate.
 */
static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int col0 = op->addrs[0];
	short col;

	if (!IS_ALIGNED(col0, chip->buf_align)) {
		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
		op->aligned_offset = op->addrs[0] - col0;
		op->addrs[0] = col0;
	}

	if (host->data->set_addr)
		host->data->set_addr(host, op);

	/* set operation length */
	if (op->is_write || op->is_read || op->is_change_column)
		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
	else if (op->is_erase)
		op->len = 1;
	else
		op->len = op->orig_len;

	writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);

	/* set operation area and scope */
	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
	if (op->orig_len && !op->is_readid) {
		unsigned int op_scope = 0;

		/* Starting inside the main area enables main-area access too. */
		if (col < mtd->writesize) {
			op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
			op_scope = mtd->writesize;
		}

		op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
		op_scope += mtd->oobsize;

		op_scope <<= __ffs(host->data->op_scope_field);
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
				   host->data->op_scope_field, op_scope);
	}

	/* set command */
	writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);

	/* trigger operation */
	regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
			  LOONGSON_NAND_CMD_VALID);
}

/*
 * Poll the OP_DONE bit until the controller finishes, bounded by the WAITRDY
 * timeout from the subop (no polling when the subop carried no WAITRDY).
 * NOTE(review): the ms->us conversion uses MSEC_PER_SEC (numerically 1000,
 * same as USEC_PER_MSEC) — works, but the constant name is misleading.
 */
static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
					  struct loongson_nand_op *op)
{
	unsigned int val;
	int ret = 0;

	if (op->rdy_timeout_ms) {
		ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
					       val, val & LOONGSON_NAND_CMD_OP_DONE,
					       0, op->rdy_timeout_ms * MSEC_PER_SEC);
		if (ret)
			dev_err(host->dev, "operation failed\n");
	}

	return ret;
}

/* DMA completion callback: only complete the waiter on DMA_COMPLETE. */
static void loongson_nand_dma_callback(void *data)
{
	struct loongson_nand_host *host = (struct loongson_nand_host *)data;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	enum dma_status status;

	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
	if (likely(status == DMA_COMPLETE)) {
		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
		complete(&host->dma_complete);
	} else {
		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
	}
}

/*
 * Move op->len bytes between the caller's buffer and the controller FIFO.
 * Aligned buffers are mapped directly; unaligned reads go through a coherent
 * bounce buffer and are copied back at op->aligned_offset. Unaligned writes
 * are rejected (subpage writes are not supported).
 */
static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = loongson_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Bounce-buffer read: skip the alignment padding when copying back. */
	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}

/* exec_op handler for data-carrying patterns: trigger, DMA, wait for done. */
static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, &op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, &op);

	ret = loongson_nand_dma_transfer(host, &op);
	if (ret)
		return ret;

	return loongson_nand_wait_for_op_done(host, &op);
}

/* Common path for DMA-less operations: trigger and wait for completion. */
static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
					struct loongson_nand_op *op)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, op);

	return loongson_nand_wait_for_op_done(host, op);
}

/* exec_op handler for patterns carrying no data (RESET, ERASE). */
static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_op op = {};

	return loongson_nand_misc_type_exec(chip, subop, &op);
}

/*
 * exec_op handler for READID: run the controller op, then poll the IDL
 * register until non-zero and assemble up to max_id_cycle ID bytes from the
 * IDL/IDH register pair.
 * NOTE(review): the ids[]/idl+idh union relies on little-endian layout —
 * presumably fine on Loongson, but worth confirming.
 */
static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int i, ret;
	union {
		char ids[6];
		struct {
			int idl;
			u16 idh;
		};
	} nand_id;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
				       LOONGSON_NAND_READ_ID_SLEEP_US,
				       LOONGSON_NAND_READ_ID_TIMEOUT_US);
	if (ret)
		return ret;

	nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);

	/* The controller latches the ID most-significant byte first. */
	for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
		op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];

	return ret;
}

/*
 * exec_op handler for STATUS: run the controller op and extract the flash
 * status byte from IDH_STATUS.
 * NOTE(review): `val &= ~status_field` clears the status bits and the result
 * is then shifted *left* by ffs(); typical field extraction would be
 * (val & field) >> __ffs(field). Confirm against the controller manual.
 */
static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
					       const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int val, ret;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
	val &= ~host->data->status_field;
	op.buf[0] = val << ffs(host->data->status_field);

	return ret;
}

/* Supported instruction patterns, matched in order by nand_op_parser_exec_op(). */
static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

/* Single-command operations the controller can abstract. */
static int loongson_nand_is_valid_cmd(u8 opcode)
{
	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
		return 0;

	return -EOPNOTSUPP;
}

/* Two-command sequences the controller can abstract. */
static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
{
	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
		return 0;

	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
		return 0;

	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
		return 0;

	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
		return 0;

	return -EOPNOTSUPP;
}

/*
 * check_only path of exec_op: validate the first one or two CMD instructions
 * against the sequences the hardware supports.
 */
static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
{
	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
	int op_id;

	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &op->instrs[op_id];

		if (instr->type == NAND_OP_CMD_INSTR) {
			if (!instr1)
				instr1 = instr;
			else if (!instr2)
				instr2 = instr;
			else
				break;
		}
	}

	if (!instr1)
		return -EOPNOTSUPP;

	if (!instr2)
		return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);

	return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
}

/* nand_controller_ops.exec_op entry point. */
static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
				 bool check_only)
{
	if (check_only)
		return loongson_nand_check_op(chip, op);

	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
}

/*
 * Map (page size, chip size) to the controller's cell-size code, and record
 * which ADDR2 bits carry the chip select for this geometry. Returns the cell
 * size code, or -EINVAL for unsupported geometries.
 */
static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	u64 chipsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);

	switch (mtd->writesize) {
	case SZ_512:
		switch (chipsize) {
		case SZ_8M:
			host->addr_cs_field = GENMASK(15, 14);
			return 0x9;
		case SZ_16M:
			host->addr_cs_field = GENMASK(16, 15);
			return 0xa;
		case SZ_32M:
			host->addr_cs_field = GENMASK(17, 16);
			return 0xb;
		case SZ_64M:
			host->addr_cs_field = GENMASK(18, 17);
			return 0xc;
		case SZ_128M:
			host->addr_cs_field = GENMASK(19, 18);
			return 0xd;
		}
		break;
	case SZ_2K:
		switch (chipsize) {
		case SZ_128M:
			host->addr_cs_field = GENMASK(17, 16);
			return 0x0;
		case SZ_256M:
			host->addr_cs_field = GENMASK(18, 17);
			return 0x1;
		case SZ_512M:
			host->addr_cs_field = GENMASK(19, 18);
			return 0x2;
		case SZ_1G:
			host->addr_cs_field = GENMASK(20, 19);
			return 0x3;
		}
		break;
	case SZ_4K:
		if (chipsize == SZ_2G) {
			host->addr_cs_field = GENMASK(20, 19);
			return 0x4;
		}
		break;
	case SZ_8K:
		switch (chipsize) {
		case SZ_4G:
			host->addr_cs_field = GENMASK(20, 19);
			return 0x5;
		case SZ_8G:
			host->addr_cs_field = GENMASK(21, 20);
			return 0x6;
		case SZ_16G:
			host->addr_cs_field = GENMASK(22, 21);
			return 0x7;
		}
		break;
	}

	dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n",
		chipsize, mtd->writesize);
	return -EINVAL;
}

/*
 * attach_chip hook: program cell size and timing once the chip geometry is
 * known. Only no-ECC and software ECC are supported; the controller has no
 * ECC engine of its own.
 */
static int loongson_nand_attach_chip(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int cell_size = loongson_nand_get_chip_capacity(chip);

	if (cell_size < 0)
		return cell_size;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	/* set cell size */
	regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
			   FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));

	/* Whole pages (main + OOB) are transferred in one DMA shot. */
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}

static const struct nand_controller_ops loongson_nand_controller_ops = {
	.exec_op = loongson_nand_exec_op,
	.attach_chip = loongson_nand_attach_chip,
};

/* Release the DMA channel if it was acquired. */
static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
{
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
}

/*
 * Route APB DMA channel 0 to the NAND controller via the ls2k1000-specific
 * "dma-config" register.
 */
static int ls2k1000_nand_apbdma_config(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *regs;
	int val;

	regs = devm_platform_ioremap_resource_byname(pdev, "dma-config");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	val = readl(regs);
	val |= FIELD_PREP(LS2K1000_NAND_DMA_MASK, LS2K1000_DMA0_CONF);
	writel(val, regs);

	return 0;
}

/*
 * One-time controller setup: regmap, ID cycle count, DMA mask, CS/RDY
 * routing, optional SoC DMA routing hook, and the "rxtx" DMA channel
 * configured to target the controller FIFO in both directions.
 */
static int loongson_nand_controller_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret, val;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	if (host->data->id_cycle_field)
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
				   host->data->max_id_cycle << __ffs(host->data->id_cycle_field));

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits));
	if (ret)
		return dev_err_probe(dev, ret, "failed to set DMA mask\n");

	val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3);

	regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val);

	if (host->data->dma_config) {
		ret = host->data->dma_config(dev);
		if (ret)
			return dev_err_probe(dev, ret, "failed to config DMA routing\n");
	}

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}

/*
 * Register the single supported NAND chip described by the lone DT child
 * node. A nonempty MTD label is required.
 */
static int loongson_nand_chip_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	/* Hardware requires 16-byte-aligned buffers/lengths for DMA. */
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}

/*
 * Probe: map the register window, map the "nand-dma" FIFO window for the DMA
 * engine, then bring up the controller and the chip.
 */
static int loongson_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct loongson_nand_data *data;
	struct loongson_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &loongson_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = loongson_nand_controller_init(host);
	if (ret)
		goto err;

	ret = loongson_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	loongson_nand_controller_cleanup(host);

	return ret;
}

/*
 * Remove: tear down MTD/NAND and release the DMA channel.
 * NOTE(review): the dma_map_resource() mapping taken in probe is not
 * unmapped here (nor on probe error paths) — confirm this is intentional.
 */
static void loongson_nand_remove(struct platform_device *pdev)
{
	struct loongson_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	loongson_nand_controller_cleanup(host);
}

/* Per-SoC data: ls1b has a fixed ID cycle count and no op-scope field. */
static const struct loongson_nand_data ls1b_nand_data = {
	.max_id_cycle = 5,
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1b_nand_set_addr,
};

static const struct loongson_nand_data ls1c_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1c_nand_set_addr,
};

static const struct loongson_nand_data ls2k0500_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x4,
	.wait_cycle = 0x12,
	.dma_bits = 64,
	.set_addr = ls1c_nand_set_addr,
};

static const struct loongson_nand_data ls2k1000_nand_data = { 977 + .max_id_cycle = 6, 978 + .id_cycle_field = GENMASK(14, 12), 979 + .status_field = GENMASK(23, 16), 980 + .op_scope_field = GENMASK(29, 16), 981 + .hold_cycle = 0x4, 982 + .wait_cycle = 0x12, 983 + .nand_cs = 0x2, 984 + .dma_bits = 64, 985 + .dma_config = ls2k1000_nand_apbdma_config, 986 + .set_addr = ls1c_nand_set_addr, 987 + }; 988 + 989 + static const struct of_device_id loongson_nand_match[] = { 990 + { 991 + .compatible = "loongson,ls1b-nand-controller", 992 + .data = &ls1b_nand_data, 993 + }, 994 + { 995 + .compatible = "loongson,ls1c-nand-controller", 996 + .data = &ls1c_nand_data, 997 + }, 998 + { 999 + .compatible = "loongson,ls2k0500-nand-controller", 1000 + .data = &ls2k0500_nand_data, 1001 + }, 1002 + { 1003 + .compatible = "loongson,ls2k1000-nand-controller", 1004 + .data = &ls2k1000_nand_data, 1005 + }, 1006 + { /* sentinel */ } 1007 + }; 1008 + MODULE_DEVICE_TABLE(of, loongson_nand_match); 1009 + 1010 + static struct platform_driver loongson_nand_driver = { 1011 + .probe = loongson_nand_probe, 1012 + .remove = loongson_nand_remove, 1013 + .driver = { 1014 + .name = KBUILD_MODNAME, 1015 + .of_match_table = loongson_nand_match, 1016 + }, 1017 + }; 1018 + 1019 + module_platform_driver(loongson_nand_driver); 1020 + 1021 + MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); 1022 + MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>"); 1023 + MODULE_DESCRIPTION("Loongson NAND Controller Driver"); 1024 + MODULE_LICENSE("GPL");
-836
drivers/mtd/nand/raw/loongson1-nand-controller.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * NAND Controller Driver for Loongson-1 SoC 4 - * 5 - * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com> 6 - */ 7 - 8 - #include <linux/kernel.h> 9 - #include <linux/module.h> 10 - #include <linux/dmaengine.h> 11 - #include <linux/dma-mapping.h> 12 - #include <linux/iopoll.h> 13 - #include <linux/mtd/mtd.h> 14 - #include <linux/mtd/rawnand.h> 15 - #include <linux/of.h> 16 - #include <linux/platform_device.h> 17 - #include <linux/regmap.h> 18 - #include <linux/sizes.h> 19 - 20 - /* Loongson-1 NAND Controller Registers */ 21 - #define LS1X_NAND_CMD 0x0 22 - #define LS1X_NAND_ADDR1 0x4 23 - #define LS1X_NAND_ADDR2 0x8 24 - #define LS1X_NAND_TIMING 0xc 25 - #define LS1X_NAND_IDL 0x10 26 - #define LS1X_NAND_IDH_STATUS 0x14 27 - #define LS1X_NAND_PARAM 0x18 28 - #define LS1X_NAND_OP_NUM 0x1c 29 - 30 - /* NAND Command Register Bits */ 31 - #define LS1X_NAND_CMD_OP_DONE BIT(10) 32 - #define LS1X_NAND_CMD_OP_SPARE BIT(9) 33 - #define LS1X_NAND_CMD_OP_MAIN BIT(8) 34 - #define LS1X_NAND_CMD_STATUS BIT(7) 35 - #define LS1X_NAND_CMD_RESET BIT(6) 36 - #define LS1X_NAND_CMD_READID BIT(5) 37 - #define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4) 38 - #define LS1X_NAND_CMD_ERASE BIT(3) 39 - #define LS1X_NAND_CMD_WRITE BIT(2) 40 - #define LS1X_NAND_CMD_READ BIT(1) 41 - #define LS1X_NAND_CMD_VALID BIT(0) 42 - 43 - #define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0) 44 - #define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8) 45 - #define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8) 46 - 47 - #define LS1X_NAND_COL_ADDR_CYC 2U 48 - #define LS1X_NAND_MAX_ADDR_CYC 5U 49 - 50 - #define BITS_PER_WORD (4 * BITS_PER_BYTE) 51 - 52 - struct ls1x_nand_host; 53 - 54 - struct ls1x_nand_op { 55 - char addrs[LS1X_NAND_MAX_ADDR_CYC]; 56 - unsigned int naddrs; 57 - unsigned int addrs_offset; 58 - unsigned int aligned_offset; 59 - unsigned int cmd_reg; 60 - unsigned int row_start; 61 - unsigned int rdy_timeout_ms; 62 - unsigned int orig_len; 63 - 
bool is_readid; 64 - bool is_erase; 65 - bool is_write; 66 - bool is_read; 67 - bool is_change_column; 68 - size_t len; 69 - char *buf; 70 - }; 71 - 72 - struct ls1x_nand_data { 73 - unsigned int status_field; 74 - unsigned int op_scope_field; 75 - unsigned int hold_cycle; 76 - unsigned int wait_cycle; 77 - void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op); 78 - }; 79 - 80 - struct ls1x_nand_host { 81 - struct device *dev; 82 - struct nand_chip chip; 83 - struct nand_controller controller; 84 - const struct ls1x_nand_data *data; 85 - void __iomem *reg_base; 86 - struct regmap *regmap; 87 - /* DMA Engine stuff */ 88 - dma_addr_t dma_base; 89 - struct dma_chan *dma_chan; 90 - dma_cookie_t dma_cookie; 91 - struct completion dma_complete; 92 - }; 93 - 94 - static const struct regmap_config ls1x_nand_regmap_config = { 95 - .reg_bits = 32, 96 - .val_bits = 32, 97 - .reg_stride = 4, 98 - }; 99 - 100 - static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode) 101 - { 102 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 103 - 104 - op->row_start = chip->page_shift + 1; 105 - 106 - /* The controller abstracts the following NAND operations. */ 107 - switch (opcode) { 108 - case NAND_CMD_STATUS: 109 - op->cmd_reg = LS1X_NAND_CMD_STATUS; 110 - break; 111 - case NAND_CMD_RESET: 112 - op->cmd_reg = LS1X_NAND_CMD_RESET; 113 - break; 114 - case NAND_CMD_READID: 115 - op->is_readid = true; 116 - op->cmd_reg = LS1X_NAND_CMD_READID; 117 - break; 118 - case NAND_CMD_ERASE1: 119 - op->is_erase = true; 120 - op->addrs_offset = LS1X_NAND_COL_ADDR_CYC; 121 - break; 122 - case NAND_CMD_ERASE2: 123 - if (!op->is_erase) 124 - return -EOPNOTSUPP; 125 - /* During erasing, row_start differs from the default value. 
*/ 126 - op->row_start = chip->page_shift; 127 - op->cmd_reg = LS1X_NAND_CMD_ERASE; 128 - break; 129 - case NAND_CMD_SEQIN: 130 - op->is_write = true; 131 - break; 132 - case NAND_CMD_PAGEPROG: 133 - if (!op->is_write) 134 - return -EOPNOTSUPP; 135 - op->cmd_reg = LS1X_NAND_CMD_WRITE; 136 - break; 137 - case NAND_CMD_READ0: 138 - op->is_read = true; 139 - break; 140 - case NAND_CMD_READSTART: 141 - if (!op->is_read) 142 - return -EOPNOTSUPP; 143 - op->cmd_reg = LS1X_NAND_CMD_READ; 144 - break; 145 - case NAND_CMD_RNDOUT: 146 - op->is_change_column = true; 147 - break; 148 - case NAND_CMD_RNDOUTSTART: 149 - if (!op->is_change_column) 150 - return -EOPNOTSUPP; 151 - op->cmd_reg = LS1X_NAND_CMD_READ; 152 - break; 153 - default: 154 - dev_dbg(host->dev, "unsupported opcode: %u\n", opcode); 155 - return -EOPNOTSUPP; 156 - } 157 - 158 - return 0; 159 - } 160 - 161 - static int ls1x_nand_parse_instructions(struct nand_chip *chip, 162 - const struct nand_subop *subop, struct ls1x_nand_op *op) 163 - { 164 - unsigned int op_id; 165 - int ret; 166 - 167 - for (op_id = 0; op_id < subop->ninstrs; op_id++) { 168 - const struct nand_op_instr *instr = &subop->instrs[op_id]; 169 - unsigned int offset, naddrs; 170 - const u8 *addrs; 171 - 172 - switch (instr->type) { 173 - case NAND_OP_CMD_INSTR: 174 - ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode); 175 - if (ret < 0) 176 - return ret; 177 - 178 - break; 179 - case NAND_OP_ADDR_INSTR: 180 - naddrs = nand_subop_get_num_addr_cyc(subop, op_id); 181 - if (naddrs > LS1X_NAND_MAX_ADDR_CYC) 182 - return -EOPNOTSUPP; 183 - op->naddrs = naddrs; 184 - offset = nand_subop_get_addr_start_off(subop, op_id); 185 - addrs = &instr->ctx.addr.addrs[offset]; 186 - memcpy(op->addrs + op->addrs_offset, addrs, naddrs); 187 - break; 188 - case NAND_OP_DATA_IN_INSTR: 189 - case NAND_OP_DATA_OUT_INSTR: 190 - offset = nand_subop_get_data_start_off(subop, op_id); 191 - op->orig_len = nand_subop_get_data_len(subop, op_id); 192 - if 
(instr->type == NAND_OP_DATA_IN_INSTR) 193 - op->buf = instr->ctx.data.buf.in + offset; 194 - else if (instr->type == NAND_OP_DATA_OUT_INSTR) 195 - op->buf = (void *)instr->ctx.data.buf.out + offset; 196 - 197 - break; 198 - case NAND_OP_WAITRDY_INSTR: 199 - op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; 200 - break; 201 - default: 202 - break; 203 - } 204 - } 205 - 206 - return 0; 207 - } 208 - 209 - static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) 210 - { 211 - struct nand_chip *chip = &host->chip; 212 - int i; 213 - 214 - for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { 215 - int shift, mask, val; 216 - 217 - if (i < LS1X_NAND_COL_ADDR_CYC) { 218 - shift = i * BITS_PER_BYTE; 219 - mask = (u32)0xff << shift; 220 - mask &= GENMASK(chip->page_shift, 0); 221 - val = (u32)op->addrs[i] << shift; 222 - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); 223 - } else if (!op->is_change_column) { 224 - shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; 225 - mask = (u32)0xff << shift; 226 - val = (u32)op->addrs[i] << shift; 227 - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); 228 - 229 - if (i == 4) { 230 - mask = (u32)0xff >> (BITS_PER_WORD - shift); 231 - val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift); 232 - regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); 233 - } 234 - } 235 - } 236 - } 237 - 238 - static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) 239 - { 240 - int i; 241 - 242 - for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { 243 - int shift, mask, val; 244 - 245 - if (i < LS1X_NAND_COL_ADDR_CYC) { 246 - shift = i * BITS_PER_BYTE; 247 - mask = (u32)0xff << shift; 248 - val = (u32)op->addrs[i] << shift; 249 - regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); 250 - } else if (!op->is_change_column) { 251 - shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; 252 - mask = (u32)0xff << shift; 253 - val = (u32)op->addrs[i] << 
shift; 254 - regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); 255 - } 256 - } 257 - } 258 - 259 - static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op) 260 - { 261 - struct nand_chip *chip = &host->chip; 262 - struct mtd_info *mtd = nand_to_mtd(chip); 263 - int col0 = op->addrs[0]; 264 - short col; 265 - 266 - if (!IS_ALIGNED(col0, chip->buf_align)) { 267 - col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align); 268 - op->aligned_offset = op->addrs[0] - col0; 269 - op->addrs[0] = col0; 270 - } 271 - 272 - if (host->data->set_addr) 273 - host->data->set_addr(host, op); 274 - 275 - /* set operation length */ 276 - if (op->is_write || op->is_read || op->is_change_column) 277 - op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align); 278 - else if (op->is_erase) 279 - op->len = 1; 280 - else 281 - op->len = op->orig_len; 282 - 283 - writel(op->len, host->reg_base + LS1X_NAND_OP_NUM); 284 - 285 - /* set operation area and scope */ 286 - col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0]; 287 - if (op->orig_len && !op->is_readid) { 288 - unsigned int op_scope = 0; 289 - 290 - if (col < mtd->writesize) { 291 - op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN; 292 - op_scope = mtd->writesize; 293 - } 294 - 295 - op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE; 296 - op_scope += mtd->oobsize; 297 - 298 - op_scope <<= __ffs(host->data->op_scope_field); 299 - regmap_update_bits(host->regmap, LS1X_NAND_PARAM, 300 - host->data->op_scope_field, op_scope); 301 - } 302 - 303 - /* set command */ 304 - writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD); 305 - 306 - /* trigger operation */ 307 - regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID); 308 - } 309 - 310 - static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op) 311 - { 312 - unsigned int val; 313 - int ret = 0; 314 - 315 - if (op->rdy_timeout_ms) { 316 - ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD, 317 - 
val, val & LS1X_NAND_CMD_OP_DONE, 318 - 0, op->rdy_timeout_ms * MSEC_PER_SEC); 319 - if (ret) 320 - dev_err(host->dev, "operation failed\n"); 321 - } 322 - 323 - return ret; 324 - } 325 - 326 - static void ls1x_nand_dma_callback(void *data) 327 - { 328 - struct ls1x_nand_host *host = (struct ls1x_nand_host *)data; 329 - struct dma_chan *chan = host->dma_chan; 330 - struct device *dev = chan->device->dev; 331 - enum dma_status status; 332 - 333 - status = dmaengine_tx_status(chan, host->dma_cookie, NULL); 334 - if (likely(status == DMA_COMPLETE)) { 335 - dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie); 336 - complete(&host->dma_complete); 337 - } else { 338 - dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie); 339 - } 340 - } 341 - 342 - static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op) 343 - { 344 - struct nand_chip *chip = &host->chip; 345 - struct dma_chan *chan = host->dma_chan; 346 - struct device *dev = chan->device->dev; 347 - struct dma_async_tx_descriptor *desc; 348 - enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 349 - enum dma_transfer_direction xfer_dir = op->is_write ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 350 - void *buf = op->buf; 351 - char *dma_buf = NULL; 352 - dma_addr_t dma_addr; 353 - int ret; 354 - 355 - if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) && 356 - IS_ALIGNED(op->orig_len, chip->buf_align)) { 357 - dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir); 358 - if (dma_mapping_error(dev, dma_addr)) { 359 - dev_err(dev, "failed to map DMA buffer\n"); 360 - return -ENXIO; 361 - } 362 - } else if (!op->is_write) { 363 - dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL); 364 - if (!dma_buf) 365 - return -ENOMEM; 366 - } else { 367 - dev_err(dev, "subpage writing not supported\n"); 368 - return -EOPNOTSUPP; 369 - } 370 - 371 - desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT); 372 - if (!desc) { 373 - dev_err(dev, "failed to prepare DMA descriptor\n"); 374 - ret = -ENOMEM; 375 - goto err; 376 - } 377 - desc->callback = ls1x_nand_dma_callback; 378 - desc->callback_param = host; 379 - 380 - host->dma_cookie = dmaengine_submit(desc); 381 - ret = dma_submit_error(host->dma_cookie); 382 - if (ret) { 383 - dev_err(dev, "failed to submit DMA descriptor\n"); 384 - goto err; 385 - } 386 - 387 - dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie); 388 - dma_async_issue_pending(chan); 389 - 390 - if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) { 391 - dmaengine_terminate_sync(chan); 392 - reinit_completion(&host->dma_complete); 393 - ret = -ETIMEDOUT; 394 - goto err; 395 - } 396 - 397 - if (dma_buf) 398 - memcpy(buf, dma_buf + op->aligned_offset, op->orig_len); 399 - err: 400 - if (dma_buf) 401 - dma_free_coherent(dev, op->len, dma_buf, dma_addr); 402 - else 403 - dma_unmap_single(dev, dma_addr, op->orig_len, data_dir); 404 - 405 - return ret; 406 - } 407 - 408 - static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop) 409 - { 410 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 411 
- struct ls1x_nand_op op = {}; 412 - int ret; 413 - 414 - ret = ls1x_nand_parse_instructions(chip, subop, &op); 415 - if (ret) 416 - return ret; 417 - 418 - ls1x_nand_trigger_op(host, &op); 419 - 420 - ret = ls1x_nand_dma_transfer(host, &op); 421 - if (ret) 422 - return ret; 423 - 424 - return ls1x_nand_wait_for_op_done(host, &op); 425 - } 426 - 427 - static int ls1x_nand_misc_type_exec(struct nand_chip *chip, 428 - const struct nand_subop *subop, struct ls1x_nand_op *op) 429 - { 430 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 431 - int ret; 432 - 433 - ret = ls1x_nand_parse_instructions(chip, subop, op); 434 - if (ret) 435 - return ret; 436 - 437 - ls1x_nand_trigger_op(host, op); 438 - 439 - return ls1x_nand_wait_for_op_done(host, op); 440 - } 441 - 442 - static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop) 443 - { 444 - struct ls1x_nand_op op = {}; 445 - 446 - return ls1x_nand_misc_type_exec(chip, subop, &op); 447 - } 448 - 449 - static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) 450 - { 451 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 452 - struct ls1x_nand_op op = {}; 453 - int i, ret; 454 - union { 455 - char ids[5]; 456 - struct { 457 - int idl; 458 - char idh; 459 - }; 460 - } nand_id; 461 - 462 - ret = ls1x_nand_misc_type_exec(chip, subop, &op); 463 - if (ret) 464 - return ret; 465 - 466 - nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL); 467 - nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS); 468 - 469 - for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++) 470 - op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i]; 471 - 472 - return ret; 473 - } 474 - 475 - static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop) 476 - { 477 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 478 - struct ls1x_nand_op op = {}; 479 - int val, ret; 480 - 481 - ret = 
ls1x_nand_misc_type_exec(chip, subop, &op); 482 - if (ret) 483 - return ret; 484 - 485 - val = readl(host->reg_base + LS1X_NAND_IDH_STATUS); 486 - val &= ~host->data->status_field; 487 - op.buf[0] = val << ffs(host->data->status_field); 488 - 489 - return ret; 490 - } 491 - 492 - static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER( 493 - NAND_OP_PARSER_PATTERN( 494 - ls1x_nand_read_id_type_exec, 495 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 496 - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), 497 - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), 498 - NAND_OP_PARSER_PATTERN( 499 - ls1x_nand_read_status_type_exec, 500 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 501 - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), 502 - NAND_OP_PARSER_PATTERN( 503 - ls1x_nand_zerolen_type_exec, 504 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 505 - NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 506 - NAND_OP_PARSER_PATTERN( 507 - ls1x_nand_zerolen_type_exec, 508 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 509 - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), 510 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 511 - NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 512 - NAND_OP_PARSER_PATTERN( 513 - ls1x_nand_data_type_exec, 514 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 515 - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), 516 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 517 - NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), 518 - NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)), 519 - NAND_OP_PARSER_PATTERN( 520 - ls1x_nand_data_type_exec, 521 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 522 - NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), 523 - NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0), 524 - NAND_OP_PARSER_PAT_CMD_ELEM(false), 525 - NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), 526 - ); 527 - 528 - static int ls1x_nand_is_valid_cmd(u8 opcode) 529 - { 530 - if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID) 531 - return 0; 532 - 533 - return -EOPNOTSUPP; 534 - } 
535 - 536 - static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2) 537 - { 538 - if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART) 539 - return 0; 540 - 541 - if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART) 542 - return 0; 543 - 544 - if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2) 545 - return 0; 546 - 547 - if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG) 548 - return 0; 549 - 550 - return -EOPNOTSUPP; 551 - } 552 - 553 - static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op) 554 - { 555 - const struct nand_op_instr *instr1 = NULL, *instr2 = NULL; 556 - int op_id; 557 - 558 - for (op_id = 0; op_id < op->ninstrs; op_id++) { 559 - const struct nand_op_instr *instr = &op->instrs[op_id]; 560 - 561 - if (instr->type == NAND_OP_CMD_INSTR) { 562 - if (!instr1) 563 - instr1 = instr; 564 - else if (!instr2) 565 - instr2 = instr; 566 - else 567 - break; 568 - } 569 - } 570 - 571 - if (!instr1) 572 - return -EOPNOTSUPP; 573 - 574 - if (!instr2) 575 - return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode); 576 - 577 - return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode); 578 - } 579 - 580 - static int ls1x_nand_exec_op(struct nand_chip *chip, 581 - const struct nand_operation *op, bool check_only) 582 - { 583 - if (check_only) 584 - return ls1x_nand_check_op(chip, op); 585 - 586 - return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only); 587 - } 588 - 589 - static int ls1x_nand_attach_chip(struct nand_chip *chip) 590 - { 591 - struct ls1x_nand_host *host = nand_get_controller_data(chip); 592 - u64 chipsize = nanddev_target_size(&chip->base); 593 - int cell_size = 0; 594 - 595 - switch (chipsize) { 596 - case SZ_128M: 597 - cell_size = 0x0; 598 - break; 599 - case SZ_256M: 600 - cell_size = 0x1; 601 - break; 602 - case SZ_512M: 603 - cell_size = 0x2; 604 - break; 605 - case SZ_1G: 606 - cell_size = 0x3; 607 - break; 608 - case SZ_2G: 
609 - cell_size = 0x4; 610 - break; 611 - case SZ_4G: 612 - cell_size = 0x5; 613 - break; 614 - case SZ_8G: 615 - cell_size = 0x6; 616 - break; 617 - case SZ_16G: 618 - cell_size = 0x7; 619 - break; 620 - default: 621 - dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize); 622 - return -EINVAL; 623 - } 624 - 625 - switch (chip->ecc.engine_type) { 626 - case NAND_ECC_ENGINE_TYPE_NONE: 627 - break; 628 - case NAND_ECC_ENGINE_TYPE_SOFT: 629 - break; 630 - default: 631 - return -EINVAL; 632 - } 633 - 634 - /* set cell size */ 635 - regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK, 636 - FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size)); 637 - 638 - regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK, 639 - FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle)); 640 - 641 - regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK, 642 - FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle)); 643 - 644 - chip->ecc.read_page_raw = nand_monolithic_read_page_raw; 645 - chip->ecc.write_page_raw = nand_monolithic_write_page_raw; 646 - 647 - return 0; 648 - } 649 - 650 - static const struct nand_controller_ops ls1x_nand_controller_ops = { 651 - .exec_op = ls1x_nand_exec_op, 652 - .attach_chip = ls1x_nand_attach_chip, 653 - }; 654 - 655 - static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host) 656 - { 657 - if (host->dma_chan) 658 - dma_release_channel(host->dma_chan); 659 - } 660 - 661 - static int ls1x_nand_controller_init(struct ls1x_nand_host *host) 662 - { 663 - struct device *dev = host->dev; 664 - struct dma_chan *chan; 665 - struct dma_slave_config cfg = {}; 666 - int ret; 667 - 668 - host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config); 669 - if (IS_ERR(host->regmap)) 670 - return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n"); 671 - 672 - chan = dma_request_chan(dev, "rxtx"); 673 - if (IS_ERR(chan)) 674 - 
return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n"); 675 - host->dma_chan = chan; 676 - 677 - cfg.src_addr = host->dma_base; 678 - cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 679 - cfg.dst_addr = host->dma_base; 680 - cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 681 - ret = dmaengine_slave_config(host->dma_chan, &cfg); 682 - if (ret) 683 - return dev_err_probe(dev, ret, "failed to config DMA channel\n"); 684 - 685 - init_completion(&host->dma_complete); 686 - 687 - return 0; 688 - } 689 - 690 - static int ls1x_nand_chip_init(struct ls1x_nand_host *host) 691 - { 692 - struct device *dev = host->dev; 693 - int nchips = of_get_child_count(dev->of_node); 694 - struct device_node *chip_np; 695 - struct nand_chip *chip = &host->chip; 696 - struct mtd_info *mtd = nand_to_mtd(chip); 697 - int ret; 698 - 699 - if (nchips != 1) 700 - return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n"); 701 - 702 - chip_np = of_get_next_child(dev->of_node, NULL); 703 - if (!chip_np) 704 - return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n"); 705 - 706 - nand_set_flash_node(chip, chip_np); 707 - of_node_put(chip_np); 708 - if (!mtd->name) 709 - return dev_err_probe(dev, -EINVAL, "Missing MTD label\n"); 710 - 711 - nand_set_controller_data(chip, host); 712 - chip->controller = &host->controller; 713 - chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD; 714 - chip->buf_align = 16; 715 - mtd->dev.parent = dev; 716 - mtd->owner = THIS_MODULE; 717 - 718 - ret = nand_scan(chip, 1); 719 - if (ret) 720 - return dev_err_probe(dev, ret, "failed to scan NAND chip\n"); 721 - 722 - ret = mtd_device_register(mtd, NULL, 0); 723 - if (ret) { 724 - nand_cleanup(chip); 725 - return dev_err_probe(dev, ret, "failed to register MTD device\n"); 726 - } 727 - 728 - return 0; 729 - } 730 - 731 - static int ls1x_nand_probe(struct platform_device *pdev) 732 - { 733 - struct device *dev = &pdev->dev; 734 - const struct 
ls1x_nand_data *data; 735 - struct ls1x_nand_host *host; 736 - struct resource *res; 737 - int ret; 738 - 739 - data = of_device_get_match_data(dev); 740 - if (!data) 741 - return -ENODEV; 742 - 743 - host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 744 - if (!host) 745 - return -ENOMEM; 746 - 747 - host->reg_base = devm_platform_ioremap_resource(pdev, 0); 748 - if (IS_ERR(host->reg_base)) 749 - return PTR_ERR(host->reg_base); 750 - 751 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma"); 752 - if (!res) 753 - return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n"); 754 - 755 - host->dma_base = dma_map_resource(dev, res->start, resource_size(res), 756 - DMA_BIDIRECTIONAL, 0); 757 - if (dma_mapping_error(dev, host->dma_base)) 758 - return -ENXIO; 759 - 760 - host->dev = dev; 761 - host->data = data; 762 - host->controller.ops = &ls1x_nand_controller_ops; 763 - 764 - nand_controller_init(&host->controller); 765 - 766 - ret = ls1x_nand_controller_init(host); 767 - if (ret) 768 - goto err; 769 - 770 - ret = ls1x_nand_chip_init(host); 771 - if (ret) 772 - goto err; 773 - 774 - platform_set_drvdata(pdev, host); 775 - 776 - return 0; 777 - err: 778 - ls1x_nand_controller_cleanup(host); 779 - 780 - return ret; 781 - } 782 - 783 - static void ls1x_nand_remove(struct platform_device *pdev) 784 - { 785 - struct ls1x_nand_host *host = platform_get_drvdata(pdev); 786 - struct nand_chip *chip = &host->chip; 787 - int ret; 788 - 789 - ret = mtd_device_unregister(nand_to_mtd(chip)); 790 - WARN_ON(ret); 791 - nand_cleanup(chip); 792 - ls1x_nand_controller_cleanup(host); 793 - } 794 - 795 - static const struct ls1x_nand_data ls1b_nand_data = { 796 - .status_field = GENMASK(15, 8), 797 - .hold_cycle = 0x2, 798 - .wait_cycle = 0xc, 799 - .set_addr = ls1b_nand_set_addr, 800 - }; 801 - 802 - static const struct ls1x_nand_data ls1c_nand_data = { 803 - .status_field = GENMASK(23, 16), 804 - .op_scope_field = GENMASK(29, 16), 805 - 
.hold_cycle = 0x2, 806 - .wait_cycle = 0xc, 807 - .set_addr = ls1c_nand_set_addr, 808 - }; 809 - 810 - static const struct of_device_id ls1x_nand_match[] = { 811 - { 812 - .compatible = "loongson,ls1b-nand-controller", 813 - .data = &ls1b_nand_data, 814 - }, 815 - { 816 - .compatible = "loongson,ls1c-nand-controller", 817 - .data = &ls1c_nand_data, 818 - }, 819 - { /* sentinel */ } 820 - }; 821 - MODULE_DEVICE_TABLE(of, ls1x_nand_match); 822 - 823 - static struct platform_driver ls1x_nand_driver = { 824 - .probe = ls1x_nand_probe, 825 - .remove = ls1x_nand_remove, 826 - .driver = { 827 - .name = KBUILD_MODNAME, 828 - .of_match_table = ls1x_nand_match, 829 - }, 830 - }; 831 - 832 - module_platform_driver(ls1x_nand_driver); 833 - 834 - MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); 835 - MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver"); 836 - MODULE_LICENSE("GPL");
-131
drivers/mtd/nand/raw/nand_base.c
··· 2784 2784 } 2785 2785 2786 2786 /** 2787 - * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2788 - * @buf: buffer to test 2789 - * @len: buffer length 2790 - * @bitflips_threshold: maximum number of bitflips 2791 - * 2792 - * Check if a buffer contains only 0xff, which means the underlying region 2793 - * has been erased and is ready to be programmed. 2794 - * The bitflips_threshold specify the maximum number of bitflips before 2795 - * considering the region is not erased. 2796 - * Note: The logic of this function has been extracted from the memweight 2797 - * implementation, except that nand_check_erased_buf function exit before 2798 - * testing the whole buffer if the number of bitflips exceed the 2799 - * bitflips_threshold value. 2800 - * 2801 - * Returns a positive number of bitflips less than or equal to 2802 - * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2803 - * threshold. 2804 - */ 2805 - static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 2806 - { 2807 - const unsigned char *bitmap = buf; 2808 - int bitflips = 0; 2809 - int weight; 2810 - 2811 - for (; len && ((uintptr_t)bitmap) % sizeof(long); 2812 - len--, bitmap++) { 2813 - weight = hweight8(*bitmap); 2814 - bitflips += BITS_PER_BYTE - weight; 2815 - if (unlikely(bitflips > bitflips_threshold)) 2816 - return -EBADMSG; 2817 - } 2818 - 2819 - for (; len >= sizeof(long); 2820 - len -= sizeof(long), bitmap += sizeof(long)) { 2821 - unsigned long d = *((unsigned long *)bitmap); 2822 - if (d == ~0UL) 2823 - continue; 2824 - weight = hweight_long(d); 2825 - bitflips += BITS_PER_LONG - weight; 2826 - if (unlikely(bitflips > bitflips_threshold)) 2827 - return -EBADMSG; 2828 - } 2829 - 2830 - for (; len > 0; len--, bitmap++) { 2831 - weight = hweight8(*bitmap); 2832 - bitflips += BITS_PER_BYTE - weight; 2833 - if (unlikely(bitflips > bitflips_threshold)) 2834 - return -EBADMSG; 2835 - } 2836 - 2837 - return bitflips; 2838 - } 2839 - 
2840 - /** 2841 - * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 2842 - * 0xff data 2843 - * @data: data buffer to test 2844 - * @datalen: data length 2845 - * @ecc: ECC buffer 2846 - * @ecclen: ECC length 2847 - * @extraoob: extra OOB buffer 2848 - * @extraooblen: extra OOB length 2849 - * @bitflips_threshold: maximum number of bitflips 2850 - * 2851 - * Check if a data buffer and its associated ECC and OOB data contains only 2852 - * 0xff pattern, which means the underlying region has been erased and is 2853 - * ready to be programmed. 2854 - * The bitflips_threshold specify the maximum number of bitflips before 2855 - * considering the region as not erased. 2856 - * 2857 - * Note: 2858 - * 1/ ECC algorithms are working on pre-defined block sizes which are usually 2859 - * different from the NAND page size. When fixing bitflips, ECC engines will 2860 - * report the number of errors per chunk, and the NAND core infrastructure 2861 - * expect you to return the maximum number of bitflips for the whole page. 2862 - * This is why you should always use this function on a single chunk and 2863 - * not on the whole page. After checking each chunk you should update your 2864 - * max_bitflips value accordingly. 2865 - * 2/ When checking for bitflips in erased pages you should not only check 2866 - * the payload data but also their associated ECC data, because a user might 2867 - * have programmed almost all bits to 1 but a few. In this case, we 2868 - * shouldn't consider the chunk as erased, and checking ECC bytes prevent 2869 - * this case. 2870 - * 3/ The extraoob argument is optional, and should be used if some of your OOB 2871 - * data are protected by the ECC engine. 2872 - * It could also be used if you support subpages and want to attach some 2873 - * extra OOB data to an ECC chunk. 
2874 - * 2875 - * Returns a positive number of bitflips less than or equal to 2876 - * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2877 - * threshold. In case of success, the passed buffers are filled with 0xff. 2878 - */ 2879 - int nand_check_erased_ecc_chunk(void *data, int datalen, 2880 - void *ecc, int ecclen, 2881 - void *extraoob, int extraooblen, 2882 - int bitflips_threshold) 2883 - { 2884 - int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; 2885 - 2886 - data_bitflips = nand_check_erased_buf(data, datalen, 2887 - bitflips_threshold); 2888 - if (data_bitflips < 0) 2889 - return data_bitflips; 2890 - 2891 - bitflips_threshold -= data_bitflips; 2892 - 2893 - ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); 2894 - if (ecc_bitflips < 0) 2895 - return ecc_bitflips; 2896 - 2897 - bitflips_threshold -= ecc_bitflips; 2898 - 2899 - extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, 2900 - bitflips_threshold); 2901 - if (extraoob_bitflips < 0) 2902 - return extraoob_bitflips; 2903 - 2904 - if (data_bitflips) 2905 - memset(data, 0xff, datalen); 2906 - 2907 - if (ecc_bitflips) 2908 - memset(ecc, 0xff, ecclen); 2909 - 2910 - if (extraoob_bitflips) 2911 - memset(extraoob, 0xff, extraooblen); 2912 - 2913 - return data_bitflips + ecc_bitflips + extraoob_bitflips; 2914 - } 2915 - EXPORT_SYMBOL(nand_check_erased_ecc_chunk); 2916 - 2917 - /** 2918 2787 * nand_read_page_raw_notsupp - dummy read raw page function 2919 2788 * @chip: nand chip info structure 2920 2789 * @buf: buffer to store read data
+21 -5
drivers/mtd/nand/raw/omap2.c
··· 1979 1979 err = rawnand_sw_bch_init(chip); 1980 1980 if (err) { 1981 1981 dev_err(dev, "Unable to use BCH library\n"); 1982 - return err; 1982 + goto err_put_elm_dev; 1983 1983 } 1984 1984 break; 1985 1985 ··· 2016 2016 err = rawnand_sw_bch_init(chip); 2017 2017 if (err) { 2018 2018 dev_err(dev, "unable to use BCH library\n"); 2019 - return err; 2019 + goto err_put_elm_dev; 2020 2020 } 2021 2021 break; 2022 2022 ··· 2054 2054 break; 2055 2055 default: 2056 2056 dev_err(dev, "Invalid or unsupported ECC scheme\n"); 2057 - return -EINVAL; 2057 + err = -EINVAL; 2058 + goto err_put_elm_dev; 2058 2059 } 2059 2060 2060 2061 if (elm_bch_strength >= 0) { ··· 2074 2073 info->nsteps_per_eccpg, chip->ecc.size, 2075 2074 chip->ecc.bytes); 2076 2075 if (err < 0) 2077 - return err; 2076 + goto err_put_elm_dev; 2078 2077 } 2079 2078 2080 2079 /* Check if NAND device's OOB is enough to store ECC signatures */ ··· 2084 2083 dev_err(dev, 2085 2084 "Not enough OOB bytes: required = %d, available=%d\n", 2086 2085 min_oobbytes, mtd->oobsize); 2087 - return -EINVAL; 2086 + err = -EINVAL; 2087 + goto err_put_elm_dev; 2088 2088 } 2089 2089 2090 2090 return 0; 2091 + 2092 + err_put_elm_dev: 2093 + put_device(info->elm_dev); 2094 + 2095 + return err; 2096 + } 2097 + 2098 + static void omap_nand_detach_chip(struct nand_chip *chip) 2099 + { 2100 + struct mtd_info *mtd = nand_to_mtd(chip); 2101 + struct omap_nand_info *info = mtd_to_omap(mtd); 2102 + 2103 + put_device(info->elm_dev); 2091 2104 } 2092 2105 2093 2106 static void omap_nand_data_in(struct nand_chip *chip, void *buf, ··· 2202 2187 2203 2188 static const struct nand_controller_ops omap_nand_controller_ops = { 2204 2189 .attach_chip = omap_nand_attach_chip, 2190 + .detach_chip = omap_nand_detach_chip, 2205 2191 .exec_op = omap_nand_exec_op, 2206 2192 }; 2207 2193
+1 -1
drivers/mtd/nand/raw/pl35x-nand-controller.c
··· 1137 1137 struct device *smc_dev = pdev->dev.parent; 1138 1138 struct amba_device *smc_amba = to_amba_device(smc_dev); 1139 1139 struct pl35x_nandc *nfc; 1140 - u32 ret; 1140 + int ret; 1141 1141 1142 1142 nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL); 1143 1143 if (!nfc)
-1230
drivers/mtd/nand/raw/s3c2410.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Copyright © 2004-2008 Simtec Electronics 4 - * http://armlinux.simtec.co.uk/ 5 - * Ben Dooks <ben@simtec.co.uk> 6 - * 7 - * Samsung S3C2410/S3C2440/S3C2412 NAND driver 8 - */ 9 - 10 - #define pr_fmt(fmt) "nand-s3c2410: " fmt 11 - 12 - #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG 13 - #define DEBUG 14 - #endif 15 - 16 - #include <linux/module.h> 17 - #include <linux/types.h> 18 - #include <linux/kernel.h> 19 - #include <linux/string.h> 20 - #include <linux/io.h> 21 - #include <linux/ioport.h> 22 - #include <linux/platform_device.h> 23 - #include <linux/delay.h> 24 - #include <linux/err.h> 25 - #include <linux/slab.h> 26 - #include <linux/clk.h> 27 - #include <linux/cpufreq.h> 28 - #include <linux/of.h> 29 - 30 - #include <linux/mtd/mtd.h> 31 - #include <linux/mtd/rawnand.h> 32 - #include <linux/mtd/partitions.h> 33 - 34 - #include <linux/platform_data/mtd-nand-s3c2410.h> 35 - 36 - #define S3C2410_NFREG(x) (x) 37 - 38 - #define S3C2410_NFCONF S3C2410_NFREG(0x00) 39 - #define S3C2410_NFCMD S3C2410_NFREG(0x04) 40 - #define S3C2410_NFADDR S3C2410_NFREG(0x08) 41 - #define S3C2410_NFDATA S3C2410_NFREG(0x0C) 42 - #define S3C2410_NFSTAT S3C2410_NFREG(0x10) 43 - #define S3C2410_NFECC S3C2410_NFREG(0x14) 44 - #define S3C2440_NFCONT S3C2410_NFREG(0x04) 45 - #define S3C2440_NFCMD S3C2410_NFREG(0x08) 46 - #define S3C2440_NFADDR S3C2410_NFREG(0x0C) 47 - #define S3C2440_NFDATA S3C2410_NFREG(0x10) 48 - #define S3C2440_NFSTAT S3C2410_NFREG(0x20) 49 - #define S3C2440_NFMECC0 S3C2410_NFREG(0x2C) 50 - #define S3C2412_NFSTAT S3C2410_NFREG(0x28) 51 - #define S3C2412_NFMECC0 S3C2410_NFREG(0x34) 52 - #define S3C2410_NFCONF_EN (1<<15) 53 - #define S3C2410_NFCONF_INITECC (1<<12) 54 - #define S3C2410_NFCONF_nFCE (1<<11) 55 - #define S3C2410_NFCONF_TACLS(x) ((x)<<8) 56 - #define S3C2410_NFCONF_TWRPH0(x) ((x)<<4) 57 - #define S3C2410_NFCONF_TWRPH1(x) ((x)<<0) 58 - #define S3C2410_NFSTAT_BUSY (1<<0) 59 - #define S3C2440_NFCONF_TACLS(x) 
((x)<<12) 60 - #define S3C2440_NFCONF_TWRPH0(x) ((x)<<8) 61 - #define S3C2440_NFCONF_TWRPH1(x) ((x)<<4) 62 - #define S3C2440_NFCONT_INITECC (1<<4) 63 - #define S3C2440_NFCONT_nFCE (1<<1) 64 - #define S3C2440_NFCONT_ENABLE (1<<0) 65 - #define S3C2440_NFSTAT_READY (1<<0) 66 - #define S3C2412_NFCONF_NANDBOOT (1<<31) 67 - #define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5) 68 - #define S3C2412_NFCONT_nFCE0 (1<<1) 69 - #define S3C2412_NFSTAT_READY (1<<0) 70 - 71 - /* new oob placement block for use with hardware ecc generation 72 - */ 73 - static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section, 74 - struct mtd_oob_region *oobregion) 75 - { 76 - if (section) 77 - return -ERANGE; 78 - 79 - oobregion->offset = 0; 80 - oobregion->length = 3; 81 - 82 - return 0; 83 - } 84 - 85 - static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section, 86 - struct mtd_oob_region *oobregion) 87 - { 88 - if (section) 89 - return -ERANGE; 90 - 91 - oobregion->offset = 8; 92 - oobregion->length = 8; 93 - 94 - return 0; 95 - } 96 - 97 - static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = { 98 - .ecc = s3c2410_ooblayout_ecc, 99 - .free = s3c2410_ooblayout_free, 100 - }; 101 - 102 - /* controller and mtd information */ 103 - 104 - struct s3c2410_nand_info; 105 - 106 - /** 107 - * struct s3c2410_nand_mtd - driver MTD structure 108 - * @chip: The NAND chip information. 109 - * @set: The platform information supplied for this set of NAND chips. 110 - * @info: Link back to the hardware information. 
111 - */ 112 - struct s3c2410_nand_mtd { 113 - struct nand_chip chip; 114 - struct s3c2410_nand_set *set; 115 - struct s3c2410_nand_info *info; 116 - }; 117 - 118 - enum s3c_cpu_type { 119 - TYPE_S3C2410, 120 - TYPE_S3C2412, 121 - TYPE_S3C2440, 122 - }; 123 - 124 - enum s3c_nand_clk_state { 125 - CLOCK_DISABLE = 0, 126 - CLOCK_ENABLE, 127 - CLOCK_SUSPEND, 128 - }; 129 - 130 - /* overview of the s3c2410 nand state */ 131 - 132 - /** 133 - * struct s3c2410_nand_info - NAND controller state. 134 - * @controller: Base controller structure. 135 - * @mtds: An array of MTD instances on this controller. 136 - * @platform: The platform data for this board. 137 - * @device: The platform device we bound to. 138 - * @clk: The clock resource for this controller. 139 - * @regs: The area mapped for the hardware registers. 140 - * @sel_reg: Pointer to the register controlling the NAND selection. 141 - * @sel_bit: The bit in @sel_reg to select the NAND chip. 142 - * @mtd_count: The number of MTDs created from this controller. 143 - * @save_sel: The contents of @sel_reg to be saved over suspend. 144 - * @clk_rate: The clock rate from @clk. 145 - * @clk_state: The current clock state. 146 - * @cpu_type: The exact type of this controller. 
147 - */ 148 - struct s3c2410_nand_info { 149 - /* mtd info */ 150 - struct nand_controller controller; 151 - struct s3c2410_nand_mtd *mtds; 152 - struct s3c2410_platform_nand *platform; 153 - 154 - /* device info */ 155 - struct device *device; 156 - struct clk *clk; 157 - void __iomem *regs; 158 - void __iomem *sel_reg; 159 - int sel_bit; 160 - int mtd_count; 161 - unsigned long save_sel; 162 - unsigned long clk_rate; 163 - enum s3c_nand_clk_state clk_state; 164 - 165 - enum s3c_cpu_type cpu_type; 166 - }; 167 - 168 - struct s3c24XX_nand_devtype_data { 169 - enum s3c_cpu_type type; 170 - }; 171 - 172 - static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = { 173 - .type = TYPE_S3C2410, 174 - }; 175 - 176 - static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = { 177 - .type = TYPE_S3C2412, 178 - }; 179 - 180 - static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = { 181 - .type = TYPE_S3C2440, 182 - }; 183 - 184 - /* conversion functions */ 185 - 186 - static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd) 187 - { 188 - return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd, 189 - chip); 190 - } 191 - 192 - static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd) 193 - { 194 - return s3c2410_nand_mtd_toours(mtd)->info; 195 - } 196 - 197 - static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev) 198 - { 199 - return platform_get_drvdata(dev); 200 - } 201 - 202 - static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) 203 - { 204 - return dev_get_platdata(&dev->dev); 205 - } 206 - 207 - static inline int allow_clk_suspend(struct s3c2410_nand_info *info) 208 - { 209 - #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP 210 - return 1; 211 - #else 212 - return 0; 213 - #endif 214 - } 215 - 216 - /** 217 - * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock. 218 - * @info: The controller instance. 
219 - * @new_state: State to which clock should be set. 220 - */ 221 - static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, 222 - enum s3c_nand_clk_state new_state) 223 - { 224 - if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND) 225 - return; 226 - 227 - if (info->clk_state == CLOCK_ENABLE) { 228 - if (new_state != CLOCK_ENABLE) 229 - clk_disable_unprepare(info->clk); 230 - } else { 231 - if (new_state == CLOCK_ENABLE) 232 - clk_prepare_enable(info->clk); 233 - } 234 - 235 - info->clk_state = new_state; 236 - } 237 - 238 - /* timing calculations */ 239 - 240 - #define NS_IN_KHZ 1000000 241 - 242 - /** 243 - * s3c_nand_calc_rate - calculate timing data. 244 - * @wanted: The cycle time in nanoseconds. 245 - * @clk: The clock rate in kHz. 246 - * @max: The maximum divider value. 247 - * 248 - * Calculate the timing value from the given parameters. 249 - */ 250 - static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) 251 - { 252 - int result; 253 - 254 - result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ); 255 - 256 - pr_debug("result %d from %ld, %d\n", result, clk, wanted); 257 - 258 - if (result > max) { 259 - pr_err("%d ns is too big for current clock rate %ld\n", 260 - wanted, clk); 261 - return -1; 262 - } 263 - 264 - if (result < 1) 265 - result = 1; 266 - 267 - return result; 268 - } 269 - 270 - #define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) 271 - 272 - /* controller setup */ 273 - 274 - /** 275 - * s3c2410_nand_setrate - setup controller timing information. 276 - * @info: The controller instance. 277 - * 278 - * Given the information supplied by the platform, calculate and set 279 - * the necessary timing registers in the hardware to generate the 280 - * necessary timing cycles to the hardware. 281 - */ 282 - static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) 283 - { 284 - struct s3c2410_platform_nand *plat = info->platform; 285 - int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 
8 : 4; 286 - int tacls, twrph0, twrph1; 287 - unsigned long clkrate = clk_get_rate(info->clk); 288 - unsigned long set, cfg, mask; 289 - unsigned long flags; 290 - 291 - /* calculate the timing information for the controller */ 292 - 293 - info->clk_rate = clkrate; 294 - clkrate /= 1000; /* turn clock into kHz for ease of use */ 295 - 296 - if (plat != NULL) { 297 - tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); 298 - twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); 299 - twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); 300 - } else { 301 - /* default timings */ 302 - tacls = tacls_max; 303 - twrph0 = 8; 304 - twrph1 = 8; 305 - } 306 - 307 - if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { 308 - dev_err(info->device, "cannot get suitable timings\n"); 309 - return -EINVAL; 310 - } 311 - 312 - dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 313 - tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), 314 - twrph1, to_ns(twrph1, clkrate)); 315 - 316 - switch (info->cpu_type) { 317 - case TYPE_S3C2410: 318 - mask = (S3C2410_NFCONF_TACLS(3) | 319 - S3C2410_NFCONF_TWRPH0(7) | 320 - S3C2410_NFCONF_TWRPH1(7)); 321 - set = S3C2410_NFCONF_EN; 322 - set |= S3C2410_NFCONF_TACLS(tacls - 1); 323 - set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); 324 - set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); 325 - break; 326 - 327 - case TYPE_S3C2440: 328 - case TYPE_S3C2412: 329 - mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) | 330 - S3C2440_NFCONF_TWRPH0(7) | 331 - S3C2440_NFCONF_TWRPH1(7)); 332 - 333 - set = S3C2440_NFCONF_TACLS(tacls - 1); 334 - set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); 335 - set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); 336 - break; 337 - 338 - default: 339 - BUG(); 340 - } 341 - 342 - local_irq_save(flags); 343 - 344 - cfg = readl(info->regs + S3C2410_NFCONF); 345 - cfg &= ~mask; 346 - cfg |= set; 347 - writel(cfg, info->regs + S3C2410_NFCONF); 348 - 349 - local_irq_restore(flags); 350 - 351 - dev_dbg(info->device, "NF_CONF is 
0x%lx\n", cfg); 352 - 353 - return 0; 354 - } 355 - 356 - /** 357 - * s3c2410_nand_inithw - basic hardware initialisation 358 - * @info: The hardware state. 359 - * 360 - * Do the basic initialisation of the hardware, using s3c2410_nand_setrate() 361 - * to setup the hardware access speeds and set the controller to be enabled. 362 - */ 363 - static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) 364 - { 365 - int ret; 366 - 367 - ret = s3c2410_nand_setrate(info); 368 - if (ret < 0) 369 - return ret; 370 - 371 - switch (info->cpu_type) { 372 - case TYPE_S3C2410: 373 - default: 374 - break; 375 - 376 - case TYPE_S3C2440: 377 - case TYPE_S3C2412: 378 - /* enable the controller and de-assert nFCE */ 379 - 380 - writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); 381 - } 382 - 383 - return 0; 384 - } 385 - 386 - /** 387 - * s3c2410_nand_select_chip - select the given nand chip 388 - * @this: NAND chip object. 389 - * @chip: The chip number. 390 - * 391 - * This is called by the MTD layer to either select a given chip for the 392 - * @mtd instance, or to indicate that the access has finished and the 393 - * chip can be de-selected. 394 - * 395 - * The routine ensures that the nFCE line is correctly setup, and any 396 - * platform specific selection code is called to route nFCE to the specific 397 - * chip. 
398 - */ 399 - static void s3c2410_nand_select_chip(struct nand_chip *this, int chip) 400 - { 401 - struct s3c2410_nand_info *info; 402 - struct s3c2410_nand_mtd *nmtd; 403 - unsigned long cur; 404 - 405 - nmtd = nand_get_controller_data(this); 406 - info = nmtd->info; 407 - 408 - if (chip != -1) 409 - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); 410 - 411 - cur = readl(info->sel_reg); 412 - 413 - if (chip == -1) { 414 - cur |= info->sel_bit; 415 - } else { 416 - if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { 417 - dev_err(info->device, "invalid chip %d\n", chip); 418 - return; 419 - } 420 - 421 - if (info->platform != NULL) { 422 - if (info->platform->select_chip != NULL) 423 - (info->platform->select_chip) (nmtd->set, chip); 424 - } 425 - 426 - cur &= ~info->sel_bit; 427 - } 428 - 429 - writel(cur, info->sel_reg); 430 - 431 - if (chip == -1) 432 - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); 433 - } 434 - 435 - /* s3c2410_nand_hwcontrol 436 - * 437 - * Issue command and address cycles to the chip 438 - */ 439 - 440 - static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd, 441 - unsigned int ctrl) 442 - { 443 - struct mtd_info *mtd = nand_to_mtd(chip); 444 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 445 - 446 - if (cmd == NAND_CMD_NONE) 447 - return; 448 - 449 - if (ctrl & NAND_CLE) 450 - writeb(cmd, info->regs + S3C2410_NFCMD); 451 - else 452 - writeb(cmd, info->regs + S3C2410_NFADDR); 453 - } 454 - 455 - /* command and control functions */ 456 - 457 - static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd, 458 - unsigned int ctrl) 459 - { 460 - struct mtd_info *mtd = nand_to_mtd(chip); 461 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 462 - 463 - if (cmd == NAND_CMD_NONE) 464 - return; 465 - 466 - if (ctrl & NAND_CLE) 467 - writeb(cmd, info->regs + S3C2440_NFCMD); 468 - else 469 - writeb(cmd, info->regs + S3C2440_NFADDR); 470 - } 471 - 472 - /* s3c2410_nand_devready() 473 - * 474 - * 
returns 0 if the nand is busy, 1 if it is ready 475 - */ 476 - 477 - static int s3c2410_nand_devready(struct nand_chip *chip) 478 - { 479 - struct mtd_info *mtd = nand_to_mtd(chip); 480 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 481 - return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; 482 - } 483 - 484 - static int s3c2440_nand_devready(struct nand_chip *chip) 485 - { 486 - struct mtd_info *mtd = nand_to_mtd(chip); 487 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 488 - return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; 489 - } 490 - 491 - static int s3c2412_nand_devready(struct nand_chip *chip) 492 - { 493 - struct mtd_info *mtd = nand_to_mtd(chip); 494 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 495 - return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; 496 - } 497 - 498 - /* ECC handling functions */ 499 - 500 - static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat, 501 - u_char *read_ecc, u_char *calc_ecc) 502 - { 503 - struct mtd_info *mtd = nand_to_mtd(chip); 504 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 505 - unsigned int diff0, diff1, diff2; 506 - unsigned int bit, byte; 507 - 508 - pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc); 509 - 510 - diff0 = read_ecc[0] ^ calc_ecc[0]; 511 - diff1 = read_ecc[1] ^ calc_ecc[1]; 512 - diff2 = read_ecc[2] ^ calc_ecc[2]; 513 - 514 - pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n", 515 - __func__, 3, read_ecc, 3, calc_ecc, 516 - diff0, diff1, diff2); 517 - 518 - if (diff0 == 0 && diff1 == 0 && diff2 == 0) 519 - return 0; /* ECC is ok */ 520 - 521 - /* sometimes people do not think about using the ECC, so check 522 - * to see if we have an 0xff,0xff,0xff read ECC and then ignore 523 - * the error, on the assumption that this is an un-eccd page. 
524 - */ 525 - if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff 526 - && info->platform->ignore_unset_ecc) 527 - return 0; 528 - 529 - /* Can we correct this ECC (ie, one row and column change). 530 - * Note, this is similar to the 256 error code on smartmedia */ 531 - 532 - if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 && 533 - ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 && 534 - ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { 535 - /* calculate the bit position of the error */ 536 - 537 - bit = ((diff2 >> 3) & 1) | 538 - ((diff2 >> 4) & 2) | 539 - ((diff2 >> 5) & 4); 540 - 541 - /* calculate the byte position of the error */ 542 - 543 - byte = ((diff2 << 7) & 0x100) | 544 - ((diff1 << 0) & 0x80) | 545 - ((diff1 << 1) & 0x40) | 546 - ((diff1 << 2) & 0x20) | 547 - ((diff1 << 3) & 0x10) | 548 - ((diff0 >> 4) & 0x08) | 549 - ((diff0 >> 3) & 0x04) | 550 - ((diff0 >> 2) & 0x02) | 551 - ((diff0 >> 1) & 0x01); 552 - 553 - dev_dbg(info->device, "correcting error bit %d, byte %d\n", 554 - bit, byte); 555 - 556 - dat[byte] ^= (1 << bit); 557 - return 1; 558 - } 559 - 560 - /* if there is only one bit difference in the ECC, then 561 - * one of only a row or column parity has changed, which 562 - * means the error is most probably in the ECC itself */ 563 - 564 - diff0 |= (diff1 << 8); 565 - diff0 |= (diff2 << 16); 566 - 567 - /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */ 568 - if ((diff0 & (diff0 - 1)) == 0) 569 - return 1; 570 - 571 - return -1; 572 - } 573 - 574 - /* ECC functions 575 - * 576 - * These allow the s3c2410 and s3c2440 to use the controller's ECC 577 - * generator block to ECC the data as it passes through] 578 - */ 579 - 580 - static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode) 581 - { 582 - struct s3c2410_nand_info *info; 583 - unsigned long ctrl; 584 - 585 - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); 586 - ctrl = readl(info->regs + S3C2410_NFCONF); 587 - ctrl |= S3C2410_NFCONF_INITECC; 588 - writel(ctrl, info->regs + 
S3C2410_NFCONF); 589 - } 590 - 591 - static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode) 592 - { 593 - struct s3c2410_nand_info *info; 594 - unsigned long ctrl; 595 - 596 - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); 597 - ctrl = readl(info->regs + S3C2440_NFCONT); 598 - writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, 599 - info->regs + S3C2440_NFCONT); 600 - } 601 - 602 - static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode) 603 - { 604 - struct s3c2410_nand_info *info; 605 - unsigned long ctrl; 606 - 607 - info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip)); 608 - ctrl = readl(info->regs + S3C2440_NFCONT); 609 - writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); 610 - } 611 - 612 - static int s3c2410_nand_calculate_ecc(struct nand_chip *chip, 613 - const u_char *dat, u_char *ecc_code) 614 - { 615 - struct mtd_info *mtd = nand_to_mtd(chip); 616 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 617 - 618 - ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0); 619 - ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); 620 - ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); 621 - 622 - pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); 623 - 624 - return 0; 625 - } 626 - 627 - static int s3c2412_nand_calculate_ecc(struct nand_chip *chip, 628 - const u_char *dat, u_char *ecc_code) 629 - { 630 - struct mtd_info *mtd = nand_to_mtd(chip); 631 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 632 - unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); 633 - 634 - ecc_code[0] = ecc; 635 - ecc_code[1] = ecc >> 8; 636 - ecc_code[2] = ecc >> 16; 637 - 638 - pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); 639 - 640 - return 0; 641 - } 642 - 643 - static int s3c2440_nand_calculate_ecc(struct nand_chip *chip, 644 - const u_char *dat, u_char *ecc_code) 645 - { 646 - struct mtd_info *mtd = nand_to_mtd(chip); 647 - struct s3c2410_nand_info *info = 
s3c2410_nand_mtd_toinfo(mtd); 648 - unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); 649 - 650 - ecc_code[0] = ecc; 651 - ecc_code[1] = ecc >> 8; 652 - ecc_code[2] = ecc >> 16; 653 - 654 - pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff); 655 - 656 - return 0; 657 - } 658 - 659 - /* over-ride the standard functions for a little more speed. We can 660 - * use read/write block to move the data buffers to/from the controller 661 - */ 662 - 663 - static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len) 664 - { 665 - readsb(this->legacy.IO_ADDR_R, buf, len); 666 - } 667 - 668 - static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len) 669 - { 670 - struct mtd_info *mtd = nand_to_mtd(this); 671 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 672 - 673 - readsl(info->regs + S3C2440_NFDATA, buf, len >> 2); 674 - 675 - /* cleanup if we've got less than a word to do */ 676 - if (len & 3) { 677 - buf += len & ~3; 678 - 679 - for (; len & 3; len--) 680 - *buf++ = readb(info->regs + S3C2440_NFDATA); 681 - } 682 - } 683 - 684 - static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf, 685 - int len) 686 - { 687 - writesb(this->legacy.IO_ADDR_W, buf, len); 688 - } 689 - 690 - static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf, 691 - int len) 692 - { 693 - struct mtd_info *mtd = nand_to_mtd(this); 694 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 695 - 696 - writesl(info->regs + S3C2440_NFDATA, buf, len >> 2); 697 - 698 - /* cleanup any fractional write */ 699 - if (len & 3) { 700 - buf += len & ~3; 701 - 702 - for (; len & 3; len--, buf++) 703 - writeb(*buf, info->regs + S3C2440_NFDATA); 704 - } 705 - } 706 - 707 - /* device management functions */ 708 - 709 - static void s3c24xx_nand_remove(struct platform_device *pdev) 710 - { 711 - struct s3c2410_nand_info *info = to_nand_info(pdev); 712 - 713 - if (info == NULL) 714 - return; 
715 - 716 - /* Release all our mtds and their partitions, then go through 717 - * freeing the resources used 718 - */ 719 - 720 - if (info->mtds != NULL) { 721 - struct s3c2410_nand_mtd *ptr = info->mtds; 722 - int mtdno; 723 - 724 - for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { 725 - pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); 726 - WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip))); 727 - nand_cleanup(&ptr->chip); 728 - } 729 - } 730 - 731 - /* free the common resources */ 732 - 733 - if (!IS_ERR(info->clk)) 734 - s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 735 - } 736 - 737 - static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 738 - struct s3c2410_nand_mtd *mtd, 739 - struct s3c2410_nand_set *set) 740 - { 741 - if (set) { 742 - struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip); 743 - 744 - mtdinfo->name = set->name; 745 - 746 - return mtd_device_register(mtdinfo, set->partitions, 747 - set->nr_partitions); 748 - } 749 - 750 - return -ENODEV; 751 - } 752 - 753 - static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, 754 - const struct nand_interface_config *conf) 755 - { 756 - struct mtd_info *mtd = nand_to_mtd(chip); 757 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 758 - struct s3c2410_platform_nand *pdata = info->platform; 759 - const struct nand_sdr_timings *timings; 760 - int tacls; 761 - 762 - timings = nand_get_sdr_timings(conf); 763 - if (IS_ERR(timings)) 764 - return -ENOTSUPP; 765 - 766 - tacls = timings->tCLS_min - timings->tWP_min; 767 - if (tacls < 0) 768 - tacls = 0; 769 - 770 - pdata->tacls = DIV_ROUND_UP(tacls, 1000); 771 - pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000); 772 - pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000); 773 - 774 - return s3c2410_nand_setrate(info); 775 - } 776 - 777 - /** 778 - * s3c2410_nand_init_chip - initialise a single instance of an chip 779 - * @info: The base NAND controller the chip is on. 
780 - * @nmtd: The new controller MTD instance to fill in. 781 - * @set: The information passed from the board specific platform data. 782 - * 783 - * Initialise the given @nmtd from the information in @info and @set. This 784 - * readies the structure for use with the MTD layer functions by ensuring 785 - * all pointers are setup and the necessary control routines selected. 786 - */ 787 - static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, 788 - struct s3c2410_nand_mtd *nmtd, 789 - struct s3c2410_nand_set *set) 790 - { 791 - struct device_node *np = info->device->of_node; 792 - struct nand_chip *chip = &nmtd->chip; 793 - void __iomem *regs = info->regs; 794 - 795 - nand_set_flash_node(chip, set->of_node); 796 - 797 - chip->legacy.write_buf = s3c2410_nand_write_buf; 798 - chip->legacy.read_buf = s3c2410_nand_read_buf; 799 - chip->legacy.select_chip = s3c2410_nand_select_chip; 800 - chip->legacy.chip_delay = 50; 801 - nand_set_controller_data(chip, nmtd); 802 - chip->options = set->options; 803 - chip->controller = &info->controller; 804 - 805 - /* 806 - * let's keep behavior unchanged for legacy boards booting via pdata and 807 - * auto-detect timings only when booting with a device tree. 
808 - */ 809 - if (!np) 810 - chip->options |= NAND_KEEP_TIMINGS; 811 - 812 - switch (info->cpu_type) { 813 - case TYPE_S3C2410: 814 - chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA; 815 - info->sel_reg = regs + S3C2410_NFCONF; 816 - info->sel_bit = S3C2410_NFCONF_nFCE; 817 - chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol; 818 - chip->legacy.dev_ready = s3c2410_nand_devready; 819 - break; 820 - 821 - case TYPE_S3C2440: 822 - chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA; 823 - info->sel_reg = regs + S3C2440_NFCONT; 824 - info->sel_bit = S3C2440_NFCONT_nFCE; 825 - chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol; 826 - chip->legacy.dev_ready = s3c2440_nand_devready; 827 - chip->legacy.read_buf = s3c2440_nand_read_buf; 828 - chip->legacy.write_buf = s3c2440_nand_write_buf; 829 - break; 830 - 831 - case TYPE_S3C2412: 832 - chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA; 833 - info->sel_reg = regs + S3C2440_NFCONT; 834 - info->sel_bit = S3C2412_NFCONT_nFCE0; 835 - chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol; 836 - chip->legacy.dev_ready = s3c2412_nand_devready; 837 - 838 - if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) 839 - dev_info(info->device, "System booted from NAND\n"); 840 - 841 - break; 842 - } 843 - 844 - chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W; 845 - 846 - nmtd->info = info; 847 - nmtd->set = set; 848 - 849 - chip->ecc.engine_type = info->platform->engine_type; 850 - 851 - /* 852 - * If you use u-boot BBT creation code, specifying this flag will 853 - * let the kernel fish out the BBT from the NAND. 854 - */ 855 - if (set->flash_bbt) 856 - chip->bbt_options |= NAND_BBT_USE_FLASH; 857 - } 858 - 859 - /** 860 - * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan 861 - * @chip: The NAND chip 862 - * 863 - * This hook is called by the core after the identification of the NAND chip, 864 - * once the relevant per-chip information is up to date.. This call ensure that 865 - * we update the internal state accordingly. 
866 - * 867 - * The internal state is currently limited to the ECC state information. 868 - */ 869 - static int s3c2410_nand_attach_chip(struct nand_chip *chip) 870 - { 871 - struct mtd_info *mtd = nand_to_mtd(chip); 872 - struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 873 - 874 - switch (chip->ecc.engine_type) { 875 - 876 - case NAND_ECC_ENGINE_TYPE_NONE: 877 - dev_info(info->device, "ECC disabled\n"); 878 - break; 879 - 880 - case NAND_ECC_ENGINE_TYPE_SOFT: 881 - /* 882 - * This driver expects Hamming based ECC when engine_type is set 883 - * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to 884 - * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field 885 - * to s3c2410_platform_nand. 886 - */ 887 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 888 - dev_info(info->device, "soft ECC\n"); 889 - break; 890 - 891 - case NAND_ECC_ENGINE_TYPE_ON_HOST: 892 - chip->ecc.calculate = s3c2410_nand_calculate_ecc; 893 - chip->ecc.correct = s3c2410_nand_correct_data; 894 - chip->ecc.strength = 1; 895 - 896 - switch (info->cpu_type) { 897 - case TYPE_S3C2410: 898 - chip->ecc.hwctl = s3c2410_nand_enable_hwecc; 899 - chip->ecc.calculate = s3c2410_nand_calculate_ecc; 900 - break; 901 - 902 - case TYPE_S3C2412: 903 - chip->ecc.hwctl = s3c2412_nand_enable_hwecc; 904 - chip->ecc.calculate = s3c2412_nand_calculate_ecc; 905 - break; 906 - 907 - case TYPE_S3C2440: 908 - chip->ecc.hwctl = s3c2440_nand_enable_hwecc; 909 - chip->ecc.calculate = s3c2440_nand_calculate_ecc; 910 - break; 911 - } 912 - 913 - dev_dbg(info->device, "chip %p => page shift %d\n", 914 - chip, chip->page_shift); 915 - 916 - /* change the behaviour depending on whether we are using 917 - * the large or small page nand device */ 918 - if (chip->page_shift > 10) { 919 - chip->ecc.size = 256; 920 - chip->ecc.bytes = 3; 921 - } else { 922 - chip->ecc.size = 512; 923 - chip->ecc.bytes = 3; 924 - mtd_set_ooblayout(nand_to_mtd(chip), 925 - &s3c2410_ooblayout_ops); 926 - } 927 - 928 - dev_info(info->device, 
"hardware ECC\n"); 929 - break; 930 - 931 - default: 932 - dev_err(info->device, "invalid ECC mode!\n"); 933 - return -EINVAL; 934 - } 935 - 936 - if (chip->bbt_options & NAND_BBT_USE_FLASH) 937 - chip->options |= NAND_SKIP_BBTSCAN; 938 - 939 - return 0; 940 - } 941 - 942 - static const struct nand_controller_ops s3c24xx_nand_controller_ops = { 943 - .attach_chip = s3c2410_nand_attach_chip, 944 - .setup_interface = s3c2410_nand_setup_interface, 945 - }; 946 - 947 - static const struct of_device_id s3c24xx_nand_dt_ids[] = { 948 - { 949 - .compatible = "samsung,s3c2410-nand", 950 - .data = &s3c2410_nand_devtype_data, 951 - }, { 952 - /* also compatible with s3c6400 */ 953 - .compatible = "samsung,s3c2412-nand", 954 - .data = &s3c2412_nand_devtype_data, 955 - }, { 956 - .compatible = "samsung,s3c2440-nand", 957 - .data = &s3c2440_nand_devtype_data, 958 - }, 959 - { /* sentinel */ } 960 - }; 961 - MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids); 962 - 963 - static int s3c24xx_nand_probe_dt(struct platform_device *pdev) 964 - { 965 - const struct s3c24XX_nand_devtype_data *devtype_data; 966 - struct s3c2410_platform_nand *pdata; 967 - struct s3c2410_nand_info *info = platform_get_drvdata(pdev); 968 - struct device_node *np = pdev->dev.of_node, *child; 969 - struct s3c2410_nand_set *sets; 970 - 971 - devtype_data = of_device_get_match_data(&pdev->dev); 972 - if (!devtype_data) 973 - return -ENODEV; 974 - 975 - info->cpu_type = devtype_data->type; 976 - 977 - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 978 - if (!pdata) 979 - return -ENOMEM; 980 - 981 - pdev->dev.platform_data = pdata; 982 - 983 - pdata->nr_sets = of_get_child_count(np); 984 - if (!pdata->nr_sets) 985 - return 0; 986 - 987 - sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets), 988 - GFP_KERNEL); 989 - if (!sets) 990 - return -ENOMEM; 991 - 992 - pdata->sets = sets; 993 - 994 - for_each_available_child_of_node(np, child) { 995 - sets->name = (char *)child->name; 996 - sets->of_node 
= child; 997 - sets->nr_chips = 1; 998 - 999 - of_node_get(child); 1000 - 1001 - sets++; 1002 - } 1003 - 1004 - return 0; 1005 - } 1006 - 1007 - static int s3c24xx_nand_probe_pdata(struct platform_device *pdev) 1008 - { 1009 - struct s3c2410_nand_info *info = platform_get_drvdata(pdev); 1010 - 1011 - info->cpu_type = platform_get_device_id(pdev)->driver_data; 1012 - 1013 - return 0; 1014 - } 1015 - 1016 - /* s3c24xx_nand_probe 1017 - * 1018 - * called by device layer when it finds a device matching 1019 - * one our driver can handled. This code checks to see if 1020 - * it can allocate all necessary resources then calls the 1021 - * nand layer to look for devices 1022 - */ 1023 - static int s3c24xx_nand_probe(struct platform_device *pdev) 1024 - { 1025 - struct s3c2410_platform_nand *plat; 1026 - struct s3c2410_nand_info *info; 1027 - struct s3c2410_nand_mtd *nmtd; 1028 - struct s3c2410_nand_set *sets; 1029 - struct resource *res; 1030 - int err = 0; 1031 - int size; 1032 - int nr_sets; 1033 - int setno; 1034 - 1035 - info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1036 - if (info == NULL) { 1037 - err = -ENOMEM; 1038 - goto exit_error; 1039 - } 1040 - 1041 - platform_set_drvdata(pdev, info); 1042 - 1043 - nand_controller_init(&info->controller); 1044 - info->controller.ops = &s3c24xx_nand_controller_ops; 1045 - 1046 - /* get the clock source and enable it */ 1047 - 1048 - info->clk = devm_clk_get(&pdev->dev, "nand"); 1049 - if (IS_ERR(info->clk)) { 1050 - dev_err(&pdev->dev, "failed to get clock\n"); 1051 - err = -ENOENT; 1052 - goto exit_error; 1053 - } 1054 - 1055 - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); 1056 - 1057 - if (pdev->dev.of_node) 1058 - err = s3c24xx_nand_probe_dt(pdev); 1059 - else 1060 - err = s3c24xx_nand_probe_pdata(pdev); 1061 - 1062 - if (err) 1063 - goto exit_error; 1064 - 1065 - plat = to_nand_plat(pdev); 1066 - 1067 - /* allocate and map the resource */ 1068 - 1069 - /* currently we assume we have the one resource */ 1070 
- res = pdev->resource; 1071 - size = resource_size(res); 1072 - 1073 - info->device = &pdev->dev; 1074 - info->platform = plat; 1075 - 1076 - info->regs = devm_ioremap_resource(&pdev->dev, res); 1077 - if (IS_ERR(info->regs)) { 1078 - err = PTR_ERR(info->regs); 1079 - goto exit_error; 1080 - } 1081 - 1082 - dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs); 1083 - 1084 - if (!plat->sets || plat->nr_sets < 1) { 1085 - err = -EINVAL; 1086 - goto exit_error; 1087 - } 1088 - 1089 - sets = plat->sets; 1090 - nr_sets = plat->nr_sets; 1091 - 1092 - info->mtd_count = nr_sets; 1093 - 1094 - /* allocate our information */ 1095 - 1096 - size = nr_sets * sizeof(*info->mtds); 1097 - info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); 1098 - if (info->mtds == NULL) { 1099 - err = -ENOMEM; 1100 - goto exit_error; 1101 - } 1102 - 1103 - /* initialise all possible chips */ 1104 - 1105 - nmtd = info->mtds; 1106 - 1107 - for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) { 1108 - struct mtd_info *mtd = nand_to_mtd(&nmtd->chip); 1109 - 1110 - pr_debug("initialising set %d (%p, info %p)\n", 1111 - setno, nmtd, info); 1112 - 1113 - mtd->dev.parent = &pdev->dev; 1114 - s3c2410_nand_init_chip(info, nmtd, sets); 1115 - 1116 - err = nand_scan(&nmtd->chip, sets ? 
sets->nr_chips : 1); 1117 - if (err) 1118 - goto exit_error; 1119 - 1120 - s3c2410_nand_add_partition(info, nmtd, sets); 1121 - } 1122 - 1123 - /* initialise the hardware */ 1124 - err = s3c2410_nand_inithw(info); 1125 - if (err != 0) 1126 - goto exit_error; 1127 - 1128 - if (allow_clk_suspend(info)) { 1129 - dev_info(&pdev->dev, "clock idle support enabled\n"); 1130 - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); 1131 - } 1132 - 1133 - return 0; 1134 - 1135 - exit_error: 1136 - s3c24xx_nand_remove(pdev); 1137 - 1138 - if (err == 0) 1139 - err = -EINVAL; 1140 - return err; 1141 - } 1142 - 1143 - /* PM Support */ 1144 - #ifdef CONFIG_PM 1145 - 1146 - static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm) 1147 - { 1148 - struct s3c2410_nand_info *info = platform_get_drvdata(dev); 1149 - 1150 - if (info) { 1151 - info->save_sel = readl(info->sel_reg); 1152 - 1153 - /* For the moment, we must ensure nFCE is high during 1154 - * the time we are suspended. This really should be 1155 - * handled by suspending the MTDs we are using, but 1156 - * that is currently not the case. */ 1157 - 1158 - writel(info->save_sel | info->sel_bit, info->sel_reg); 1159 - 1160 - s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 1161 - } 1162 - 1163 - return 0; 1164 - } 1165 - 1166 - static int s3c24xx_nand_resume(struct platform_device *dev) 1167 - { 1168 - struct s3c2410_nand_info *info = platform_get_drvdata(dev); 1169 - unsigned long sel; 1170 - 1171 - if (info) { 1172 - s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); 1173 - s3c2410_nand_inithw(info); 1174 - 1175 - /* Restore the state of the nFCE line. 
*/ 1176 - 1177 - sel = readl(info->sel_reg); 1178 - sel &= ~info->sel_bit; 1179 - sel |= info->save_sel & info->sel_bit; 1180 - writel(sel, info->sel_reg); 1181 - 1182 - s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); 1183 - } 1184 - 1185 - return 0; 1186 - } 1187 - 1188 - #else 1189 - #define s3c24xx_nand_suspend NULL 1190 - #define s3c24xx_nand_resume NULL 1191 - #endif 1192 - 1193 - /* driver device registration */ 1194 - 1195 - static const struct platform_device_id s3c24xx_driver_ids[] = { 1196 - { 1197 - .name = "s3c2410-nand", 1198 - .driver_data = TYPE_S3C2410, 1199 - }, { 1200 - .name = "s3c2440-nand", 1201 - .driver_data = TYPE_S3C2440, 1202 - }, { 1203 - .name = "s3c2412-nand", 1204 - .driver_data = TYPE_S3C2412, 1205 - }, { 1206 - .name = "s3c6400-nand", 1207 - .driver_data = TYPE_S3C2412, /* compatible with 2412 */ 1208 - }, 1209 - { } 1210 - }; 1211 - 1212 - MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); 1213 - 1214 - static struct platform_driver s3c24xx_nand_driver = { 1215 - .probe = s3c24xx_nand_probe, 1216 - .remove = s3c24xx_nand_remove, 1217 - .suspend = s3c24xx_nand_suspend, 1218 - .resume = s3c24xx_nand_resume, 1219 - .id_table = s3c24xx_driver_ids, 1220 - .driver = { 1221 - .name = "s3c24xx-nand", 1222 - .of_match_table = s3c24xx_nand_dt_ids, 1223 - }, 1224 - }; 1225 - 1226 - module_platform_driver(s3c24xx_nand_driver); 1227 - 1228 - MODULE_LICENSE("GPL"); 1229 - MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1230 - MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
+1 -1
drivers/mtd/nand/spi/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 spinand-objs := core.o otp.o 3 - spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o 3 + spinand-objs += alliancememory.o ato.o esmt.o fmsh.o foresee.o gigadevice.o macronix.o 4 4 spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o 5 5 obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
+60 -15
drivers/mtd/nand/spi/core.c
··· 430 430 * Dirmap accesses are allowed to toggle the CS. 431 431 * Toggling the CS during a continuous read is forbidden. 432 432 */ 433 - if (nbytes && req->continuous) 434 - return -EIO; 433 + if (nbytes && req->continuous) { 434 + /* 435 + * Spi controller with broken support of continuous 436 + * reading was detected. Disable future use of 437 + * continuous reading and return -EAGAIN to retry 438 + * reading within regular mode. 439 + */ 440 + spinand->cont_read_possible = false; 441 + return -EAGAIN; 442 + } 435 443 } 436 444 437 445 if (req->datalen) ··· 907 899 908 900 old_stats = mtd->ecc_stats; 909 901 910 - if (spinand_use_cont_read(mtd, from, ops)) 902 + if (spinand_use_cont_read(mtd, from, ops)) { 911 903 ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips); 912 - else 904 + if (ret == -EAGAIN && !spinand->cont_read_possible) { 905 + /* 906 + * Spi controller with broken support of continuous 907 + * reading was detected (see spinand_read_from_cache_op()), 908 + * repeat reading in regular mode. 
909 + */ 910 + ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); 911 + } 912 + } else { 913 913 ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); 914 + } 914 915 915 916 if (ops->stats) { 916 917 ops->stats->uncorrectable_errors += ··· 1110 1093 return ret; 1111 1094 } 1112 1095 1096 + static struct spi_mem_dirmap_desc *spinand_create_rdesc( 1097 + struct spinand_device *spinand, 1098 + struct spi_mem_dirmap_info *info) 1099 + { 1100 + struct nand_device *nand = spinand_to_nand(spinand); 1101 + struct spi_mem_dirmap_desc *desc = NULL; 1102 + 1103 + if (spinand->cont_read_possible) { 1104 + /* 1105 + * spi controller may return an error if info->length is 1106 + * too large 1107 + */ 1108 + info->length = nanddev_eraseblock_size(nand); 1109 + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1110 + spinand->spimem, info); 1111 + } 1112 + 1113 + if (IS_ERR_OR_NULL(desc)) { 1114 + /* 1115 + * continuous reading is not supported by flash or 1116 + * its spi controller, use regular reading 1117 + */ 1118 + spinand->cont_read_possible = false; 1119 + 1120 + info->length = nanddev_page_size(nand) + 1121 + nanddev_per_page_oobsize(nand); 1122 + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1123 + spinand->spimem, info); 1124 + } 1125 + 1126 + return desc; 1127 + } 1128 + 1113 1129 static int spinand_create_dirmap(struct spinand_device *spinand, 1114 1130 unsigned int plane) 1115 1131 { 1116 1132 struct nand_device *nand = spinand_to_nand(spinand); 1117 - struct spi_mem_dirmap_info info = { 1118 - .length = nanddev_page_size(nand) + 1119 - nanddev_per_page_oobsize(nand), 1120 - }; 1133 + struct spi_mem_dirmap_info info = { 0 }; 1121 1134 struct spi_mem_dirmap_desc *desc; 1122 - 1123 - if (spinand->cont_read_possible) 1124 - info.length = nanddev_eraseblock_size(nand); 1125 1135 1126 1136 /* The plane number is passed in MSB just above the column address */ 1127 1137 info.offset = plane << 
fls(nand->memorg.pagesize); 1128 1138 1139 + info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 1129 1140 info.op_tmpl = *spinand->op_templates.update_cache; 1130 1141 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1131 1142 spinand->spimem, &info); ··· 1163 1118 spinand->dirmaps[plane].wdesc = desc; 1164 1119 1165 1120 info.op_tmpl = *spinand->op_templates.read_cache; 1166 - desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1167 - spinand->spimem, &info); 1121 + desc = spinand_create_rdesc(spinand, &info); 1168 1122 if (IS_ERR(desc)) 1169 1123 return PTR_ERR(desc); 1170 1124 ··· 1176 1132 return 0; 1177 1133 } 1178 1134 1135 + info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 1179 1136 info.op_tmpl = *spinand->op_templates.update_cache; 1180 1137 info.op_tmpl.data.ecc = true; 1181 1138 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, ··· 1188 1143 1189 1144 info.op_tmpl = *spinand->op_templates.read_cache; 1190 1145 info.op_tmpl.data.ecc = true; 1191 - desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1192 - spinand->spimem, &info); 1146 + desc = spinand_create_rdesc(spinand, &info); 1193 1147 if (IS_ERR(desc)) 1194 1148 return PTR_ERR(desc); 1195 1149 ··· 1228 1184 &alliancememory_spinand_manufacturer, 1229 1185 &ato_spinand_manufacturer, 1230 1186 &esmt_c8_spinand_manufacturer, 1187 + &fmsh_spinand_manufacturer, 1231 1188 &foresee_spinand_manufacturer, 1232 1189 &gigadevice_spinand_manufacturer, 1233 1190 &macronix_spinand_manufacturer,
+74
drivers/mtd/nand/spi/fmsh.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd. 4 + * 5 + * Author: Dingqiang Lin <jon.lin@rock-chips.com> 6 + */ 7 + 8 + #include <linux/device.h> 9 + #include <linux/kernel.h> 10 + #include <linux/mtd/spinand.h> 11 + 12 + #define SPINAND_MFR_FMSH 0xA1 13 + 14 + static SPINAND_OP_VARIANTS(read_cache_variants, 15 + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0), 16 + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0), 17 + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0), 18 + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0), 19 + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0), 20 + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0)); 21 + 22 + static SPINAND_OP_VARIANTS(write_cache_variants, 23 + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), 24 + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); 25 + 26 + static SPINAND_OP_VARIANTS(update_cache_variants, 27 + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), 28 + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); 29 + 30 + static int fm25s01a_ooblayout_ecc(struct mtd_info *mtd, int section, 31 + struct mtd_oob_region *region) 32 + { 33 + return -ERANGE; 34 + } 35 + 36 + static int fm25s01a_ooblayout_free(struct mtd_info *mtd, int section, 37 + struct mtd_oob_region *region) 38 + { 39 + if (section) 40 + return -ERANGE; 41 + 42 + region->offset = 2; 43 + region->length = 62; 44 + 45 + return 0; 46 + } 47 + 48 + static const struct mtd_ooblayout_ops fm25s01a_ooblayout = { 49 + .ecc = fm25s01a_ooblayout_ecc, 50 + .free = fm25s01a_ooblayout_free, 51 + }; 52 + 53 + static const struct spinand_info fmsh_spinand_table[] = { 54 + SPINAND_INFO("FM25S01A", 55 + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4), 56 + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 57 + NAND_ECCREQ(1, 512), 58 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 59 + &write_cache_variants, 60 + 
&update_cache_variants), 61 + SPINAND_HAS_QE_BIT, 62 + SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)), 63 + }; 64 + 65 + static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = { 66 + }; 67 + 68 + const struct spinand_manufacturer fmsh_spinand_manufacturer = { 69 + .id = SPINAND_MFR_FMSH, 70 + .name = "Fudan Micro", 71 + .chips = fmsh_spinand_table, 72 + .nchips = ARRAY_SIZE(fmsh_spinand_table), 73 + .ops = &fmsh_spinand_manuf_ops, 74 + };
+105 -2
drivers/mtd/nand/spi/gigadevice.c
··· 4 4 * Chuanhong Guo <gch981213@gmail.com> 5 5 */ 6 6 7 + #include <linux/bitfield.h> 7 8 #include <linux/device.h> 8 9 #include <linux/kernel.h> 9 10 #include <linux/mtd/spinand.h> ··· 23 22 #define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4) 24 23 #define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4) 25 24 #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) 25 + 26 + /* Feature bit definitions */ 27 + #define GD_FEATURE_NR BIT(3) /* Normal Read(1=normal,0=continuous) */ 28 + #define GD_FEATURE_CRDC BIT(2) /* Continuous Read Dummy */ 29 + 30 + /* ECC status extraction helpers */ 31 + #define GD_ECCSR_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr) 32 + #define GD_ECCSR_ACCUMULATED(eccsr) FIELD_GET(GENMASK(7, 4), eccsr) 33 + 34 + struct gigadevice_priv { 35 + bool continuous_read; 36 + }; 26 37 27 38 static SPINAND_OP_VARIANTS(read_cache_variants, 28 39 SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0), ··· 75 62 static SPINAND_OP_VARIANTS(update_cache_variants, 76 63 SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), 77 64 SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); 65 + 66 + static int gd5fxgm9_get_eccsr(struct spinand_device *spinand, u8 *eccsr) 67 + { 68 + struct gigadevice_priv *priv = spinand->priv; 69 + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), 70 + SPI_MEM_OP_NO_ADDR, 71 + SPI_MEM_OP_DUMMY(1, 1), 72 + SPI_MEM_OP_DATA_IN(1, eccsr, 1)); 73 + int ret; 74 + 75 + ret = spi_mem_exec_op(spinand->spimem, &op); 76 + if (ret) 77 + return ret; 78 + 79 + if (priv->continuous_read) 80 + *eccsr = GD_ECCSR_ACCUMULATED(*eccsr); 81 + else 82 + *eccsr = GD_ECCSR_LAST_PAGE(*eccsr); 83 + 84 + return 0; 85 + } 86 + 87 + static int gd5fxgm9_ecc_get_status(struct spinand_device *spinand, u8 status) 88 + { 89 + struct nand_device *nand = spinand_to_nand(spinand); 90 + u8 eccsr; 91 + int ret; 92 + 93 + switch (status & STATUS_ECC_MASK) { 94 + case STATUS_ECC_NO_BITFLIPS: 95 + return 0; 96 + 97 + case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: 98 
+ ret = gd5fxgm9_get_eccsr(spinand, spinand->scratchbuf); 99 + if (ret) 100 + return nanddev_get_ecc_conf(nand)->strength; 101 + 102 + eccsr = *spinand->scratchbuf; 103 + if (WARN_ON(!eccsr || eccsr > nanddev_get_ecc_conf(nand)->strength)) 104 + return nanddev_get_ecc_conf(nand)->strength; 105 + 106 + return eccsr; 107 + 108 + case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: 109 + return 8; 110 + 111 + case STATUS_ECC_UNCOR_ERROR: 112 + return -EBADMSG; 113 + 114 + default: 115 + return -EINVAL; 116 + } 117 + } 118 + 119 + static int gd5fxgm9_set_continuous_read(struct spinand_device *spinand, bool enable) 120 + { 121 + struct gigadevice_priv *priv = spinand->priv; 122 + int ret; 123 + 124 + ret = spinand_upd_cfg(spinand, GD_FEATURE_NR, 125 + enable ? 0 : GD_FEATURE_NR); 126 + if (ret) 127 + return ret; 128 + 129 + priv->continuous_read = enable; 130 + 131 + return 0; 132 + } 78 133 79 134 static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section, 80 135 struct mtd_oob_region *region) ··· 623 542 &update_cache_variants), 624 543 SPINAND_HAS_QE_BIT, 625 544 SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, 626 - gd5fxgq4uexxg_ecc_get_status)), 545 + gd5fxgm9_ecc_get_status), 546 + SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)), 627 547 SPINAND_INFO("GD5F1GM9RExxG", 628 548 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01), 629 549 NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), ··· 634 552 &update_cache_variants), 635 553 SPINAND_HAS_QE_BIT, 636 554 SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, 637 - gd5fxgq4uexxg_ecc_get_status)), 555 + gd5fxgm9_ecc_get_status), 556 + SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)), 638 557 }; 639 558 559 + static int gd5fxgm9_spinand_init(struct spinand_device *spinand) 560 + { 561 + struct gigadevice_priv *priv; 562 + 563 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 564 + if (!priv) 565 + return -ENOMEM; 566 + 567 + spinand->priv = priv; 568 + 569 + return 0; 570 + } 571 + 572 + static void 
gd5fxgm9_spinand_cleanup(struct spinand_device *spinand) 573 + { 574 + kfree(spinand->priv); 575 + } 576 + 640 577 static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { 578 + .init = gd5fxgm9_spinand_init, 579 + .cleanup = gd5fxgm9_spinand_cleanup, 641 580 }; 642 581 643 582 const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
-14
include/linux/mtd/nand-qpic-common.h
··· 71 71 72 72 /* NAND_DEVn_CFG0 bits */ 73 73 #define DISABLE_STATUS_AFTER_WRITE BIT(4) 74 - #define CW_PER_PAGE 6 75 74 #define CW_PER_PAGE_MASK GENMASK(8, 6) 76 - #define UD_SIZE_BYTES 9 77 75 #define UD_SIZE_BYTES_MASK GENMASK(18, 9) 78 76 #define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19) 79 - #define SPARE_SIZE_BYTES 23 80 77 #define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) 81 - #define NUM_ADDR_CYCLES 27 82 78 #define NUM_ADDR_CYCLES_MASK GENMASK(29, 27) 83 79 #define STATUS_BFR_READ BIT(30) 84 80 #define SET_RD_MODE_AFTER_STATUS BIT(31) ··· 82 86 /* NAND_DEVn_CFG0 bits */ 83 87 #define DEV0_CFG1_ECC_DISABLE BIT(0) 84 88 #define WIDE_FLASH BIT(1) 85 - #define NAND_RECOVERY_CYCLES 2 86 89 #define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2) 87 90 #define CS_ACTIVE_BSY BIT(5) 88 - #define BAD_BLOCK_BYTE_NUM 6 89 91 #define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6) 90 92 #define BAD_BLOCK_IN_SPARE_AREA BIT(16) 91 - #define WR_RD_BSY_GAP 17 92 93 #define WR_RD_BSY_GAP_MASK GENMASK(22, 17) 93 94 #define ENABLE_BCH_ECC BIT(27) 94 95 95 96 /* NAND_DEV0_ECC_CFG bits */ 96 97 #define ECC_CFG_ECC_DISABLE BIT(0) 97 98 #define ECC_SW_RESET BIT(1) 98 - #define ECC_MODE 4 99 99 #define ECC_MODE_MASK GENMASK(5, 4) 100 100 #define ECC_MODE_4BIT 0 101 101 #define ECC_MODE_8BIT 1 102 - #define ECC_PARITY_SIZE_BYTES_BCH 8 103 102 #define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8) 104 - #define ECC_NUM_DATA_BYTES 16 105 103 #define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) 106 104 #define ECC_FORCE_CLK_OPEN BIT(30) 107 105 ··· 110 120 #define SEQ_READ_START_VLD BIT(4) 111 121 112 122 /* NAND_EBI2_ECC_BUF_CFG bits */ 113 - #define NUM_STEPS 0 114 123 #define NUM_STEPS_MASK GENMASK(9, 0) 115 124 116 125 /* NAND_ERASED_CW_DETECT_CFG bits */ ··· 130 141 #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) 131 142 132 143 /* NAND_READ_LOCATION_n bits */ 133 - #define READ_LOCATION_OFFSET 0 134 144 #define READ_LOCATION_OFFSET_MASK GENMASK(9, 0) 135 - #define READ_LOCATION_SIZE 16 136 145 
#define READ_LOCATION_SIZE_MASK GENMASK(25, 16) 137 - #define READ_LOCATION_LAST 31 138 146 #define READ_LOCATION_LAST_MASK BIT(31) 139 147 140 148 /* Version Mask */
+5
include/linux/mtd/nand.h
··· 1136 1136 int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); 1137 1137 int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len); 1138 1138 1139 + int nand_check_erased_ecc_chunk(void *data, int datalen, 1140 + void *ecc, int ecclen, 1141 + void *extraoob, int extraooblen, 1142 + int threshold); 1143 + 1139 1144 #endif /* __LINUX_MTD_NAND_H */
-5
include/linux/mtd/rawnand.h
··· 1519 1519 unsigned char *read_ecc, unsigned char *calc_ecc); 1520 1520 void rawnand_sw_bch_cleanup(struct nand_chip *chip); 1521 1521 1522 - int nand_check_erased_ecc_chunk(void *data, int datalen, 1523 - void *ecc, int ecclen, 1524 - void *extraoob, int extraooblen, 1525 - int threshold); 1526 - 1527 1522 int nand_ecc_choose_conf(struct nand_chip *chip, 1528 1523 const struct nand_ecc_caps *caps, int oobavail); 1529 1524
+1
include/linux/mtd/spinand.h
··· 355 355 extern const struct spinand_manufacturer alliancememory_spinand_manufacturer; 356 356 extern const struct spinand_manufacturer ato_spinand_manufacturer; 357 357 extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; 358 + extern const struct spinand_manufacturer fmsh_spinand_manufacturer; 358 359 extern const struct spinand_manufacturer foresee_spinand_manufacturer; 359 360 extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; 360 361 extern const struct spinand_manufacturer macronix_spinand_manufacturer;
-70
include/linux/platform_data/mtd-nand-s3c2410.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright (c) 2004 Simtec Electronics 4 - * Ben Dooks <ben@simtec.co.uk> 5 - * 6 - * S3C2410 - NAND device controller platform_device info 7 - */ 8 - 9 - #ifndef __MTD_NAND_S3C2410_H 10 - #define __MTD_NAND_S3C2410_H 11 - 12 - #include <linux/mtd/rawnand.h> 13 - 14 - /** 15 - * struct s3c2410_nand_set - define a set of one or more nand chips 16 - * @flash_bbt: Openmoko u-boot can create a Bad Block Table 17 - * Setting this flag will allow the kernel to 18 - * look for it at boot time and also skip the NAND 19 - * scan. 20 - * @options: Default value to set into 'struct nand_chip' options. 21 - * @nr_chips: Number of chips in this set 22 - * @nr_partitions: Number of partitions pointed to by @partitions 23 - * @name: Name of set (optional) 24 - * @nr_map: Map for low-layer logical to physical chip numbers (option) 25 - * @partitions: The mtd partition list 26 - * 27 - * define a set of one or more nand chips registered with an unique mtd. Also 28 - * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger 29 - * a warning at boot time. 30 - */ 31 - struct s3c2410_nand_set { 32 - unsigned int flash_bbt:1; 33 - 34 - unsigned int options; 35 - int nr_chips; 36 - int nr_partitions; 37 - char *name; 38 - int *nr_map; 39 - struct mtd_partition *partitions; 40 - struct device_node *of_node; 41 - }; 42 - 43 - struct s3c2410_platform_nand { 44 - /* timing information for controller, all times in nanoseconds */ 45 - 46 - int tacls; /* time for active CLE/ALE to nWE/nOE */ 47 - int twrph0; /* active time for nWE/nOE */ 48 - int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */ 49 - 50 - unsigned int ignore_unset_ecc:1; 51 - 52 - enum nand_ecc_engine_type engine_type; 53 - 54 - int nr_sets; 55 - struct s3c2410_nand_set *sets; 56 - 57 - void (*select_chip)(struct s3c2410_nand_set *, 58 - int chip); 59 - }; 60 - 61 - /** 62 - * s3c_nand_set_platdata() - register NAND platform data. 
63 - * @nand: The NAND platform data to register with s3c_device_nand. 64 - * 65 - * This function copies the given NAND platform data, @nand and registers 66 - * it with the s3c_device_nand. This allows @nand to be __initdata. 67 - */ 68 - extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand); 69 - 70 - #endif /*__MTD_NAND_S3C2410_H */