Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20160523' of git://git.infradead.org/linux-mtd

Pull MTD updates from Brian Norris:
"First cycle with Boris as NAND maintainer! Many (most) bullets stolen
from him.

Generic:
- Migrated NAND LED trigger to be a generic MTD trigger

NAND:
- Introduction of the "ECC algorithm" concept, to avoid overloading
the ECC mode field too much more
- Replaced the nand_ecclayout infrastructure with something a little
more flexible (finally!) and future proof
- Rework of the OMAP GPMC and NAND drivers; the TI folks pulled some
of this into their own tree as well
- Prepare the sunxi NAND driver to receive DMA support
- Handle bitflips in erased pages on GPMI revisions that do not
support this in hardware.

SPI NOR:
- Start using the spi_flash_read() API for SPI drivers that support
it (i.e., SPI drivers with special memory-mapped flash modes)

And other small scattered improvements"

* tag 'for-linus-20160523' of git://git.infradead.org/linux-mtd: (155 commits)
mtd: spi-nor: support GigaDevice gd25lq64c
mtd: nand_bch: fix spelling of "probably"
mtd: brcmnand: respect ECC algorithm set by NAND subsystem
gpmi-nand: Handle ECC Errors in erased pages
Documentation: devicetree: deprecate "soft_bch" nand-ecc-mode value
mtd: nand: add support for "nand-ecc-algo" DT property
mtd: mtd: drop NAND_ECC_SOFT_BCH enum value
mtd: drop support for NAND_ECC_SOFT_BCH as "soft_bch" mapping
mtd: nand: read ECC algorithm from the new field
mtd: nand: fsmc: validate ECC setup by checking algorithm directly
mtd: nand: set ECC algorithm to Hamming on fallback
staging: mt29f_spinand: set ECC algorithm explicitly
CRIS v32: nand: set ECC algorithm explicitly
mtd: nand: atmel: set ECC algorithm explicitly
mtd: nand: davinci: set ECC algorithm explicitly
mtd: nand: bf5xx: set ECC algorithm explicitly
mtd: nand: omap2: Fix high memory dma prefetch transfer
mtd: nand: omap2: Start dma request before enabling prefetch
mtd: nandsim: add __init attribute
mtd: nand: move of_get_nand_xxx() helpers into nand_base.c
...

+4352 -2922
+17
Documentation/devicetree/bindings/bus/ti-gpmc.txt Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
··· 32 32 bootloader) are used for the physical address decoding. 33 33 As this will change in the future, filling correct 34 34 values here is a requirement. 35 + - interrupt-controller: The GPMC driver implements and interrupt controller for 36 + the NAND events "fifoevent" and "termcount" plus the 37 + rising/falling edges on the GPMC_WAIT pins. 38 + The interrupt number mapping is as follows 39 + 0 - NAND_fifoevent 40 + 1 - NAND_termcount 41 + 2 - GPMC_WAIT0 pin edge 42 + 3 - GPMC_WAIT1 pin edge, and so on. 43 + - interrupt-cells: Must be set to 2 44 + - gpio-controller: The GPMC driver implements a GPIO controller for the 45 + GPMC WAIT pins that can be used as general purpose inputs. 46 + 0 maps to GPMC_WAIT0 pin. 47 + - gpio-cells: Must be set to 2 35 48 36 49 Timing properties for child nodes. All are optional and default to 0. 37 50 ··· 143 130 #address-cells = <2>; 144 131 #size-cells = <1>; 145 132 ranges = <0 0 0x08000000 0x10000000>; /* CS0 @addr 0x8000000, size 0x10000000 */ 133 + interrupt-controller; 134 + #interrupt-cells = <2>; 135 + gpio-controller; 136 + #gpio-cells = <2>; 146 137 147 138 /* child nodes go here */ 148 139 };
+1
Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
··· 24 24 brcm,brcmnand-v5.0 25 25 brcm,brcmnand-v6.0 26 26 brcm,brcmnand-v6.1 27 + brcm,brcmnand-v6.2 27 28 brcm,brcmnand-v7.0 28 29 brcm,brcmnand-v7.1 29 30 brcm,brcmnand
+15 -4
Documentation/devicetree/bindings/mtd/gpmc-nand.txt
··· 13 13 14 14 Required properties: 15 15 16 - - reg: The CS line the peripheral is connected to 16 + - compatible: "ti,omap2-nand" 17 + - reg: range id (CS number), base offset and length of the 18 + NAND I/O space 19 + - interrupt-parent: must point to gpmc node 20 + - interrupts: Two interrupt specifiers, one for fifoevent, one for termcount. 17 21 18 22 Optional properties: 19 23 ··· 48 44 locating ECC errors for BCHx algorithms. SoC devices which have 49 45 ELM hardware engines should specify this device node in .dtsi 50 46 Using ELM for ECC error correction frees some CPU cycles. 47 + - rb-gpios: GPIO specifier for the ready/busy# pin. 51 48 52 49 For inline partition table parsing (optional): 53 50 ··· 60 55 gpmc: gpmc@50000000 { 61 56 compatible = "ti,am3352-gpmc"; 62 57 ti,hwmods = "gpmc"; 63 - reg = <0x50000000 0x1000000>; 58 + reg = <0x50000000 0x36c>; 64 59 interrupts = <100>; 65 60 gpmc,num-cs = <8>; 66 61 gpmc,num-waitpins = <2>; 67 62 #address-cells = <2>; 68 63 #size-cells = <1>; 69 - ranges = <0 0 0x08000000 0x2000>; /* CS0: NAND */ 64 + ranges = <0 0 0x08000000 0x1000000>; /* CS0 space, 16MB */ 70 65 elm_id = <&elm>; 66 + interrupt-controller; 67 + #interrupt-cells = <2>; 71 68 72 69 nand@0,0 { 73 - reg = <0 0 0>; /* CS0, offset 0 */ 70 + compatible = "ti,omap2-nand"; 71 + reg = <0 0 4>; /* CS0, offset 0, NAND I/O window 4 */ 72 + interrupt-parent = <&gpmc>; 73 + interrupts = <0 IRQ_TYPE_NONE>, <1 IRQ_TYPE NONE>; 74 74 nand-bus-width = <16>; 75 75 ti,nand-ecc-opt = "bch8"; 76 76 ti,nand-xfer-type = "polled"; 77 + rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ 77 78 78 79 gpmc,sync-clk-ps = <0>; 79 80 gpmc,cs-on-ns = <0>;
+42 -3
Documentation/devicetree/bindings/mtd/nand.txt
··· 1 - * MTD generic binding 1 + * NAND chip and NAND controller generic binding 2 + 3 + NAND controller/NAND chip representation: 4 + 5 + The NAND controller should be represented with its own DT node, and all 6 + NAND chips attached to this controller should be defined as children nodes 7 + of the NAND controller. This representation should be enforced even for 8 + simple controllers supporting only one chip. 9 + 10 + Mandatory NAND controller properties: 11 + - #address-cells: depends on your controller. Should at least be 1 to 12 + encode the CS line id. 13 + - #size-cells: depends on your controller. Put zero unless you need a 14 + mapping between CS lines and dedicated memory regions 15 + 16 + Optional NAND controller properties 17 + - ranges: only needed if you need to define a mapping between CS lines and 18 + memory regions 19 + 20 + Optional NAND chip properties: 2 21 3 22 - nand-ecc-mode : String, operation mode of the NAND ecc mode. 4 - Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first", 5 - "soft_bch". 23 + Supported values are: "none", "soft", "hw", "hw_syndrome", 24 + "hw_oob_first". 25 + Deprecated values: 26 + "soft_bch": use "soft" and nand-ecc-algo instead 27 + - nand-ecc-algo: string, algorithm of NAND ECC. 28 + Supported values are: "hamming", "bch". 6 29 - nand-bus-width : 8 or 16 bus width if not present 8 7 30 - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false 8 31 ··· 42 19 The interpretation of these parameters is implementation-defined, so not all 43 20 implementations must support all possible combinations. However, implementations 44 21 are encouraged to further specify the value(s) they support. 22 + 23 + Example: 24 + 25 + nand-controller { 26 + #address-cells = <1>; 27 + #size-cells = <0>; 28 + 29 + /* controller specific properties */ 30 + 31 + nand@0 { 32 + reg = <0>; 33 + nand-ecc-mode = "soft_bch"; 34 + 35 + /* controller specific properties */ 36 + }; 37 + };
+1 -6
arch/arm/mach-omap2/gpmc-nand.c
··· 97 97 gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT); 98 98 99 99 memset(&s, 0, sizeof(struct gpmc_settings)); 100 - if (gpmc_nand_data->of_node) 101 - gpmc_read_settings_dt(gpmc_nand_data->of_node, &s); 102 - else 103 - gpmc_set_legacy(gpmc_nand_data, &s); 100 + gpmc_set_legacy(gpmc_nand_data, &s); 104 101 105 102 s.device_nand = true; 106 103 ··· 117 120 err = gpmc_configure(GPMC_CONFIG_WP, 0); 118 121 if (err < 0) 119 122 goto out_free_cs; 120 - 121 - gpmc_update_nand_reg(&gpmc_nand_data->reg, gpmc_nand_data->cs); 122 123 123 124 if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) { 124 125 pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
+45 -10
arch/arm/mach-pxa/spitz.c
··· 763 763 .pattern = scan_ff_pattern 764 764 }; 765 765 766 - static struct nand_ecclayout akita_oobinfo = { 767 - .oobfree = { {0x08, 0x09} }, 768 - .eccbytes = 24, 769 - .eccpos = { 770 - 0x05, 0x01, 0x02, 0x03, 0x06, 0x07, 0x15, 0x11, 771 - 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, 772 - 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37, 773 - }, 766 + static int akita_ooblayout_ecc(struct mtd_info *mtd, int section, 767 + struct mtd_oob_region *oobregion) 768 + { 769 + if (section > 12) 770 + return -ERANGE; 771 + 772 + switch (section % 3) { 773 + case 0: 774 + oobregion->offset = 5; 775 + oobregion->length = 1; 776 + break; 777 + 778 + case 1: 779 + oobregion->offset = 1; 780 + oobregion->length = 3; 781 + break; 782 + 783 + case 2: 784 + oobregion->offset = 6; 785 + oobregion->length = 2; 786 + break; 787 + } 788 + 789 + oobregion->offset += (section / 3) * 0x10; 790 + 791 + return 0; 792 + } 793 + 794 + static int akita_ooblayout_free(struct mtd_info *mtd, int section, 795 + struct mtd_oob_region *oobregion) 796 + { 797 + if (section) 798 + return -ERANGE; 799 + 800 + oobregion->offset = 8; 801 + oobregion->length = 9; 802 + 803 + return 0; 804 + } 805 + 806 + static const struct mtd_ooblayout_ops akita_ooblayout_ops = { 807 + .ecc = akita_ooblayout_ecc, 808 + .free = akita_ooblayout_free, 774 809 }; 775 810 776 811 static struct sharpsl_nand_platform_data spitz_nand_pdata = { ··· 839 804 } else if (machine_is_akita()) { 840 805 spitz_nand_partitions[1].size = 58 * 1024 * 1024; 841 806 spitz_nand_bbt.len = 1; 842 - spitz_nand_pdata.ecc_layout = &akita_oobinfo; 807 + spitz_nand_pdata.ecc_layout = &akita_ooblayout_ops; 843 808 } else if (machine_is_borzoi()) { 844 809 spitz_nand_partitions[1].size = 32 * 1024 * 1024; 845 810 spitz_nand_bbt.len = 1; 846 - spitz_nand_pdata.ecc_layout = &akita_oobinfo; 811 + spitz_nand_pdata.ecc_layout = &akita_ooblayout_ops; 847 812 } 848 813 849 814 platform_device_register(&spitz_nand_device);
+1
arch/cris/arch-v32/drivers/mach-a3/nandflash.c
··· 157 157 /* 20 us command delay time */ 158 158 this->chip_delay = 20; 159 159 this->ecc.mode = NAND_ECC_SOFT; 160 + this->ecc.algo = NAND_ECC_HAMMING; 160 161 161 162 /* Enable the following for a flash based bad block table */ 162 163 /* this->bbt_options = NAND_BBT_USE_FLASH; */
+1
arch/cris/arch-v32/drivers/mach-fs/nandflash.c
··· 148 148 /* 20 us command delay time */ 149 149 this->chip_delay = 20; 150 150 this->ecc.mode = NAND_ECC_SOFT; 151 + this->ecc.algo = NAND_ECC_HAMMING; 151 152 152 153 /* Enable the following for a flash based bad block table */ 153 154 /* this->bbt_options = NAND_BBT_USE_FLASH; */
+1 -1
arch/mips/include/asm/mach-jz4740/jz4740_nand.h
··· 27 27 28 28 unsigned char banks[JZ_NAND_NUM_BANKS]; 29 29 30 - void (*ident_callback)(struct platform_device *, struct nand_chip *, 30 + void (*ident_callback)(struct platform_device *, struct mtd_info *, 31 31 struct mtd_partition **, int *num_partitions); 32 32 }; 33 33
+51 -36
arch/mips/jz4740/board-qi_lb60.c
··· 48 48 #define QI_LB60_GPIO_KEYIN8 JZ_GPIO_PORTD(26) 49 49 50 50 /* NAND */ 51 - static struct nand_ecclayout qi_lb60_ecclayout_1gb = { 52 - .eccbytes = 36, 53 - .eccpos = { 54 - 6, 7, 8, 9, 10, 11, 12, 13, 55 - 14, 15, 16, 17, 18, 19, 20, 21, 56 - 22, 23, 24, 25, 26, 27, 28, 29, 57 - 30, 31, 32, 33, 34, 35, 36, 37, 58 - 38, 39, 40, 41 59 - }, 60 - .oobfree = { 61 - { .offset = 2, .length = 4 }, 62 - { .offset = 42, .length = 22 } 63 - }, 64 - }; 65 51 66 52 /* Early prototypes of the QI LB60 had only 1GB of NAND. 67 53 * In order to support these devices as well the partition and ecc layout is ··· 70 84 }, 71 85 }; 72 86 73 - static struct nand_ecclayout qi_lb60_ecclayout_2gb = { 74 - .eccbytes = 72, 75 - .eccpos = { 76 - 12, 13, 14, 15, 16, 17, 18, 19, 77 - 20, 21, 22, 23, 24, 25, 26, 27, 78 - 28, 29, 30, 31, 32, 33, 34, 35, 79 - 36, 37, 38, 39, 40, 41, 42, 43, 80 - 44, 45, 46, 47, 48, 49, 50, 51, 81 - 52, 53, 54, 55, 56, 57, 58, 59, 82 - 60, 61, 62, 63, 64, 65, 66, 67, 83 - 68, 69, 70, 71, 72, 73, 74, 75, 84 - 76, 77, 78, 79, 80, 81, 82, 83 85 - }, 86 - .oobfree = { 87 - { .offset = 2, .length = 10 }, 88 - { .offset = 84, .length = 44 }, 89 - }, 90 - }; 91 - 92 87 static struct mtd_partition qi_lb60_partitions_2gb[] = { 93 88 { 94 89 .name = "NAND BOOT partition", ··· 88 121 }, 89 122 }; 90 123 124 + static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section, 125 + struct mtd_oob_region *oobregion) 126 + { 127 + if (section) 128 + return -ERANGE; 129 + 130 + oobregion->length = 36; 131 + oobregion->offset = 6; 132 + 133 + if (mtd->oobsize == 128) { 134 + oobregion->length *= 2; 135 + oobregion->offset *= 2; 136 + } 137 + 138 + return 0; 139 + } 140 + 141 + static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section, 142 + struct mtd_oob_region *oobregion) 143 + { 144 + int eccbytes = 36, eccoff = 6; 145 + 146 + if (section > 1) 147 + return -ERANGE; 148 + 149 + if (mtd->oobsize == 128) { 150 + eccbytes *= 2; 151 + eccoff *= 2; 152 + } 153 + 
154 + if (!section) { 155 + oobregion->offset = 2; 156 + oobregion->length = eccoff - 2; 157 + } else { 158 + oobregion->offset = eccoff + eccbytes; 159 + oobregion->length = mtd->oobsize - oobregion->offset; 160 + } 161 + 162 + return 0; 163 + } 164 + 165 + static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = { 166 + .ecc = qi_lb60_ooblayout_ecc, 167 + .free = qi_lb60_ooblayout_free, 168 + }; 169 + 91 170 static void qi_lb60_nand_ident(struct platform_device *pdev, 92 - struct nand_chip *chip, struct mtd_partition **partitions, 171 + struct mtd_info *mtd, struct mtd_partition **partitions, 93 172 int *num_partitions) 94 173 { 174 + struct nand_chip *chip = mtd_to_nand(mtd); 175 + 95 176 if (chip->page_shift == 12) { 96 - chip->ecc.layout = &qi_lb60_ecclayout_2gb; 97 177 *partitions = qi_lb60_partitions_2gb; 98 178 *num_partitions = ARRAY_SIZE(qi_lb60_partitions_2gb); 99 179 } else { 100 - chip->ecc.layout = &qi_lb60_ecclayout_1gb; 101 180 *partitions = qi_lb60_partitions_1gb; 102 181 *num_partitions = ARRAY_SIZE(qi_lb60_partitions_1gb); 103 182 } 183 + 184 + mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops); 104 185 } 105 186 106 187 static struct jz_nand_platform_data qi_lb60_nand_pdata = {
-1
drivers/bcma/driver_chipcommon_sflash.c
··· 146 146 return -ENOTSUPP; 147 147 } 148 148 149 - sflash->window = BCMA_SOC_FLASH2; 150 149 sflash->blocksize = e->blocksize; 151 150 sflash->numblocks = e->numblocks; 152 151 sflash->size = sflash->blocksize * sflash->numblocks;
+1
drivers/memory/Kconfig
··· 51 51 52 52 config OMAP_GPMC 53 53 bool 54 + select GPIOLIB 54 55 help 55 56 This driver is for the General Purpose Memory Controller (GPMC) 56 57 present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
+18 -18
drivers/memory/fsl_ifc.c
··· 59 59 { 60 60 int i = 0; 61 61 62 - if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) 62 + if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) 63 63 return -ENODEV; 64 64 65 65 for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { 66 - u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); 66 + u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); 67 67 if (cspr & CSPR_V && (cspr & CSPR_BA) == 68 68 convert_ifc_address(addr_base)) 69 69 return i; ··· 75 75 76 76 static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) 77 77 { 78 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 78 + struct fsl_ifc_global __iomem *ifc = ctrl->gregs; 79 79 80 80 /* 81 81 * Clear all the common status and event registers ··· 104 104 irq_dispose_mapping(ctrl->nand_irq); 105 105 irq_dispose_mapping(ctrl->irq); 106 106 107 - iounmap(ctrl->regs); 107 + iounmap(ctrl->gregs); 108 108 109 109 dev_set_drvdata(&dev->dev, NULL); 110 110 kfree(ctrl); ··· 122 122 123 123 static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) 124 124 { 125 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 125 + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 126 126 unsigned long flags; 127 127 u32 stat; 128 128 ··· 157 157 static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) 158 158 { 159 159 struct fsl_ifc_ctrl *ctrl = data; 160 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 160 + struct fsl_ifc_global __iomem *ifc = ctrl->gregs; 161 161 u32 err_axiid, err_srcid, status, cs_err, err_addr; 162 162 irqreturn_t ret = IRQ_NONE; 163 163 ··· 215 215 { 216 216 int ret = 0; 217 217 int version, banks; 218 + void __iomem *addr; 218 219 219 220 dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); 220 221 ··· 226 225 dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); 227 226 228 227 /* IOMAP the entire IFC region */ 229 - fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); 230 - if (!fsl_ifc_ctrl_dev->regs) { 228 + fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); 229 + if 
(!fsl_ifc_ctrl_dev->gregs) { 231 230 dev_err(&dev->dev, "failed to get memory region\n"); 232 231 ret = -ENODEV; 233 232 goto err; 234 233 } 235 - 236 - version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) & 237 - FSL_IFC_VERSION_MASK; 238 - banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; 239 - dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", 240 - version >> 24, (version >> 16) & 0xf, banks); 241 - 242 - fsl_ifc_ctrl_dev->version = version; 243 - fsl_ifc_ctrl_dev->banks = banks; 244 234 245 235 if (of_property_read_bool(dev->dev.of_node, "little-endian")) { 246 236 fsl_ifc_ctrl_dev->little_endian = true; ··· 241 249 dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); 242 250 } 243 251 244 - version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) & 252 + version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & 245 253 FSL_IFC_VERSION_MASK; 254 + 246 255 banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; 247 256 dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", 248 257 version >> 24, (version >> 16) & 0xf, banks); 249 258 250 259 fsl_ifc_ctrl_dev->version = version; 251 260 fsl_ifc_ctrl_dev->banks = banks; 261 + 262 + addr = fsl_ifc_ctrl_dev->gregs; 263 + if (version >= FSL_IFC_VERSION_2_0_0) 264 + addr += PGOFFSET_64K; 265 + else 266 + addr += PGOFFSET_4K; 267 + fsl_ifc_ctrl_dev->rregs = addr; 252 268 253 269 /* get the Controller level irq */ 254 270 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+423 -244
drivers/memory/omap-gpmc.c
··· 21 21 #include <linux/spinlock.h> 22 22 #include <linux/io.h> 23 23 #include <linux/module.h> 24 + #include <linux/gpio/driver.h> 24 25 #include <linux/interrupt.h> 26 + #include <linux/irqdomain.h> 25 27 #include <linux/platform_device.h> 26 28 #include <linux/of.h> 27 29 #include <linux/of_address.h> 28 - #include <linux/of_mtd.h> 29 30 #include <linux/of_device.h> 30 31 #include <linux/of_platform.h> 31 32 #include <linux/omap-gpmc.h> 32 - #include <linux/mtd/nand.h> 33 33 #include <linux/pm_runtime.h> 34 34 35 35 #include <linux/platform_data/mtd-nand-omap2.h> ··· 81 81 82 82 #define GPMC_CONFIG_LIMITEDADDRESS BIT(1) 83 83 84 + #define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS BIT(0) 85 + 84 86 #define GPMC_CONFIG2_CSEXTRADELAY BIT(7) 85 87 #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) 86 88 #define GPMC_CONFIG4_OEEXTRADELAY BIT(7) ··· 94 92 #define GPMC_CS_SIZE 0x30 95 93 #define GPMC_BCH_SIZE 0x10 96 94 95 + /* 96 + * The first 1MB of GPMC address space is typically mapped to 97 + * the internal ROM. Never allocate the first page, to 98 + * facilitate bug detection; even if we didn't boot from ROM. 99 + * As GPMC minimum partition size is 16MB we can only start from 100 + * there. 
101 + */ 102 + #define GPMC_MEM_START 0x1000000 97 103 #define GPMC_MEM_END 0x3FFFFFFF 98 104 99 105 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ ··· 135 125 #define GPMC_CONFIG_RDY_BSY 0x00000001 136 126 #define GPMC_CONFIG_DEV_SIZE 0x00000002 137 127 #define GPMC_CONFIG_DEV_TYPE 0x00000003 138 - #define GPMC_SET_IRQ_STATUS 0x00000004 139 128 140 129 #define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) 141 130 #define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30) ··· 183 174 #define GPMC_CONFIG_WRITEPROTECT 0x00000010 184 175 #define WR_RD_PIN_MONITORING 0x00600000 185 176 186 - #define GPMC_ENABLE_IRQ 0x0000000d 187 - 188 177 /* ECC commands */ 189 178 #define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ 190 179 #define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ 191 180 #define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */ 192 181 193 - /* XXX: Only NAND irq has been considered,currently these are the only ones used 194 - */ 195 - #define GPMC_NR_IRQ 2 182 + #define GPMC_NR_NAND_IRQS 2 /* number of NAND specific IRQs */ 196 183 197 184 enum gpmc_clk_domain { 198 185 GPMC_CD_FCLK, ··· 202 197 u32 flags; 203 198 204 199 struct resource mem; 205 - }; 206 - 207 - struct gpmc_client_irq { 208 - unsigned irq; 209 - u32 bitmask; 210 200 }; 211 201 212 202 /* Structure to save gpmc cs context */ ··· 231 231 struct gpmc_cs_config cs_context[GPMC_CS_NUM]; 232 232 }; 233 233 234 - static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; 235 - static struct irq_chip gpmc_irq_chip; 236 - static int gpmc_irq_start; 234 + struct gpmc_device { 235 + struct device *dev; 236 + int irq; 237 + struct irq_chip irq_chip; 238 + struct gpio_chip gpio_chip; 239 + int nirqs; 240 + }; 241 + 242 + static struct irq_domain *gpmc_irq_domain; 237 243 238 244 static struct resource gpmc_mem_root; 239 245 static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM]; ··· 247 241 /* Define chip-selects as reserved by default until probe completes */ 248 242 static unsigned int gpmc_cs_num = 
GPMC_CS_NUM; 249 243 static unsigned int gpmc_nr_waitpins; 250 - static struct device *gpmc_dev; 251 - static int gpmc_irq; 252 244 static resource_size_t phys_base, mem_size; 253 245 static unsigned gpmc_capability; 254 246 static void __iomem *gpmc_base; ··· 1058 1054 u32 regval; 1059 1055 1060 1056 switch (cmd) { 1061 - case GPMC_ENABLE_IRQ: 1062 - gpmc_write_reg(GPMC_IRQENABLE, wval); 1063 - break; 1064 - 1065 - case GPMC_SET_IRQ_STATUS: 1066 - gpmc_write_reg(GPMC_IRQSTATUS, wval); 1067 - break; 1068 - 1069 1057 case GPMC_CONFIG_WP: 1070 1058 regval = gpmc_read_reg(GPMC_CONFIG); 1071 1059 if (wval) ··· 1080 1084 { 1081 1085 int i; 1082 1086 1083 - reg->gpmc_status = gpmc_base + GPMC_STATUS; 1087 + reg->gpmc_status = NULL; /* deprecated */ 1084 1088 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + 1085 1089 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; 1086 1090 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + ··· 1114 1118 } 1115 1119 } 1116 1120 1117 - int gpmc_get_client_irq(unsigned irq_config) 1121 + static bool gpmc_nand_writebuffer_empty(void) 1118 1122 { 1119 - int i; 1123 + if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS) 1124 + return true; 1120 1125 1121 - if (hweight32(irq_config) > 1) 1122 - return 0; 1123 - 1124 - for (i = 0; i < GPMC_NR_IRQ; i++) 1125 - if (gpmc_client_irq[i].bitmask & irq_config) 1126 - return gpmc_client_irq[i].irq; 1127 - 1128 - return 0; 1126 + return false; 1129 1127 } 1130 1128 1131 - static int gpmc_irq_endis(unsigned irq, bool endis) 1129 + static struct gpmc_nand_ops nand_ops = { 1130 + .nand_writebuffer_empty = gpmc_nand_writebuffer_empty, 1131 + }; 1132 + 1133 + /** 1134 + * gpmc_omap_get_nand_ops - Get the GPMC NAND interface 1135 + * @regs: the GPMC NAND register map exclusive for NAND use. 1136 + * @cs: GPMC chip select number on which the NAND sits. The 1137 + * register map returned will be specific to this chip select. 1138 + * 1139 + * Returns NULL on error e.g. invalid cs. 
1140 + */ 1141 + struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs) 1132 1142 { 1133 - int i; 1143 + if (cs >= gpmc_cs_num) 1144 + return NULL; 1145 + 1146 + gpmc_update_nand_reg(reg, cs); 1147 + 1148 + return &nand_ops; 1149 + } 1150 + EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops); 1151 + 1152 + int gpmc_get_client_irq(unsigned irq_config) 1153 + { 1154 + if (!gpmc_irq_domain) { 1155 + pr_warn("%s called before GPMC IRQ domain available\n", 1156 + __func__); 1157 + return 0; 1158 + } 1159 + 1160 + /* we restrict this to NAND IRQs only */ 1161 + if (irq_config >= GPMC_NR_NAND_IRQS) 1162 + return 0; 1163 + 1164 + return irq_create_mapping(gpmc_irq_domain, irq_config); 1165 + } 1166 + 1167 + static int gpmc_irq_endis(unsigned long hwirq, bool endis) 1168 + { 1134 1169 u32 regval; 1135 1170 1136 - for (i = 0; i < GPMC_NR_IRQ; i++) 1137 - if (irq == gpmc_client_irq[i].irq) { 1138 - regval = gpmc_read_reg(GPMC_IRQENABLE); 1139 - if (endis) 1140 - regval |= gpmc_client_irq[i].bitmask; 1141 - else 1142 - regval &= ~gpmc_client_irq[i].bitmask; 1143 - gpmc_write_reg(GPMC_IRQENABLE, regval); 1144 - break; 1145 - } 1171 + /* bits GPMC_NR_NAND_IRQS to 8 are reserved */ 1172 + if (hwirq >= GPMC_NR_NAND_IRQS) 1173 + hwirq += 8 - GPMC_NR_NAND_IRQS; 1174 + 1175 + regval = gpmc_read_reg(GPMC_IRQENABLE); 1176 + if (endis) 1177 + regval |= BIT(hwirq); 1178 + else 1179 + regval &= ~BIT(hwirq); 1180 + gpmc_write_reg(GPMC_IRQENABLE, regval); 1146 1181 1147 1182 return 0; 1148 1183 } 1149 1184 1150 1185 static void gpmc_irq_disable(struct irq_data *p) 1151 1186 { 1152 - gpmc_irq_endis(p->irq, false); 1187 + gpmc_irq_endis(p->hwirq, false); 1153 1188 } 1154 1189 1155 1190 static void gpmc_irq_enable(struct irq_data *p) 1156 1191 { 1157 - gpmc_irq_endis(p->irq, true); 1192 + gpmc_irq_endis(p->hwirq, true); 1158 1193 } 1159 1194 1160 - static void gpmc_irq_noop(struct irq_data *data) { } 1161 - 1162 - static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { 
return 0; } 1163 - 1164 - static int gpmc_setup_irq(void) 1195 + static void gpmc_irq_mask(struct irq_data *d) 1165 1196 { 1166 - int i; 1197 + gpmc_irq_endis(d->hwirq, false); 1198 + } 1199 + 1200 + static void gpmc_irq_unmask(struct irq_data *d) 1201 + { 1202 + gpmc_irq_endis(d->hwirq, true); 1203 + } 1204 + 1205 + static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge) 1206 + { 1167 1207 u32 regval; 1168 1208 1169 - if (!gpmc_irq) 1209 + /* NAND IRQs polarity is not configurable */ 1210 + if (hwirq < GPMC_NR_NAND_IRQS) 1211 + return; 1212 + 1213 + /* WAITPIN starts at BIT 8 */ 1214 + hwirq += 8 - GPMC_NR_NAND_IRQS; 1215 + 1216 + regval = gpmc_read_reg(GPMC_CONFIG); 1217 + if (rising_edge) 1218 + regval &= ~BIT(hwirq); 1219 + else 1220 + regval |= BIT(hwirq); 1221 + 1222 + gpmc_write_reg(GPMC_CONFIG, regval); 1223 + } 1224 + 1225 + static void gpmc_irq_ack(struct irq_data *d) 1226 + { 1227 + unsigned int hwirq = d->hwirq; 1228 + 1229 + /* skip reserved bits */ 1230 + if (hwirq >= GPMC_NR_NAND_IRQS) 1231 + hwirq += 8 - GPMC_NR_NAND_IRQS; 1232 + 1233 + /* Setting bit to 1 clears (or Acks) the interrupt */ 1234 + gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq)); 1235 + } 1236 + 1237 + static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger) 1238 + { 1239 + /* can't set type for NAND IRQs */ 1240 + if (d->hwirq < GPMC_NR_NAND_IRQS) 1170 1241 return -EINVAL; 1171 1242 1172 - gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); 1173 - if (gpmc_irq_start < 0) { 1174 - pr_err("irq_alloc_descs failed\n"); 1175 - return gpmc_irq_start; 1243 + /* We can support either rising or falling edge at a time */ 1244 + if (trigger == IRQ_TYPE_EDGE_FALLING) 1245 + gpmc_irq_edge_config(d->hwirq, false); 1246 + else if (trigger == IRQ_TYPE_EDGE_RISING) 1247 + gpmc_irq_edge_config(d->hwirq, true); 1248 + else 1249 + return -EINVAL; 1250 + 1251 + return 0; 1252 + } 1253 + 1254 + static int gpmc_irq_map(struct irq_domain *d, unsigned int virq, 1255 + 
irq_hw_number_t hw) 1256 + { 1257 + struct gpmc_device *gpmc = d->host_data; 1258 + 1259 + irq_set_chip_data(virq, gpmc); 1260 + if (hw < GPMC_NR_NAND_IRQS) { 1261 + irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN); 1262 + irq_set_chip_and_handler(virq, &gpmc->irq_chip, 1263 + handle_simple_irq); 1264 + } else { 1265 + irq_set_chip_and_handler(virq, &gpmc->irq_chip, 1266 + handle_edge_irq); 1176 1267 } 1177 1268 1178 - gpmc_irq_chip.name = "gpmc"; 1179 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; 1180 - gpmc_irq_chip.irq_enable = gpmc_irq_enable; 1181 - gpmc_irq_chip.irq_disable = gpmc_irq_disable; 1182 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; 1183 - gpmc_irq_chip.irq_ack = gpmc_irq_noop; 1184 - gpmc_irq_chip.irq_mask = gpmc_irq_noop; 1185 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop; 1269 + return 0; 1270 + } 1186 1271 1187 - gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; 1188 - gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; 1272 + static const struct irq_domain_ops gpmc_irq_domain_ops = { 1273 + .map = gpmc_irq_map, 1274 + .xlate = irq_domain_xlate_twocell, 1275 + }; 1189 1276 1190 - for (i = 0; i < GPMC_NR_IRQ; i++) { 1191 - gpmc_client_irq[i].irq = gpmc_irq_start + i; 1192 - irq_set_chip_and_handler(gpmc_client_irq[i].irq, 1193 - &gpmc_irq_chip, handle_simple_irq); 1194 - irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST, 1195 - IRQ_NOAUTOEN); 1277 + static irqreturn_t gpmc_handle_irq(int irq, void *data) 1278 + { 1279 + int hwirq, virq; 1280 + u32 regval, regvalx; 1281 + struct gpmc_device *gpmc = data; 1282 + 1283 + regval = gpmc_read_reg(GPMC_IRQSTATUS); 1284 + regvalx = regval; 1285 + 1286 + if (!regval) 1287 + return IRQ_NONE; 1288 + 1289 + for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) { 1290 + /* skip reserved status bits */ 1291 + if (hwirq == GPMC_NR_NAND_IRQS) 1292 + regvalx >>= 8 - GPMC_NR_NAND_IRQS; 1293 + 1294 + if (regvalx & BIT(hwirq)) { 1295 + virq = irq_find_mapping(gpmc_irq_domain, hwirq); 1296 + if (!virq) { 1297 + 
dev_warn(gpmc->dev, 1298 + "spurious irq detected hwirq %d, virq %d\n", 1299 + hwirq, virq); 1300 + } 1301 + 1302 + generic_handle_irq(virq); 1303 + } 1196 1304 } 1305 + 1306 + gpmc_write_reg(GPMC_IRQSTATUS, regval); 1307 + 1308 + return IRQ_HANDLED; 1309 + } 1310 + 1311 + static int gpmc_setup_irq(struct gpmc_device *gpmc) 1312 + { 1313 + u32 regval; 1314 + int rc; 1197 1315 1198 1316 /* Disable interrupts */ 1199 1317 gpmc_write_reg(GPMC_IRQENABLE, 0); ··· 1316 1206 regval = gpmc_read_reg(GPMC_IRQSTATUS); 1317 1207 gpmc_write_reg(GPMC_IRQSTATUS, regval); 1318 1208 1319 - return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); 1320 - } 1209 + gpmc->irq_chip.name = "gpmc"; 1210 + gpmc->irq_chip.irq_enable = gpmc_irq_enable; 1211 + gpmc->irq_chip.irq_disable = gpmc_irq_disable; 1212 + gpmc->irq_chip.irq_ack = gpmc_irq_ack; 1213 + gpmc->irq_chip.irq_mask = gpmc_irq_mask; 1214 + gpmc->irq_chip.irq_unmask = gpmc_irq_unmask; 1215 + gpmc->irq_chip.irq_set_type = gpmc_irq_set_type; 1321 1216 1322 - static int gpmc_free_irq(void) 1323 - { 1324 - int i; 1325 - 1326 - if (gpmc_irq) 1327 - free_irq(gpmc_irq, NULL); 1328 - 1329 - for (i = 0; i < GPMC_NR_IRQ; i++) { 1330 - irq_set_handler(gpmc_client_irq[i].irq, NULL); 1331 - irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip); 1217 + gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node, 1218 + gpmc->nirqs, 1219 + &gpmc_irq_domain_ops, 1220 + gpmc); 1221 + if (!gpmc_irq_domain) { 1222 + dev_err(gpmc->dev, "IRQ domain add failed\n"); 1223 + return -ENODEV; 1332 1224 } 1333 1225 1334 - irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); 1226 + rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc); 1227 + if (rc) { 1228 + dev_err(gpmc->dev, "failed to request irq %d: %d\n", 1229 + gpmc->irq, rc); 1230 + irq_domain_remove(gpmc_irq_domain); 1231 + gpmc_irq_domain = NULL; 1232 + } 1233 + 1234 + return rc; 1235 + } 1236 + 1237 + static int gpmc_free_irq(struct gpmc_device *gpmc) 1238 + { 1239 + int hwirq; 1240 + 1241 + 
free_irq(gpmc->irq, gpmc); 1242 + 1243 + for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) 1244 + irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq)); 1245 + 1246 + irq_domain_remove(gpmc_irq_domain); 1247 + gpmc_irq_domain = NULL; 1335 1248 1336 1249 return 0; 1337 1250 } ··· 1375 1242 { 1376 1243 int cs; 1377 1244 1378 - /* 1379 - * The first 1MB of GPMC address space is typically mapped to 1380 - * the internal ROM. Never allocate the first page, to 1381 - * facilitate bug detection; even if we didn't boot from ROM. 1382 - */ 1383 - gpmc_mem_root.start = SZ_1M; 1245 + gpmc_mem_root.start = GPMC_MEM_START; 1384 1246 gpmc_mem_root.end = GPMC_MEM_END; 1385 1247 1386 1248 /* Reserve all regions that has been set up by bootloader */ ··· 1924 1796 of_property_read_bool(np, "gpmc,time-para-granularity"); 1925 1797 } 1926 1798 1927 - #if IS_ENABLED(CONFIG_MTD_NAND) 1928 - 1929 - static const char * const nand_xfer_types[] = { 1930 - [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", 1931 - [NAND_OMAP_POLLED] = "polled", 1932 - [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", 1933 - [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", 1934 - }; 1935 - 1936 - static int gpmc_probe_nand_child(struct platform_device *pdev, 1937 - struct device_node *child) 1938 - { 1939 - u32 val; 1940 - const char *s; 1941 - struct gpmc_timings gpmc_t; 1942 - struct omap_nand_platform_data *gpmc_nand_data; 1943 - 1944 - if (of_property_read_u32(child, "reg", &val) < 0) { 1945 - dev_err(&pdev->dev, "%s has no 'reg' property\n", 1946 - child->full_name); 1947 - return -ENODEV; 1948 - } 1949 - 1950 - gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data), 1951 - GFP_KERNEL); 1952 - if (!gpmc_nand_data) 1953 - return -ENOMEM; 1954 - 1955 - gpmc_nand_data->cs = val; 1956 - gpmc_nand_data->of_node = child; 1957 - 1958 - /* Detect availability of ELM module */ 1959 - gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); 1960 - if (gpmc_nand_data->elm_of_node == NULL) 1961 - 
gpmc_nand_data->elm_of_node = 1962 - of_parse_phandle(child, "elm_id", 0); 1963 - 1964 - /* select ecc-scheme for NAND */ 1965 - if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { 1966 - pr_err("%s: ti,nand-ecc-opt not found\n", __func__); 1967 - return -ENODEV; 1968 - } 1969 - 1970 - if (!strcmp(s, "sw")) 1971 - gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; 1972 - else if (!strcmp(s, "ham1") || 1973 - !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) 1974 - gpmc_nand_data->ecc_opt = 1975 - OMAP_ECC_HAM1_CODE_HW; 1976 - else if (!strcmp(s, "bch4")) 1977 - if (gpmc_nand_data->elm_of_node) 1978 - gpmc_nand_data->ecc_opt = 1979 - OMAP_ECC_BCH4_CODE_HW; 1980 - else 1981 - gpmc_nand_data->ecc_opt = 1982 - OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; 1983 - else if (!strcmp(s, "bch8")) 1984 - if (gpmc_nand_data->elm_of_node) 1985 - gpmc_nand_data->ecc_opt = 1986 - OMAP_ECC_BCH8_CODE_HW; 1987 - else 1988 - gpmc_nand_data->ecc_opt = 1989 - OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; 1990 - else if (!strcmp(s, "bch16")) 1991 - if (gpmc_nand_data->elm_of_node) 1992 - gpmc_nand_data->ecc_opt = 1993 - OMAP_ECC_BCH16_CODE_HW; 1994 - else 1995 - pr_err("%s: BCH16 requires ELM support\n", __func__); 1996 - else 1997 - pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__); 1998 - 1999 - /* select data transfer mode for NAND controller */ 2000 - if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) 2001 - for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++) 2002 - if (!strcasecmp(s, nand_xfer_types[val])) { 2003 - gpmc_nand_data->xfer_type = val; 2004 - break; 2005 - } 2006 - 2007 - gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child); 2008 - 2009 - val = of_get_nand_bus_width(child); 2010 - if (val == 16) 2011 - gpmc_nand_data->devsize = NAND_BUSWIDTH_16; 2012 - 2013 - gpmc_read_timings_dt(child, &gpmc_t); 2014 - gpmc_nand_init(gpmc_nand_data, &gpmc_t); 2015 - 2016 - return 0; 2017 - } 2018 - #else 2019 - static int gpmc_probe_nand_child(struct platform_device *pdev, 
2020 - struct device_node *child) 2021 - { 2022 - return 0; 2023 - } 2024 - #endif 2025 - 2026 1799 #if IS_ENABLED(CONFIG_MTD_ONENAND) 2027 1800 static int gpmc_probe_onenand_child(struct platform_device *pdev, 2028 1801 struct device_node *child) ··· 1979 1950 const char *name; 1980 1951 int ret, cs; 1981 1952 u32 val; 1953 + struct gpio_desc *waitpin_desc = NULL; 1954 + struct gpmc_device *gpmc = platform_get_drvdata(pdev); 1982 1955 1983 1956 if (of_property_read_u32(child, "reg", &cs) < 0) { 1984 1957 dev_err(&pdev->dev, "%s has no 'reg' property\n", ··· 2041 2010 if (ret < 0) { 2042 2011 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", 2043 2012 cs, &res.start); 2013 + if (res.start < GPMC_MEM_START) { 2014 + dev_info(&pdev->dev, 2015 + "GPMC CS %d start cannot be lesser than 0x%x\n", 2016 + cs, GPMC_MEM_START); 2017 + } else if (res.end > GPMC_MEM_END) { 2018 + dev_info(&pdev->dev, 2019 + "GPMC CS %d end cannot be greater than 0x%x\n", 2020 + cs, GPMC_MEM_END); 2021 + } 2044 2022 goto err; 2045 2023 } 2046 2024 2047 - ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); 2048 - if (ret < 0) 2049 - goto err; 2025 + if (of_node_cmp(child->name, "nand") == 0) { 2026 + /* Warn about older DT blobs with no compatible property */ 2027 + if (!of_property_read_bool(child, "compatible")) { 2028 + dev_warn(&pdev->dev, 2029 + "Incompatible NAND node: missing compatible"); 2030 + ret = -EINVAL; 2031 + goto err; 2032 + } 2033 + } 2034 + 2035 + if (of_device_is_compatible(child, "ti,omap2-nand")) { 2036 + /* NAND specific setup */ 2037 + val = 8; 2038 + of_property_read_u32(child, "nand-bus-width", &val); 2039 + switch (val) { 2040 + case 8: 2041 + gpmc_s.device_width = GPMC_DEVWIDTH_8BIT; 2042 + break; 2043 + case 16: 2044 + gpmc_s.device_width = GPMC_DEVWIDTH_16BIT; 2045 + break; 2046 + default: 2047 + dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n", 2048 + child->name); 2049 + ret = -EINVAL; 2050 + goto err; 2051 + } 2052 + 2053 + /* disable 
write protect */ 2054 + gpmc_configure(GPMC_CONFIG_WP, 0); 2055 + gpmc_s.device_nand = true; 2056 + } else { 2057 + ret = of_property_read_u32(child, "bank-width", 2058 + &gpmc_s.device_width); 2059 + if (ret < 0) 2060 + goto err; 2061 + } 2062 + 2063 + /* Reserve wait pin if it is required and valid */ 2064 + if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) { 2065 + unsigned int wait_pin = gpmc_s.wait_pin; 2066 + 2067 + waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip, 2068 + wait_pin, "WAITPIN"); 2069 + if (IS_ERR(waitpin_desc)) { 2070 + dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin); 2071 + ret = PTR_ERR(waitpin_desc); 2072 + goto err; 2073 + } 2074 + } 2050 2075 2051 2076 gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); 2077 + 2052 2078 ret = gpmc_cs_program_settings(cs, &gpmc_s); 2053 2079 if (ret < 0) 2054 - goto err; 2080 + goto err_cs; 2055 2081 2056 2082 ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s); 2057 2083 if (ret) { 2058 2084 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n", 2059 2085 child->name); 2060 - goto err; 2086 + goto err_cs; 2061 2087 } 2062 2088 2063 2089 /* Clear limited address i.e. 
enable A26-A11 */ ··· 2145 2057 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); 2146 2058 ret = -ENODEV; 2147 2059 2060 + err_cs: 2061 + if (waitpin_desc) 2062 + gpiochip_free_own_desc(waitpin_desc); 2063 + 2148 2064 err: 2149 2065 gpmc_cs_free(cs); 2150 2066 2151 2067 return ret; 2152 2068 } 2153 2069 2070 + static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) 2071 + { 2072 + return 1; /* we're input only */ 2073 + } 2074 + 2075 + static int gpmc_gpio_direction_input(struct gpio_chip *chip, 2076 + unsigned int offset) 2077 + { 2078 + return 0; /* we're input only */ 2079 + } 2080 + 2081 + static int gpmc_gpio_direction_output(struct gpio_chip *chip, 2082 + unsigned int offset, int value) 2083 + { 2084 + return -EINVAL; /* we're input only */ 2085 + } 2086 + 2087 + static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset, 2088 + int value) 2089 + { 2090 + } 2091 + 2092 + static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset) 2093 + { 2094 + u32 reg; 2095 + 2096 + offset += 8; 2097 + 2098 + reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset); 2099 + 2100 + return !!reg; 2101 + } 2102 + 2103 + static int gpmc_gpio_init(struct gpmc_device *gpmc) 2104 + { 2105 + int ret; 2106 + 2107 + gpmc->gpio_chip.parent = gpmc->dev; 2108 + gpmc->gpio_chip.owner = THIS_MODULE; 2109 + gpmc->gpio_chip.label = DEVICE_NAME; 2110 + gpmc->gpio_chip.ngpio = gpmc_nr_waitpins; 2111 + gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction; 2112 + gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input; 2113 + gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output; 2114 + gpmc->gpio_chip.set = gpmc_gpio_set; 2115 + gpmc->gpio_chip.get = gpmc_gpio_get; 2116 + gpmc->gpio_chip.base = -1; 2117 + 2118 + ret = gpiochip_add(&gpmc->gpio_chip); 2119 + if (ret < 0) { 2120 + dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret); 2121 + return ret; 2122 + } 2123 + 2124 + return 0; 2125 + } 2126 + 2127 + static void 
gpmc_gpio_exit(struct gpmc_device *gpmc) 2128 + { 2129 + gpiochip_remove(&gpmc->gpio_chip); 2130 + } 2131 + 2154 2132 static int gpmc_probe_dt(struct platform_device *pdev) 2155 2133 { 2156 2134 int ret; 2157 - struct device_node *child; 2158 2135 const struct of_device_id *of_id = 2159 2136 of_match_device(gpmc_dt_ids, &pdev->dev); 2160 2137 ··· 2247 2094 return ret; 2248 2095 } 2249 2096 2097 + return 0; 2098 + } 2099 + 2100 + static int gpmc_probe_dt_children(struct platform_device *pdev) 2101 + { 2102 + int ret; 2103 + struct device_node *child; 2104 + 2250 2105 for_each_available_child_of_node(pdev->dev.of_node, child) { 2251 2106 2252 2107 if (!child->name) 2253 2108 continue; 2254 2109 2255 - if (of_node_cmp(child->name, "nand") == 0) 2256 - ret = gpmc_probe_nand_child(pdev, child); 2257 - else if (of_node_cmp(child->name, "onenand") == 0) 2110 + if (of_node_cmp(child->name, "onenand") == 0) 2258 2111 ret = gpmc_probe_onenand_child(pdev, child); 2259 2112 else 2260 2113 ret = gpmc_probe_generic_child(pdev, child); 2114 + 2115 + if (ret) 2116 + return ret; 2261 2117 } 2262 2118 2263 2119 return 0; 2264 2120 } 2265 2121 #else 2266 2122 static int gpmc_probe_dt(struct platform_device *pdev) 2123 + { 2124 + return 0; 2125 + } 2126 + 2127 + static int gpmc_probe_dt_children(struct platform_device *pdev) 2267 2128 { 2268 2129 return 0; 2269 2130 } ··· 2288 2121 int rc; 2289 2122 u32 l; 2290 2123 struct resource *res; 2124 + struct gpmc_device *gpmc; 2125 + 2126 + gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL); 2127 + if (!gpmc) 2128 + return -ENOMEM; 2129 + 2130 + gpmc->dev = &pdev->dev; 2131 + platform_set_drvdata(pdev, gpmc); 2291 2132 2292 2133 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2293 2134 if (res == NULL) ··· 2309 2134 return PTR_ERR(gpmc_base); 2310 2135 2311 2136 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2312 - if (res == NULL) 2313 - dev_warn(&pdev->dev, "Failed to get resource: irq\n"); 2314 - else 2315 - gpmc_irq 
= res->start; 2137 + if (!res) { 2138 + dev_err(&pdev->dev, "Failed to get resource: irq\n"); 2139 + return -ENOENT; 2140 + } 2141 + 2142 + gpmc->irq = res->start; 2316 2143 2317 2144 gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck"); 2318 2145 if (IS_ERR(gpmc_l3_clk)) { 2319 2146 dev_err(&pdev->dev, "Failed to get GPMC fck\n"); 2320 - gpmc_irq = 0; 2321 2147 return PTR_ERR(gpmc_l3_clk); 2322 2148 } 2323 2149 ··· 2327 2151 return -EINVAL; 2328 2152 } 2329 2153 2154 + if (pdev->dev.of_node) { 2155 + rc = gpmc_probe_dt(pdev); 2156 + if (rc) 2157 + return rc; 2158 + } else { 2159 + gpmc_cs_num = GPMC_CS_NUM; 2160 + gpmc_nr_waitpins = GPMC_NR_WAITPINS; 2161 + } 2162 + 2330 2163 pm_runtime_enable(&pdev->dev); 2331 2164 pm_runtime_get_sync(&pdev->dev); 2332 - 2333 - gpmc_dev = &pdev->dev; 2334 2165 2335 2166 l = gpmc_read_reg(GPMC_REVISION); 2336 2167 ··· 2357 2174 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; 2358 2175 if (GPMC_REVISION_MAJOR(l) > 0x5) 2359 2176 gpmc_capability |= GPMC_HAS_MUX_AAD; 2360 - dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), 2177 + dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), 2361 2178 GPMC_REVISION_MINOR(l)); 2362 2179 2363 2180 gpmc_mem_init(); 2181 + rc = gpmc_gpio_init(gpmc); 2182 + if (rc) 2183 + goto gpio_init_failed; 2364 2184 2365 - if (gpmc_setup_irq() < 0) 2366 - dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); 2367 - 2368 - if (!pdev->dev.of_node) { 2369 - gpmc_cs_num = GPMC_CS_NUM; 2370 - gpmc_nr_waitpins = GPMC_NR_WAITPINS; 2185 + gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins; 2186 + rc = gpmc_setup_irq(gpmc); 2187 + if (rc) { 2188 + dev_err(gpmc->dev, "gpmc_setup_irq failed\n"); 2189 + goto setup_irq_failed; 2371 2190 } 2372 2191 2373 - rc = gpmc_probe_dt(pdev); 2192 + rc = gpmc_probe_dt_children(pdev); 2374 2193 if (rc < 0) { 2375 - pm_runtime_put_sync(&pdev->dev); 2376 - dev_err(gpmc_dev, "failed to probe DT parameters\n"); 2377 - return rc; 2194 + 
dev_err(gpmc->dev, "failed to probe DT children\n"); 2195 + goto dt_children_failed; 2378 2196 } 2379 2197 2380 2198 return 0; 2199 + 2200 + dt_children_failed: 2201 + gpmc_free_irq(gpmc); 2202 + setup_irq_failed: 2203 + gpmc_gpio_exit(gpmc); 2204 + gpio_init_failed: 2205 + gpmc_mem_exit(); 2206 + pm_runtime_put_sync(&pdev->dev); 2207 + pm_runtime_disable(&pdev->dev); 2208 + 2209 + return rc; 2381 2210 } 2382 2211 2383 2212 static int gpmc_remove(struct platform_device *pdev) 2384 2213 { 2385 - gpmc_free_irq(); 2214 + struct gpmc_device *gpmc = platform_get_drvdata(pdev); 2215 + 2216 + gpmc_free_irq(gpmc); 2217 + gpmc_gpio_exit(gpmc); 2386 2218 gpmc_mem_exit(); 2387 2219 pm_runtime_put_sync(&pdev->dev); 2388 2220 pm_runtime_disable(&pdev->dev); 2389 - gpmc_dev = NULL; 2221 + 2390 2222 return 0; 2391 2223 } 2392 2224 ··· 2446 2248 2447 2249 postcore_initcall(gpmc_init); 2448 2250 module_exit(gpmc_exit); 2449 - 2450 - static irqreturn_t gpmc_handle_irq(int irq, void *dev) 2451 - { 2452 - int i; 2453 - u32 regval; 2454 - 2455 - regval = gpmc_read_reg(GPMC_IRQSTATUS); 2456 - 2457 - if (!regval) 2458 - return IRQ_NONE; 2459 - 2460 - for (i = 0; i < GPMC_NR_IRQ; i++) 2461 - if (regval & gpmc_client_irq[i].bitmask) 2462 - generic_handle_irq(gpmc_client_irq[i].irq); 2463 - 2464 - gpmc_write_reg(GPMC_IRQSTATUS, regval); 2465 - 2466 - return IRQ_HANDLED; 2467 - } 2468 2251 2469 2252 static struct omap3_gpmc_regs gpmc_context; 2470 2253
+1
drivers/mtd/chips/Kconfig
··· 115 115 116 116 config MTD_MAP_BANK_WIDTH_32 117 117 bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY 118 + select MTD_COMPLEX_MAPPINGS if HAS_IOMEM 118 119 default n 119 120 help 120 121 If you wish to support CFI devices on a physical bus which is
+24 -5
drivers/mtd/devices/bcm47xxsflash.c
··· 2 2 #include <linux/module.h> 3 3 #include <linux/slab.h> 4 4 #include <linux/delay.h> 5 + #include <linux/ioport.h> 5 6 #include <linux/mtd/mtd.h> 6 7 #include <linux/platform_device.h> 7 8 #include <linux/bcma/bcma.h> ··· 110 109 if ((from + len) > mtd->size) 111 110 return -EINVAL; 112 111 113 - memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from), 114 - len); 112 + memcpy_fromio(buf, b47s->window + from, len); 115 113 *retlen = len; 116 114 117 115 return len; ··· 275 275 276 276 static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) 277 277 { 278 - struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); 278 + struct device *dev = &pdev->dev; 279 + struct bcma_sflash *sflash = dev_get_platdata(dev); 279 280 struct bcm47xxsflash *b47s; 281 + struct resource *res; 280 282 int err; 281 283 282 - b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL); 284 + b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL); 283 285 if (!b47s) 284 286 return -ENOMEM; 285 287 sflash->priv = b47s; 288 + 289 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 290 + if (!res) { 291 + dev_err(dev, "invalid resource\n"); 292 + return -EINVAL; 293 + } 294 + if (!devm_request_mem_region(dev, res->start, resource_size(res), 295 + res->name)) { 296 + dev_err(dev, "can't request region for resource %pR\n", res); 297 + return -EBUSY; 298 + } 299 + b47s->window = ioremap_cache(res->start, resource_size(res)); 300 + if (!b47s->window) { 301 + dev_err(dev, "ioremap failed for resource %pR\n", res); 302 + return -ENOMEM; 303 + } 286 304 287 305 b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash); 288 306 b47s->cc_read = bcm47xxsflash_bcma_cc_read; ··· 315 297 break; 316 298 } 317 299 318 - b47s->window = sflash->window; 319 300 b47s->blocksize = sflash->blocksize; 320 301 b47s->numblocks = sflash->numblocks; 321 302 b47s->size = sflash->size; ··· 323 306 err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0); 324 307 if (err) { 325 
308 pr_err("Failed to register MTD device: %d\n", err); 309 + iounmap(b47s->window); 326 310 return err; 327 311 } 328 312 ··· 339 321 struct bcm47xxsflash *b47s = sflash->priv; 340 322 341 323 mtd_device_unregister(&b47s->mtd); 324 + iounmap(b47s->window); 342 325 343 326 return 0; 344 327 }
+2 -1
drivers/mtd/devices/bcm47xxsflash.h
··· 65 65 66 66 enum bcm47xxsflash_type type; 67 67 68 - u32 window; 68 + void __iomem *window; 69 + 69 70 u32 blocksize; 70 71 u16 numblocks; 71 72 u32 size;
+35 -11
drivers/mtd/devices/docg3.c
··· 67 67 MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, " 68 68 "2=reliable) : MLC normal operations are in normal mode"); 69 69 70 - /** 71 - * struct docg3_oobinfo - DiskOnChip G3 OOB layout 72 - * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC) 73 - * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC) 74 - * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15 75 - */ 76 - static struct nand_ecclayout docg3_oobinfo = { 77 - .eccbytes = 8, 78 - .eccpos = {7, 8, 9, 10, 11, 12, 13, 14}, 79 - .oobfree = {{0, 7}, {15, 1} }, 70 + static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section, 71 + struct mtd_oob_region *oobregion) 72 + { 73 + if (section) 74 + return -ERANGE; 75 + 76 + /* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */ 77 + oobregion->offset = 7; 78 + oobregion->length = 8; 79 + 80 + return 0; 81 + } 82 + 83 + static int docg3_ooblayout_free(struct mtd_info *mtd, int section, 84 + struct mtd_oob_region *oobregion) 85 + { 86 + if (section > 1) 87 + return -ERANGE; 88 + 89 + /* free bytes: byte 0 until byte 6, byte 15 */ 90 + if (!section) { 91 + oobregion->offset = 0; 92 + oobregion->length = 7; 93 + } else { 94 + oobregion->offset = 15; 95 + oobregion->length = 1; 96 + } 97 + 98 + return 0; 99 + } 100 + 101 + static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = { 102 + .ecc = docg3_ooblayout_ecc, 103 + .free = docg3_ooblayout_free, 80 104 }; 81 105 82 106 static inline u8 doc_readb(struct docg3 *docg3, u16 reg) ··· 1881 1857 mtd->_read_oob = doc_read_oob; 1882 1858 mtd->_write_oob = doc_write_oob; 1883 1859 mtd->_block_isbad = doc_block_isbad; 1884 - mtd->ecclayout = &docg3_oobinfo; 1860 + mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops); 1885 1861 mtd->oobavail = 8; 1886 1862 mtd->ecc_strength = DOC_ECC_BCH_T; 1887 1863
+22
drivers/mtd/devices/m25p80.c
··· 131 131 /* convert the dummy cycles to the number of bytes */ 132 132 dummy /= 8; 133 133 134 + if (spi_flash_read_supported(spi)) { 135 + struct spi_flash_read_message msg; 136 + int ret; 137 + 138 + memset(&msg, 0, sizeof(msg)); 139 + 140 + msg.buf = buf; 141 + msg.from = from; 142 + msg.len = len; 143 + msg.read_opcode = nor->read_opcode; 144 + msg.addr_width = nor->addr_width; 145 + msg.dummy_bytes = dummy; 146 + /* TODO: Support other combinations */ 147 + msg.opcode_nbits = SPI_NBITS_SINGLE; 148 + msg.addr_nbits = SPI_NBITS_SINGLE; 149 + msg.data_nbits = m25p80_rx_nbits(nor); 150 + 151 + ret = spi_flash_read(spi, &msg); 152 + *retlen = msg.retlen; 153 + return ret; 154 + } 155 + 134 156 spi_message_init(&m); 135 157 memset(t, 0, (sizeof t)); 136 158
+1 -1
drivers/mtd/devices/pmc551.c
··· 353 353 * mechanism 354 354 * returns the size of the memory region found. 355 355 */ 356 - static int fixup_pmc551(struct pci_dev *dev) 356 + static int __init fixup_pmc551(struct pci_dev *dev) 357 357 { 358 358 #ifdef CONFIG_MTD_PMC551_BUGFIX 359 359 u32 dram_data;
+2 -2
drivers/mtd/maps/ck804xrom.c
··· 112 112 } 113 113 114 114 115 - static int ck804xrom_init_one(struct pci_dev *pdev, 116 - const struct pci_device_id *ent) 115 + static int __init ck804xrom_init_one(struct pci_dev *pdev, 116 + const struct pci_device_id *ent) 117 117 { 118 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 119 119 u8 byte;
+2 -2
drivers/mtd/maps/esb2rom.c
··· 144 144 pci_dev_put(window->pdev); 145 145 } 146 146 147 - static int esb2rom_init_one(struct pci_dev *pdev, 148 - const struct pci_device_id *ent) 147 + static int __init esb2rom_init_one(struct pci_dev *pdev, 148 + const struct pci_device_id *ent) 149 149 { 150 150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 151 151 struct esb2rom_window *window = &esb2rom_window;
+2 -2
drivers/mtd/maps/ichxrom.c
··· 84 84 } 85 85 86 86 87 - static int ichxrom_init_one(struct pci_dev *pdev, 88 - const struct pci_device_id *ent) 87 + static int __init ichxrom_init_one(struct pci_dev *pdev, 88 + const struct pci_device_id *ent) 89 89 { 90 90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 91 91 struct ichxrom_window *window = &ichxrom_window;
+4 -23
drivers/mtd/maps/uclinux.c
··· 4 4 * uclinux.c -- generic memory mapped MTD driver for uclinux 5 5 * 6 6 * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) 7 + * 8 + * License: GPL 7 9 */ 8 10 9 11 /****************************************************************************/ 10 12 11 - #include <linux/module.h> 13 + #include <linux/moduleparam.h> 12 14 #include <linux/types.h> 13 15 #include <linux/init.h> 14 16 #include <linux/kernel.h> ··· 119 117 120 118 return(0); 121 119 } 122 - 123 - /****************************************************************************/ 124 - 125 - static void __exit uclinux_mtd_cleanup(void) 126 - { 127 - if (uclinux_ram_mtdinfo) { 128 - mtd_device_unregister(uclinux_ram_mtdinfo); 129 - map_destroy(uclinux_ram_mtdinfo); 130 - uclinux_ram_mtdinfo = NULL; 131 - } 132 - if (uclinux_ram_map.virt) 133 - uclinux_ram_map.virt = 0; 134 - } 135 - 136 - /****************************************************************************/ 137 - 138 - module_init(uclinux_mtd_init); 139 - module_exit(uclinux_mtd_cleanup); 140 - 141 - MODULE_LICENSE("GPL"); 142 - MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>"); 143 - MODULE_DESCRIPTION("Generic MTD for uClinux"); 120 + device_initcall(uclinux_mtd_init); 144 121 145 122 /****************************************************************************/
+96 -27
drivers/mtd/mtdchar.c
··· 465 465 } 466 466 467 467 /* 468 - * Copies (and truncates, if necessary) data from the larger struct, 469 - * nand_ecclayout, to the smaller, deprecated layout struct, 470 - * nand_ecclayout_user. This is necessary only to support the deprecated 471 - * API ioctl ECCGETLAYOUT while allowing all new functionality to use 472 - * nand_ecclayout flexibly (i.e. the struct may change size in new 473 - * releases without requiring major rewrites). 468 + * Copies (and truncates, if necessary) OOB layout information to the 469 + * deprecated layout struct, nand_ecclayout_user. This is necessary only to 470 + * support the deprecated API ioctl ECCGETLAYOUT while allowing all new 471 + * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops 472 + * can describe any kind of OOB layout with almost zero overhead from a 473 + * memory usage point of view). 474 474 */ 475 - static int shrink_ecclayout(const struct nand_ecclayout *from, 476 - struct nand_ecclayout_user *to) 475 + static int shrink_ecclayout(struct mtd_info *mtd, 476 + struct nand_ecclayout_user *to) 477 477 { 478 - int i; 478 + struct mtd_oob_region oobregion; 479 + int i, section = 0, ret; 479 480 480 - if (!from || !to) 481 + if (!mtd || !to) 481 482 return -EINVAL; 482 483 483 484 memset(to, 0, sizeof(*to)); 484 485 485 - to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES); 486 - for (i = 0; i < to->eccbytes; i++) 487 - to->eccpos[i] = from->eccpos[i]; 486 + to->eccbytes = 0; 487 + for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { 488 + u32 eccpos; 489 + 490 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 491 + if (ret < 0) { 492 + if (ret != -ERANGE) 493 + return ret; 494 + 495 + break; 496 + } 497 + 498 + eccpos = oobregion.offset; 499 + for (; i < MTD_MAX_ECCPOS_ENTRIES && 500 + eccpos < oobregion.offset + oobregion.length; i++) { 501 + to->eccpos[i] = eccpos++; 502 + to->eccbytes++; 503 + } 504 + } 488 505 489 506 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) { 490 - if 
(from->oobfree[i].length == 0 && 491 - from->oobfree[i].offset == 0) 507 + ret = mtd_ooblayout_free(mtd, i, &oobregion); 508 + if (ret < 0) { 509 + if (ret != -ERANGE) 510 + return ret; 511 + 492 512 break; 493 - to->oobavail += from->oobfree[i].length; 494 - to->oobfree[i] = from->oobfree[i]; 513 + } 514 + 515 + to->oobfree[i].offset = oobregion.offset; 516 + to->oobfree[i].length = oobregion.length; 517 + to->oobavail += to->oobfree[i].length; 495 518 } 519 + 520 + return 0; 521 + } 522 + 523 + static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to) 524 + { 525 + struct mtd_oob_region oobregion; 526 + int i, section = 0, ret; 527 + 528 + if (!mtd || !to) 529 + return -EINVAL; 530 + 531 + memset(to, 0, sizeof(*to)); 532 + 533 + to->eccbytes = 0; 534 + for (i = 0; i < ARRAY_SIZE(to->eccpos);) { 535 + u32 eccpos; 536 + 537 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 538 + if (ret < 0) { 539 + if (ret != -ERANGE) 540 + return ret; 541 + 542 + break; 543 + } 544 + 545 + if (oobregion.length + i > ARRAY_SIZE(to->eccpos)) 546 + return -EINVAL; 547 + 548 + eccpos = oobregion.offset; 549 + for (; eccpos < oobregion.offset + oobregion.length; i++) { 550 + to->eccpos[i] = eccpos++; 551 + to->eccbytes++; 552 + } 553 + } 554 + 555 + for (i = 0; i < 8; i++) { 556 + ret = mtd_ooblayout_free(mtd, i, &oobregion); 557 + if (ret < 0) { 558 + if (ret != -ERANGE) 559 + return ret; 560 + 561 + break; 562 + } 563 + 564 + to->oobfree[i][0] = oobregion.offset; 565 + to->oobfree[i][1] = oobregion.length; 566 + } 567 + 568 + to->useecc = MTD_NANDECC_AUTOPLACE; 496 569 497 570 return 0; 498 571 } ··· 888 815 { 889 816 struct nand_oobinfo oi; 890 817 891 - if (!mtd->ecclayout) 818 + if (!mtd->ooblayout) 892 819 return -EOPNOTSUPP; 893 - if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos)) 894 - return -EINVAL; 895 820 896 - oi.useecc = MTD_NANDECC_AUTOPLACE; 897 - memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos)); 898 - memcpy(&oi.oobfree, 
mtd->ecclayout->oobfree, 899 - sizeof(oi.oobfree)); 900 - oi.eccbytes = mtd->ecclayout->eccbytes; 821 + ret = get_oobinfo(mtd, &oi); 822 + if (ret) 823 + return ret; 901 824 902 825 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo))) 903 826 return -EFAULT; ··· 982 913 { 983 914 struct nand_ecclayout_user *usrlay; 984 915 985 - if (!mtd->ecclayout) 916 + if (!mtd->ooblayout) 986 917 return -EOPNOTSUPP; 987 918 988 919 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL); 989 920 if (!usrlay) 990 921 return -ENOMEM; 991 922 992 - shrink_ecclayout(mtd->ecclayout, usrlay); 923 + shrink_ecclayout(mtd, usrlay); 993 924 994 925 if (copy_to_user(argp, usrlay, sizeof(*usrlay))) 995 926 ret = -EFAULT;
+1 -1
drivers/mtd/mtdconcat.c
··· 777 777 778 778 } 779 779 780 - concat->mtd.ecclayout = subdev[0]->ecclayout; 780 + mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout); 781 781 782 782 concat->num_subdev = num_devs; 783 783 concat->mtd.name = name;
+360
drivers/mtd/mtdcore.c
··· 1016 1016 } 1017 1017 EXPORT_SYMBOL_GPL(mtd_write_oob); 1018 1018 1019 + /** 1020 + * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section 1021 + * @mtd: MTD device structure 1022 + * @section: ECC section. Depending on the layout you may have all the ECC 1023 + * bytes stored in a single contiguous section, or one section 1024 + * per ECC chunk (and sometime several sections for a single ECC 1025 + * ECC chunk) 1026 + * @oobecc: OOB region struct filled with the appropriate ECC position 1027 + * information 1028 + * 1029 + * This functions return ECC section information in the OOB area. I you want 1030 + * to get all the ECC bytes information, then you should call 1031 + * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE. 1032 + * 1033 + * Returns zero on success, a negative error code otherwise. 1034 + */ 1035 + int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, 1036 + struct mtd_oob_region *oobecc) 1037 + { 1038 + memset(oobecc, 0, sizeof(*oobecc)); 1039 + 1040 + if (!mtd || section < 0) 1041 + return -EINVAL; 1042 + 1043 + if (!mtd->ooblayout || !mtd->ooblayout->ecc) 1044 + return -ENOTSUPP; 1045 + 1046 + return mtd->ooblayout->ecc(mtd, section, oobecc); 1047 + } 1048 + EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); 1049 + 1050 + /** 1051 + * mtd_ooblayout_free - Get the OOB region definition of a specific free 1052 + * section 1053 + * @mtd: MTD device structure 1054 + * @section: Free section you are interested in. Depending on the layout 1055 + * you may have all the free bytes stored in a single contiguous 1056 + * section, or one section per ECC chunk plus an extra section 1057 + * for the remaining bytes (or other funky layout). 1058 + * @oobfree: OOB region struct filled with the appropriate free position 1059 + * information 1060 + * 1061 + * This functions return free bytes position in the OOB area. 
I you want 1062 + * to get all the free bytes information, then you should call 1063 + * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE. 1064 + * 1065 + * Returns zero on success, a negative error code otherwise. 1066 + */ 1067 + int mtd_ooblayout_free(struct mtd_info *mtd, int section, 1068 + struct mtd_oob_region *oobfree) 1069 + { 1070 + memset(oobfree, 0, sizeof(*oobfree)); 1071 + 1072 + if (!mtd || section < 0) 1073 + return -EINVAL; 1074 + 1075 + if (!mtd->ooblayout || !mtd->ooblayout->free) 1076 + return -ENOTSUPP; 1077 + 1078 + return mtd->ooblayout->free(mtd, section, oobfree); 1079 + } 1080 + EXPORT_SYMBOL_GPL(mtd_ooblayout_free); 1081 + 1082 + /** 1083 + * mtd_ooblayout_find_region - Find the region attached to a specific byte 1084 + * @mtd: mtd info structure 1085 + * @byte: the byte we are searching for 1086 + * @sectionp: pointer where the section id will be stored 1087 + * @oobregion: used to retrieve the ECC position 1088 + * @iter: iterator function. Should be either mtd_ooblayout_free or 1089 + * mtd_ooblayout_ecc depending on the region type you're searching for 1090 + * 1091 + * This functions returns the section id and oobregion information of a 1092 + * specific byte. For example, say you want to know where the 4th ECC byte is 1093 + * stored, you'll use: 1094 + * 1095 + * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc); 1096 + * 1097 + * Returns zero on success, a negative error code otherwise. 
1098 + */ 1099 + static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, 1100 + int *sectionp, struct mtd_oob_region *oobregion, 1101 + int (*iter)(struct mtd_info *, 1102 + int section, 1103 + struct mtd_oob_region *oobregion)) 1104 + { 1105 + int pos = 0, ret, section = 0; 1106 + 1107 + memset(oobregion, 0, sizeof(*oobregion)); 1108 + 1109 + while (1) { 1110 + ret = iter(mtd, section, oobregion); 1111 + if (ret) 1112 + return ret; 1113 + 1114 + if (pos + oobregion->length > byte) 1115 + break; 1116 + 1117 + pos += oobregion->length; 1118 + section++; 1119 + } 1120 + 1121 + /* 1122 + * Adjust region info to make it start at the beginning at the 1123 + * 'start' ECC byte. 1124 + */ 1125 + oobregion->offset += byte - pos; 1126 + oobregion->length -= byte - pos; 1127 + *sectionp = section; 1128 + 1129 + return 0; 1130 + } 1131 + 1132 + /** 1133 + * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific 1134 + * ECC byte 1135 + * @mtd: mtd info structure 1136 + * @eccbyte: the byte we are searching for 1137 + * @sectionp: pointer where the section id will be stored 1138 + * @oobregion: OOB region information 1139 + * 1140 + * Works like mtd_ooblayout_find_region() except it searches for a specific ECC 1141 + * byte. 1142 + * 1143 + * Returns zero on success, a negative error code otherwise. 
1144 + */ 1145 + int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, 1146 + int *section, 1147 + struct mtd_oob_region *oobregion) 1148 + { 1149 + return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion, 1150 + mtd_ooblayout_ecc); 1151 + } 1152 + EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion); 1153 + 1154 + /** 1155 + * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer 1156 + * @mtd: mtd info structure 1157 + * @buf: destination buffer to store OOB bytes 1158 + * @oobbuf: OOB buffer 1159 + * @start: first byte to retrieve 1160 + * @nbytes: number of bytes to retrieve 1161 + * @iter: section iterator 1162 + * 1163 + * Extract bytes attached to a specific category (ECC or free) 1164 + * from the OOB buffer and copy them into buf. 1165 + * 1166 + * Returns zero on success, a negative error code otherwise. 1167 + */ 1168 + static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf, 1169 + const u8 *oobbuf, int start, int nbytes, 1170 + int (*iter)(struct mtd_info *, 1171 + int section, 1172 + struct mtd_oob_region *oobregion)) 1173 + { 1174 + struct mtd_oob_region oobregion = { }; 1175 + int section = 0, ret; 1176 + 1177 + ret = mtd_ooblayout_find_region(mtd, start, &section, 1178 + &oobregion, iter); 1179 + 1180 + while (!ret) { 1181 + int cnt; 1182 + 1183 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length; 1184 + memcpy(buf, oobbuf + oobregion.offset, cnt); 1185 + buf += cnt; 1186 + nbytes -= cnt; 1187 + 1188 + if (!nbytes) 1189 + break; 1190 + 1191 + ret = iter(mtd, ++section, &oobregion); 1192 + } 1193 + 1194 + return ret; 1195 + } 1196 + 1197 + /** 1198 + * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer 1199 + * @mtd: mtd info structure 1200 + * @buf: source buffer to get OOB bytes from 1201 + * @oobbuf: OOB buffer 1202 + * @start: first OOB byte to set 1203 + * @nbytes: number of OOB bytes to set 1204 + * @iter: section iterator 1205 + * 1206 + * Fill the OOB buffer with data provided in buf. 
The category (ECC or free) 1207 + * is selected by passing the appropriate iterator. 1208 + * 1209 + * Returns zero on success, a negative error code otherwise. 1210 + */ 1211 + static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf, 1212 + u8 *oobbuf, int start, int nbytes, 1213 + int (*iter)(struct mtd_info *, 1214 + int section, 1215 + struct mtd_oob_region *oobregion)) 1216 + { 1217 + struct mtd_oob_region oobregion = { }; 1218 + int section = 0, ret; 1219 + 1220 + ret = mtd_ooblayout_find_region(mtd, start, &section, 1221 + &oobregion, iter); 1222 + 1223 + while (!ret) { 1224 + int cnt; 1225 + 1226 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length; 1227 + memcpy(oobbuf + oobregion.offset, buf, cnt); 1228 + buf += cnt; 1229 + nbytes -= cnt; 1230 + 1231 + if (!nbytes) 1232 + break; 1233 + 1234 + ret = iter(mtd, ++section, &oobregion); 1235 + } 1236 + 1237 + return ret; 1238 + } 1239 + 1240 + /** 1241 + * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category 1242 + * @mtd: mtd info structure 1243 + * @iter: category iterator 1244 + * 1245 + * Count the number of bytes in a given category. 1246 + * 1247 + * Returns a positive value on success, a negative error code otherwise. 
1248 + */ 1249 + static int mtd_ooblayout_count_bytes(struct mtd_info *mtd, 1250 + int (*iter)(struct mtd_info *, 1251 + int section, 1252 + struct mtd_oob_region *oobregion)) 1253 + { 1254 + struct mtd_oob_region oobregion = { }; 1255 + int section = 0, ret, nbytes = 0; 1256 + 1257 + while (1) { 1258 + ret = iter(mtd, section++, &oobregion); 1259 + if (ret) { 1260 + if (ret == -ERANGE) 1261 + ret = nbytes; 1262 + break; 1263 + } 1264 + 1265 + nbytes += oobregion.length; 1266 + } 1267 + 1268 + return ret; 1269 + } 1270 + 1271 + /** 1272 + * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer 1273 + * @mtd: mtd info structure 1274 + * @eccbuf: destination buffer to store ECC bytes 1275 + * @oobbuf: OOB buffer 1276 + * @start: first ECC byte to retrieve 1277 + * @nbytes: number of ECC bytes to retrieve 1278 + * 1279 + * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes. 1280 + * 1281 + * Returns zero on success, a negative error code otherwise. 1282 + */ 1283 + int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, 1284 + const u8 *oobbuf, int start, int nbytes) 1285 + { 1286 + return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes, 1287 + mtd_ooblayout_ecc); 1288 + } 1289 + EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes); 1290 + 1291 + /** 1292 + * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer 1293 + * @mtd: mtd info structure 1294 + * @eccbuf: source buffer to get ECC bytes from 1295 + * @oobbuf: OOB buffer 1296 + * @start: first ECC byte to set 1297 + * @nbytes: number of ECC bytes to set 1298 + * 1299 + * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes. 1300 + * 1301 + * Returns zero on success, a negative error code otherwise. 
1302 + */ 1303 + int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, 1304 + u8 *oobbuf, int start, int nbytes) 1305 + { 1306 + return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes, 1307 + mtd_ooblayout_ecc); 1308 + } 1309 + EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes); 1310 + 1311 + /** 1312 + * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer 1313 + * @mtd: mtd info structure 1314 + * @databuf: destination buffer to store ECC bytes 1315 + * @oobbuf: OOB buffer 1316 + * @start: first ECC byte to retrieve 1317 + * @nbytes: number of ECC bytes to retrieve 1318 + * 1319 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. 1320 + * 1321 + * Returns zero on success, a negative error code otherwise. 1322 + */ 1323 + int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, 1324 + const u8 *oobbuf, int start, int nbytes) 1325 + { 1326 + return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes, 1327 + mtd_ooblayout_free); 1328 + } 1329 + EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); 1330 + 1331 + /** 1332 + * mtd_ooblayout_get_eccbytes - set data bytes into the oob buffer 1333 + * @mtd: mtd info structure 1334 + * @eccbuf: source buffer to get data bytes from 1335 + * @oobbuf: OOB buffer 1336 + * @start: first ECC byte to set 1337 + * @nbytes: number of ECC bytes to set 1338 + * 1339 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. 1340 + * 1341 + * Returns zero on success, a negative error code otherwise. 
1342 + */ 1343 + int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, 1344 + u8 *oobbuf, int start, int nbytes) 1345 + { 1346 + return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes, 1347 + mtd_ooblayout_free); 1348 + } 1349 + EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes); 1350 + 1351 + /** 1352 + * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB 1353 + * @mtd: mtd info structure 1354 + * 1355 + * Works like mtd_ooblayout_count_bytes(), except it count free bytes. 1356 + * 1357 + * Returns zero on success, a negative error code otherwise. 1358 + */ 1359 + int mtd_ooblayout_count_freebytes(struct mtd_info *mtd) 1360 + { 1361 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free); 1362 + } 1363 + EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes); 1364 + 1365 + /** 1366 + * mtd_ooblayout_count_freebytes - count the number of ECC bytes in OOB 1367 + * @mtd: mtd info structure 1368 + * 1369 + * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes. 1370 + * 1371 + * Returns zero on success, a negative error code otherwise. 1372 + */ 1373 + int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd) 1374 + { 1375 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc); 1376 + } 1377 + EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); 1378 + 1019 1379 /* 1020 1380 * Method to access the protection register area, present in some flash 1021 1381 * devices. The user data is one time programmable but the factory data is read
+22 -1
drivers/mtd/mtdpart.c
··· 317 317 return res; 318 318 } 319 319 320 + static int part_ooblayout_ecc(struct mtd_info *mtd, int section, 321 + struct mtd_oob_region *oobregion) 322 + { 323 + struct mtd_part *part = mtd_to_part(mtd); 324 + 325 + return mtd_ooblayout_ecc(part->master, section, oobregion); 326 + } 327 + 328 + static int part_ooblayout_free(struct mtd_info *mtd, int section, 329 + struct mtd_oob_region *oobregion) 330 + { 331 + struct mtd_part *part = mtd_to_part(mtd); 332 + 333 + return mtd_ooblayout_free(part->master, section, oobregion); 334 + } 335 + 336 + static const struct mtd_ooblayout_ops part_ooblayout_ops = { 337 + .ecc = part_ooblayout_ecc, 338 + .free = part_ooblayout_free, 339 + }; 340 + 320 341 static inline void free_partition(struct mtd_part *p) 321 342 { 322 343 kfree(p->mtd.name); ··· 554 533 part->name); 555 534 } 556 535 557 - slave->mtd.ecclayout = master->ecclayout; 536 + mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops); 558 537 slave->mtd.ecc_step_size = master->ecc_step_size; 559 538 slave->mtd.ecc_strength = master->ecc_strength; 560 539 slave->mtd.bitflip_threshold = master->bitflip_threshold;
+1
drivers/mtd/nand/ams-delta.c
··· 224 224 /* 25 us command delay time */ 225 225 this->chip_delay = 30; 226 226 this->ecc.mode = NAND_ECC_SOFT; 227 + this->ecc.algo = NAND_ECC_HAMMING; 227 228 228 229 platform_set_drvdata(pdev, io_base); 229 230
+174 -141
drivers/mtd/nand/atmel_nand.c
··· 36 36 #include <linux/of.h> 37 37 #include <linux/of_device.h> 38 38 #include <linux/of_gpio.h> 39 - #include <linux/of_mtd.h> 40 39 #include <linux/mtd/mtd.h> 41 40 #include <linux/mtd/nand.h> 42 41 #include <linux/mtd/partitions.h> ··· 71 72 uint32_t rb_mask; 72 73 }; 73 74 74 - /* oob layout for large page size 75 + /* 76 + * oob layout for large page size 75 77 * bad block info is on bytes 0 and 1 76 78 * the bytes have to be consecutives to avoid 77 79 * several NAND_CMD_RNDOUT during read 78 - */ 79 - static struct nand_ecclayout atmel_oobinfo_large = { 80 - .eccbytes = 4, 81 - .eccpos = {60, 61, 62, 63}, 82 - .oobfree = { 83 - {2, 58} 84 - }, 85 - }; 86 - 87 - /* oob layout for small page size 80 + * 81 + * oob layout for small page size 88 82 * bad block info is on bytes 4 and 5 89 83 * the bytes have to be consecutives to avoid 90 84 * several NAND_CMD_RNDOUT during read 91 85 */ 92 - static struct nand_ecclayout atmel_oobinfo_small = { 93 - .eccbytes = 4, 94 - .eccpos = {0, 1, 2, 3}, 95 - .oobfree = { 96 - {6, 10} 97 - }, 86 + static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section, 87 + struct mtd_oob_region *oobregion) 88 + { 89 + if (section) 90 + return -ERANGE; 91 + 92 + oobregion->length = 4; 93 + oobregion->offset = 0; 94 + 95 + return 0; 96 + } 97 + 98 + static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section, 99 + struct mtd_oob_region *oobregion) 100 + { 101 + if (section) 102 + return -ERANGE; 103 + 104 + oobregion->offset = 6; 105 + oobregion->length = mtd->oobsize - oobregion->offset; 106 + 107 + return 0; 108 + } 109 + 110 + static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = { 111 + .ecc = atmel_ooblayout_ecc_sp, 112 + .free = atmel_ooblayout_free_sp, 98 113 }; 99 114 100 115 struct atmel_nfc { ··· 175 162 int *pmecc_dmu; 176 163 int *pmecc_delta; 177 164 }; 178 - 179 - static struct nand_ecclayout atmel_pmecc_oobinfo; 180 165 181 166 /* 182 167 * Enable NAND. 
··· 445 434 static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 446 435 { 447 436 struct nand_chip *chip = mtd_to_nand(mtd); 448 - struct atmel_nand_host *host = nand_get_controller_data(chip); 449 437 450 438 if (use_dma && len > mtd->oobsize) 451 439 /* only use DMA for bigger than oob size: better performances */ 452 440 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 453 441 return; 454 442 455 - if (host->board.bus_width_16) 443 + if (chip->options & NAND_BUSWIDTH_16) 456 444 atmel_read_buf16(mtd, buf, len); 457 445 else 458 446 atmel_read_buf8(mtd, buf, len); ··· 460 450 static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 461 451 { 462 452 struct nand_chip *chip = mtd_to_nand(mtd); 463 - struct atmel_nand_host *host = nand_get_controller_data(chip); 464 453 465 454 if (use_dma && len > mtd->oobsize) 466 455 /* only use DMA for bigger than oob size: better performances */ 467 456 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 468 457 return; 469 458 470 - if (host->board.bus_width_16) 459 + if (chip->options & NAND_BUSWIDTH_16) 471 460 atmel_write_buf16(mtd, buf, len); 472 461 else 473 462 atmel_write_buf8(mtd, buf, len); ··· 490 481 { 491 482 int m = 12 + sector_size / 512; 492 483 return (m * cap + 7) / 8; 493 - } 494 - 495 - static void pmecc_config_ecc_layout(struct nand_ecclayout *layout, 496 - int oobsize, int ecc_len) 497 - { 498 - int i; 499 - 500 - layout->eccbytes = ecc_len; 501 - 502 - /* ECC will occupy the last ecc_len bytes continuously */ 503 - for (i = 0; i < ecc_len; i++) 504 - layout->eccpos[i] = oobsize - ecc_len + i; 505 - 506 - layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES; 507 - layout->oobfree[0].length = 508 - oobsize - ecc_len - layout->oobfree[0].offset; 509 484 } 510 485 511 486 static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) ··· 829 836 dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", 830 837 pos, bit_pos, err_byte, *(buf 
+ byte_pos)); 831 838 } else { 839 + struct mtd_oob_region oobregion; 840 + 832 841 /* Bit flip in OOB area */ 833 842 tmp = sector_num * nand_chip->ecc.bytes 834 843 + (byte_pos - sector_size); 835 844 err_byte = ecc[tmp]; 836 845 ecc[tmp] ^= (1 << bit_pos); 837 846 838 - pos = tmp + nand_chip->ecc.layout->eccpos[0]; 847 + mtd_ooblayout_ecc(mtd, 0, &oobregion); 848 + pos = tmp + oobregion.offset; 839 849 dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", 840 850 pos, bit_pos, err_byte, ecc[tmp]); 841 851 } ··· 859 863 uint8_t *buf_pos; 860 864 int max_bitflips = 0; 861 865 862 - /* If can correct bitfilps from erased page, do the normal check */ 863 - if (host->caps->pmecc_correct_erase_page) 864 - goto normal_check; 865 - 866 - for (i = 0; i < nand_chip->ecc.total; i++) 867 - if (ecc[i] != 0xff) 868 - goto normal_check; 869 - /* Erased page, return OK */ 870 - return 0; 871 - 872 - normal_check: 873 866 for (i = 0; i < nand_chip->ecc.steps; i++) { 874 867 err_nbr = 0; 875 868 if (pmecc_stat & 0x1) { ··· 869 884 pmecc_get_sigma(mtd); 870 885 871 886 err_nbr = pmecc_err_location(mtd); 872 - if (err_nbr == -1) { 887 + if (err_nbr >= 0) { 888 + pmecc_correct_data(mtd, buf_pos, ecc, i, 889 + nand_chip->ecc.bytes, 890 + err_nbr); 891 + } else if (!host->caps->pmecc_correct_erase_page) { 892 + u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes); 893 + 894 + /* Try to detect erased pages */ 895 + err_nbr = nand_check_erased_ecc_chunk(buf_pos, 896 + host->pmecc_sector_size, 897 + ecc_pos, 898 + nand_chip->ecc.bytes, 899 + NULL, 0, 900 + nand_chip->ecc.strength); 901 + } 902 + 903 + if (err_nbr < 0) { 873 904 dev_err(host->dev, "PMECC: Too many errors\n"); 874 905 mtd->ecc_stats.failed++; 875 906 return -EIO; 876 - } else { 877 - pmecc_correct_data(mtd, buf_pos, ecc, i, 878 - nand_chip->ecc.bytes, err_nbr); 879 - mtd->ecc_stats.corrected += err_nbr; 880 - max_bitflips = max_t(int, max_bitflips, err_nbr); 881 907 } 908 + 909 + 
mtd->ecc_stats.corrected += err_nbr; 910 + max_bitflips = max_t(int, max_bitflips, err_nbr); 882 911 } 883 912 pmecc_stat >>= 1; 884 913 } ··· 930 931 struct atmel_nand_host *host = nand_get_controller_data(chip); 931 932 int eccsize = chip->ecc.size * chip->ecc.steps; 932 933 uint8_t *oob = chip->oob_poi; 933 - uint32_t *eccpos = chip->ecc.layout->eccpos; 934 934 uint32_t stat; 935 935 unsigned long end_time; 936 936 int bitflips = 0; ··· 951 953 952 954 stat = pmecc_readl_relaxed(host->ecc, ISR); 953 955 if (stat != 0) { 954 - bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]); 956 + struct mtd_oob_region oobregion; 957 + 958 + mtd_ooblayout_ecc(mtd, 0, &oobregion); 959 + bitflips = pmecc_correction(mtd, stat, buf, 960 + &oob[oobregion.offset]); 955 961 if (bitflips < 0) 956 962 /* uncorrectable errors */ 957 963 return 0; ··· 969 967 int page) 970 968 { 971 969 struct atmel_nand_host *host = nand_get_controller_data(chip); 972 - uint32_t *eccpos = chip->ecc.layout->eccpos; 973 - int i, j; 970 + struct mtd_oob_region oobregion = { }; 971 + int i, j, section = 0; 974 972 unsigned long end_time; 975 973 976 974 if (!host->nfc || !host->nfc->write_by_sram) { ··· 989 987 990 988 for (i = 0; i < chip->ecc.steps; i++) { 991 989 for (j = 0; j < chip->ecc.bytes; j++) { 992 - int pos; 990 + if (!oobregion.length) 991 + mtd_ooblayout_ecc(mtd, section, &oobregion); 993 992 994 - pos = i * chip->ecc.bytes + j; 995 - chip->oob_poi[eccpos[pos]] = 993 + chip->oob_poi[oobregion.offset] = 996 994 pmecc_readb_ecc_relaxed(host->ecc, i, j); 995 + oobregion.length--; 996 + oobregion.offset++; 997 + section++; 997 998 } 998 999 } 999 1000 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 1008 1003 { 1009 1004 struct nand_chip *nand_chip = mtd_to_nand(mtd); 1010 1005 struct atmel_nand_host *host = nand_get_controller_data(nand_chip); 1006 + int eccbytes = mtd_ooblayout_count_eccbytes(mtd); 1011 1007 uint32_t val = 0; 1012 - struct nand_ecclayout *ecc_layout; 1008 + 
struct mtd_oob_region oobregion; 1013 1009 1014 1010 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); 1015 1011 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); ··· 1060 1054 | PMECC_CFG_AUTO_DISABLE); 1061 1055 pmecc_writel(host->ecc, CFG, val); 1062 1056 1063 - ecc_layout = nand_chip->ecc.layout; 1064 1057 pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1); 1065 - pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]); 1058 + mtd_ooblayout_ecc(mtd, 0, &oobregion); 1059 + pmecc_writel(host->ecc, SADDR, oobregion.offset); 1066 1060 pmecc_writel(host->ecc, EADDR, 1067 - ecc_layout->eccpos[ecc_layout->eccbytes - 1]); 1061 + oobregion.offset + eccbytes - 1); 1068 1062 /* See datasheet about PMECC Clock Control Register */ 1069 1063 pmecc_writel(host->ecc, CLK, 2); 1070 1064 pmecc_writel(host->ecc, IDR, 0xff); ··· 1212 1206 dev_warn(host->dev, 1213 1207 "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n"); 1214 1208 nand_chip->ecc.mode = NAND_ECC_SOFT; 1209 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 1215 1210 return 0; 1216 1211 } 1217 1212 ··· 1287 1280 err_no = -EINVAL; 1288 1281 goto err; 1289 1282 } 1290 - pmecc_config_ecc_layout(&atmel_pmecc_oobinfo, 1291 - mtd->oobsize, 1292 - nand_chip->ecc.total); 1293 1283 1294 - nand_chip->ecc.layout = &atmel_pmecc_oobinfo; 1284 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 1295 1285 break; 1296 1286 default: 1297 1287 dev_warn(host->dev, ··· 1296 1292 /* page size not handled by HW ECC */ 1297 1293 /* switching back to soft ECC */ 1298 1294 nand_chip->ecc.mode = NAND_ECC_SOFT; 1295 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 1299 1296 return 0; 1300 1297 } 1301 1298 ··· 1364 1359 { 1365 1360 int eccsize = chip->ecc.size; 1366 1361 int eccbytes = chip->ecc.bytes; 1367 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1368 1362 uint8_t *p = buf; 1369 1363 uint8_t *oob = chip->oob_poi; 1370 1364 uint8_t *ecc_pos; 1371 1365 int stat; 1372 1366 unsigned int max_bitflips = 0; 1367 + struct mtd_oob_region 
oobregion = {}; 1373 1368 1374 1369 /* 1375 1370 * Errata: ALE is incorrectly wired up to the ECC controller ··· 1387 1382 chip->read_buf(mtd, p, eccsize); 1388 1383 1389 1384 /* move to ECC position if needed */ 1390 - if (eccpos[0] != 0) { 1391 - /* This only works on large pages 1392 - * because the ECC controller waits for 1393 - * NAND_CMD_RNDOUTSTART after the 1394 - * NAND_CMD_RNDOUT. 1395 - * anyway, for small pages, the eccpos[0] == 0 1385 + mtd_ooblayout_ecc(mtd, 0, &oobregion); 1386 + if (oobregion.offset != 0) { 1387 + /* 1388 + * This only works on large pages because the ECC controller 1389 + * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT. 1390 + * Anyway, for small pages, the first ECC byte is at offset 1391 + * 0 in the OOB area. 1396 1392 */ 1397 1393 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 1398 - mtd->writesize + eccpos[0], -1); 1394 + mtd->writesize + oobregion.offset, -1); 1399 1395 } 1400 1396 1401 1397 /* the ECC controller needs to read the ECC just after the data */ 1402 - ecc_pos = oob + eccpos[0]; 1398 + ecc_pos = oob + oobregion.offset; 1403 1399 chip->read_buf(mtd, ecc_pos, eccbytes); 1404 1400 1405 1401 /* check if there's an error */ ··· 1510 1504 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); 1511 1505 } 1512 1506 1513 - static int atmel_of_init_port(struct atmel_nand_host *host, 1514 - struct device_node *np) 1507 + static int atmel_of_init_ecc(struct atmel_nand_host *host, 1508 + struct device_node *np) 1515 1509 { 1516 - u32 val; 1517 1510 u32 offset[2]; 1518 - int ecc_mode; 1519 - struct atmel_nand_data *board = &host->board; 1520 - enum of_gpio_flags flags = 0; 1521 - 1522 - host->caps = (struct atmel_nand_caps *) 1523 - of_device_get_match_data(host->dev); 1524 - 1525 - if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { 1526 - if (val >= 32) { 1527 - dev_err(host->dev, "invalid addr-offset %u\n", val); 1528 - return -EINVAL; 1529 - } 1530 - board->ale = val; 1531 - } 1532 - 1533 - if 
(of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) { 1534 - if (val >= 32) { 1535 - dev_err(host->dev, "invalid cmd-offset %u\n", val); 1536 - return -EINVAL; 1537 - } 1538 - board->cle = val; 1539 - } 1540 - 1541 - ecc_mode = of_get_nand_ecc_mode(np); 1542 - 1543 - board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode; 1544 - 1545 - board->on_flash_bbt = of_get_nand_on_flash_bbt(np); 1546 - 1547 - board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma"); 1548 - 1549 - if (of_get_nand_bus_width(np) == 16) 1550 - board->bus_width_16 = 1; 1551 - 1552 - board->rdy_pin = of_get_gpio_flags(np, 0, &flags); 1553 - board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW); 1554 - 1555 - board->enable_pin = of_get_gpio(np, 1); 1556 - board->det_pin = of_get_gpio(np, 2); 1511 + u32 val; 1557 1512 1558 1513 host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc"); 1559 1514 1560 - /* load the nfc driver if there is */ 1561 - of_platform_populate(np, NULL, NULL, host->dev); 1562 - 1563 - if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc) 1564 - return 0; /* Not using PMECC */ 1515 + /* Not using PMECC */ 1516 + if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc) 1517 + return 0; 1565 1518 1566 1519 /* use PMECC, get correction capability, sector size and lookup 1567 1520 * table offset. 
··· 1561 1596 /* Will build a lookup table and initialize the offset later */ 1562 1597 return 0; 1563 1598 } 1599 + 1564 1600 if (!offset[0] && !offset[1]) { 1565 1601 dev_err(host->dev, "Invalid PMECC lookup table offset\n"); 1566 1602 return -EINVAL; 1567 1603 } 1604 + 1568 1605 host->pmecc_lookup_table_offset_512 = offset[0]; 1569 1606 host->pmecc_lookup_table_offset_1024 = offset[1]; 1607 + 1608 + return 0; 1609 + } 1610 + 1611 + static int atmel_of_init_port(struct atmel_nand_host *host, 1612 + struct device_node *np) 1613 + { 1614 + u32 val; 1615 + struct atmel_nand_data *board = &host->board; 1616 + enum of_gpio_flags flags = 0; 1617 + 1618 + host->caps = (struct atmel_nand_caps *) 1619 + of_device_get_match_data(host->dev); 1620 + 1621 + if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { 1622 + if (val >= 32) { 1623 + dev_err(host->dev, "invalid addr-offset %u\n", val); 1624 + return -EINVAL; 1625 + } 1626 + board->ale = val; 1627 + } 1628 + 1629 + if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) { 1630 + if (val >= 32) { 1631 + dev_err(host->dev, "invalid cmd-offset %u\n", val); 1632 + return -EINVAL; 1633 + } 1634 + board->cle = val; 1635 + } 1636 + 1637 + board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma"); 1638 + 1639 + board->rdy_pin = of_get_gpio_flags(np, 0, &flags); 1640 + board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW); 1641 + 1642 + board->enable_pin = of_get_gpio(np, 1); 1643 + board->det_pin = of_get_gpio(np, 2); 1644 + 1645 + /* load the nfc driver if there is */ 1646 + of_platform_populate(np, NULL, NULL, host->dev); 1647 + 1648 + /* 1649 + * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value 1650 + * even if the nand-ecc-mode property is not defined. 
1651 + */ 1652 + host->nand_chip.ecc.mode = NAND_ECC_SOFT; 1653 + host->nand_chip.ecc.algo = NAND_ECC_HAMMING; 1570 1654 1571 1655 return 0; 1572 1656 } ··· 1632 1618 dev_err(host->dev, 1633 1619 "Can't get I/O resource regs, use software ECC\n"); 1634 1620 nand_chip->ecc.mode = NAND_ECC_SOFT; 1621 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 1635 1622 return 0; 1636 1623 } 1637 1624 ··· 1646 1631 /* set ECC page size and oob layout */ 1647 1632 switch (mtd->writesize) { 1648 1633 case 512: 1649 - nand_chip->ecc.layout = &atmel_oobinfo_small; 1634 + mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops); 1650 1635 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); 1651 1636 break; 1652 1637 case 1024: 1653 - nand_chip->ecc.layout = &atmel_oobinfo_large; 1638 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 1654 1639 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); 1655 1640 break; 1656 1641 case 2048: 1657 - nand_chip->ecc.layout = &atmel_oobinfo_large; 1642 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 1658 1643 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); 1659 1644 break; 1660 1645 case 4096: 1661 - nand_chip->ecc.layout = &atmel_oobinfo_large; 1646 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 1662 1647 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); 1663 1648 break; 1664 1649 default: 1665 1650 /* page size not handled by HW ECC */ 1666 1651 /* switching back to soft ECC */ 1667 1652 nand_chip->ecc.mode = NAND_ECC_SOFT; 1653 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 1668 1654 return 0; 1669 1655 } 1670 1656 ··· 2163 2147 } else { 2164 2148 memcpy(&host->board, dev_get_platdata(&pdev->dev), 2165 2149 sizeof(struct atmel_nand_data)); 2150 + nand_chip->ecc.mode = host->board.ecc_mode; 2151 + 2152 + /* 2153 + * When using software ECC every supported avr32 board means 2154 + * Hamming algorithm. If that ever changes we'll need to add 2155 + * ecc_algo field to the struct atmel_nand_data. 
2156 + */ 2157 + if (nand_chip->ecc.mode == NAND_ECC_SOFT) 2158 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 2159 + 2160 + /* 16-bit bus width */ 2161 + if (host->board.bus_width_16) 2162 + nand_chip->options |= NAND_BUSWIDTH_16; 2166 2163 } 2167 2164 2168 2165 /* link the private data structures */ ··· 2217 2188 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; 2218 2189 } 2219 2190 2220 - nand_chip->ecc.mode = host->board.ecc_mode; 2221 2191 nand_chip->chip_delay = 40; /* 40us command delay time */ 2222 2192 2223 - if (host->board.bus_width_16) /* 16-bit bus width */ 2224 - nand_chip->options |= NAND_BUSWIDTH_16; 2225 2193 2226 2194 nand_chip->read_buf = atmel_read_buf; 2227 2195 nand_chip->write_buf = atmel_write_buf; ··· 2251 2225 } 2252 2226 } 2253 2227 2254 - if (host->board.on_flash_bbt || on_flash_bbt) { 2255 - dev_info(&pdev->dev, "Use On Flash BBT\n"); 2256 - nand_chip->bbt_options |= NAND_BBT_USE_FLASH; 2257 - } 2258 - 2259 2228 if (!host->board.has_dma) 2260 2229 use_dma = 0; 2261 2230 ··· 2275 2254 if (nand_scan_ident(mtd, 1, NULL)) { 2276 2255 res = -ENXIO; 2277 2256 goto err_scan_ident; 2257 + } 2258 + 2259 + if (host->board.on_flash_bbt || on_flash_bbt) 2260 + nand_chip->bbt_options |= NAND_BBT_USE_FLASH; 2261 + 2262 + if (nand_chip->bbt_options & NAND_BBT_USE_FLASH) 2263 + dev_info(&pdev->dev, "Use On Flash BBT\n"); 2264 + 2265 + if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { 2266 + res = atmel_of_init_ecc(host, pdev->dev.of_node); 2267 + if (res) 2268 + goto err_hw_ecc; 2278 2269 } 2279 2270 2280 2271 if (nand_chip->ecc.mode == NAND_ECC_HW) {
+1
drivers/mtd/nand/au1550nd.c
··· 459 459 /* 30 us command delay time */ 460 460 this->chip_delay = 30; 461 461 this->ecc.mode = NAND_ECC_SOFT; 462 + this->ecc.algo = NAND_ECC_HAMMING; 462 463 463 464 if (pd->devwidth) 464 465 this->options |= NAND_BUSWIDTH_16;
+29 -23
drivers/mtd/nand/bf5xx_nand.c
··· 109 109 0}; 110 110 111 111 #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC 112 - static struct nand_ecclayout bootrom_ecclayout = { 113 - .eccbytes = 24, 114 - .eccpos = { 115 - 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2, 116 - 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2, 117 - 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2, 118 - 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2, 119 - 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2, 120 - 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2, 121 - 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2, 122 - 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2 123 - }, 124 - .oobfree = { 125 - { 0x8 * 0 + 3, 5 }, 126 - { 0x8 * 1 + 3, 5 }, 127 - { 0x8 * 2 + 3, 5 }, 128 - { 0x8 * 3 + 3, 5 }, 129 - { 0x8 * 4 + 3, 5 }, 130 - { 0x8 * 5 + 3, 5 }, 131 - { 0x8 * 6 + 3, 5 }, 132 - { 0x8 * 7 + 3, 5 }, 133 - } 112 + static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section, 113 + struct mtd_oob_region *oobregion) 114 + { 115 + if (section > 7) 116 + return -ERANGE; 117 + 118 + oobregion->offset = section * 8; 119 + oobregion->length = 3; 120 + 121 + return 0; 122 + } 123 + 124 + static int bootrom_ooblayout_free(struct mtd_info *mtd, int section, 125 + struct mtd_oob_region *oobregion) 126 + { 127 + if (section > 7) 128 + return -ERANGE; 129 + 130 + oobregion->offset = (section * 8) + 3; 131 + oobregion->length = 5; 132 + 133 + return 0; 134 + } 135 + 136 + static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = { 137 + .ecc = bootrom_ooblayout_ecc, 138 + .free = bootrom_ooblayout_free, 134 139 }; 135 140 #endif 136 141 ··· 805 800 /* setup hardware ECC data struct */ 806 801 if (hardware_ecc) { 807 802 #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC 808 - chip->ecc.layout = &bootrom_ecclayout; 803 + mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops); 809 804 #endif 810 805 chip->read_buf = bf5xx_nand_dma_read_buf; 811 806 chip->write_buf = bf5xx_nand_dma_write_buf; ··· 817 812 chip->ecc.write_page_raw = bf5xx_nand_write_page_raw; 818 813 } else { 819 814 chip->ecc.mode = NAND_ECC_SOFT; 815 + chip->ecc.algo = NAND_ECC_HAMMING; 820 816 } 821 
817 822 818 /* scan hardware nand chip and setup mtd info data struct */
+187 -109
drivers/mtd/nand/brcmnand/brcmnand.c
··· 32 32 #include <linux/mtd/nand.h> 33 33 #include <linux/mtd/partitions.h> 34 34 #include <linux/of.h> 35 - #include <linux/of_mtd.h> 36 35 #include <linux/of_platform.h> 37 36 #include <linux/slab.h> 38 37 #include <linux/list.h> ··· 600 601 601 602 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) 602 603 { 603 - if (ctrl->nand_version < 0x0700) 604 + if (ctrl->nand_version < 0x0602) 604 605 return 24; 605 606 return 0; 606 607 } ··· 780 781 } 781 782 782 783 /* 783 - * Returns a nand_ecclayout strucutre for the given layout/configuration. 784 - * Returns NULL on failure. 784 + * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given 785 + * the layout/configuration. 786 + * Returns -ERRCODE on failure. 785 787 */ 786 - static struct nand_ecclayout *brcmnand_create_layout(int ecc_level, 787 - struct brcmnand_host *host) 788 + static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section, 789 + struct mtd_oob_region *oobregion) 788 790 { 791 + struct nand_chip *chip = mtd_to_nand(mtd); 792 + struct brcmnand_host *host = nand_get_controller_data(chip); 789 793 struct brcmnand_cfg *cfg = &host->hwcfg; 790 - int i, j; 791 - struct nand_ecclayout *layout; 792 - int req; 793 - int sectors; 794 - int sas; 795 - int idx1, idx2; 794 + int sas = cfg->spare_area_size << cfg->sector_size_1k; 795 + int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 796 796 797 - layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL); 798 - if (!layout) 799 - return NULL; 797 + if (section >= sectors) 798 + return -ERANGE; 800 799 801 - sectors = cfg->page_size / (512 << cfg->sector_size_1k); 802 - sas = cfg->spare_area_size << cfg->sector_size_1k; 800 + oobregion->offset = (section * sas) + 6; 801 + oobregion->length = 3; 803 802 804 - /* Hamming */ 805 - if (is_hamming_ecc(cfg)) { 806 - for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) { 807 - /* First sector of each page may have BBI */ 808 - if (i == 0) { 809 - 
layout->oobfree[idx2].offset = i * sas + 1; 810 - /* Small-page NAND use byte 6 for BBI */ 811 - if (cfg->page_size == 512) 812 - layout->oobfree[idx2].offset--; 813 - layout->oobfree[idx2].length = 5; 814 - } else { 815 - layout->oobfree[idx2].offset = i * sas; 816 - layout->oobfree[idx2].length = 6; 817 - } 818 - idx2++; 819 - layout->eccpos[idx1++] = i * sas + 6; 820 - layout->eccpos[idx1++] = i * sas + 7; 821 - layout->eccpos[idx1++] = i * sas + 8; 822 - layout->oobfree[idx2].offset = i * sas + 9; 823 - layout->oobfree[idx2].length = 7; 824 - idx2++; 825 - /* Leave zero-terminated entry for OOBFREE */ 826 - if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE || 827 - idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) 828 - break; 803 + return 0; 804 + } 805 + 806 + static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, 807 + struct mtd_oob_region *oobregion) 808 + { 809 + struct nand_chip *chip = mtd_to_nand(mtd); 810 + struct brcmnand_host *host = nand_get_controller_data(chip); 811 + struct brcmnand_cfg *cfg = &host->hwcfg; 812 + int sas = cfg->spare_area_size << cfg->sector_size_1k; 813 + int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 814 + 815 + if (section >= sectors * 2) 816 + return -ERANGE; 817 + 818 + oobregion->offset = (section / 2) * sas; 819 + 820 + if (section & 1) { 821 + oobregion->offset += 9; 822 + oobregion->length = 7; 823 + } else { 824 + oobregion->length = 6; 825 + 826 + /* First sector of each page may have BBI */ 827 + if (!section) { 828 + /* 829 + * Small-page NAND use byte 6 for BBI while large-page 830 + * NAND use byte 0. 
831 + */ 832 + if (cfg->page_size > 512) 833 + oobregion->offset++; 834 + oobregion->length--; 829 835 } 836 + } 830 837 831 - return layout; 838 + return 0; 839 + } 840 + 841 + static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = { 842 + .ecc = brcmnand_hamming_ooblayout_ecc, 843 + .free = brcmnand_hamming_ooblayout_free, 844 + }; 845 + 846 + static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section, 847 + struct mtd_oob_region *oobregion) 848 + { 849 + struct nand_chip *chip = mtd_to_nand(mtd); 850 + struct brcmnand_host *host = nand_get_controller_data(chip); 851 + struct brcmnand_cfg *cfg = &host->hwcfg; 852 + int sas = cfg->spare_area_size << cfg->sector_size_1k; 853 + int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 854 + 855 + if (section >= sectors) 856 + return -ERANGE; 857 + 858 + oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes; 859 + oobregion->length = chip->ecc.bytes; 860 + 861 + return 0; 862 + } 863 + 864 + static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section, 865 + struct mtd_oob_region *oobregion) 866 + { 867 + struct nand_chip *chip = mtd_to_nand(mtd); 868 + struct brcmnand_host *host = nand_get_controller_data(chip); 869 + struct brcmnand_cfg *cfg = &host->hwcfg; 870 + int sas = cfg->spare_area_size << cfg->sector_size_1k; 871 + int sectors = cfg->page_size / (512 << cfg->sector_size_1k); 872 + 873 + if (section >= sectors) 874 + return -ERANGE; 875 + 876 + if (sas <= chip->ecc.bytes) 877 + return 0; 878 + 879 + oobregion->offset = section * sas; 880 + oobregion->length = sas - chip->ecc.bytes; 881 + 882 + if (!section) { 883 + oobregion->offset++; 884 + oobregion->length--; 885 + } 886 + 887 + return 0; 888 + } 889 + 890 + static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section, 891 + struct mtd_oob_region *oobregion) 892 + { 893 + struct nand_chip *chip = mtd_to_nand(mtd); 894 + struct brcmnand_host *host = nand_get_controller_data(chip); 895 + 
struct brcmnand_cfg *cfg = &host->hwcfg; 896 + int sas = cfg->spare_area_size << cfg->sector_size_1k; 897 + 898 + if (section > 1 || sas - chip->ecc.bytes < 6 || 899 + (section && sas - chip->ecc.bytes == 6)) 900 + return -ERANGE; 901 + 902 + if (!section) { 903 + oobregion->offset = 0; 904 + oobregion->length = 5; 905 + } else { 906 + oobregion->offset = 6; 907 + oobregion->length = sas - chip->ecc.bytes - 6; 908 + } 909 + 910 + return 0; 911 + } 912 + 913 + static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = { 914 + .ecc = brcmnand_bch_ooblayout_ecc, 915 + .free = brcmnand_bch_ooblayout_free_lp, 916 + }; 917 + 918 + static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = { 919 + .ecc = brcmnand_bch_ooblayout_ecc, 920 + .free = brcmnand_bch_ooblayout_free_sp, 921 + }; 922 + 923 + static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) 924 + { 925 + struct brcmnand_cfg *p = &host->hwcfg; 926 + struct mtd_info *mtd = nand_to_mtd(&host->chip); 927 + struct nand_ecc_ctrl *ecc = &host->chip.ecc; 928 + unsigned int ecc_level = p->ecc_level; 929 + int sas = p->spare_area_size << p->sector_size_1k; 930 + int sectors = p->page_size / (512 << p->sector_size_1k); 931 + 932 + if (p->sector_size_1k) 933 + ecc_level <<= 1; 934 + 935 + if (is_hamming_ecc(p)) { 936 + ecc->bytes = 3 * sectors; 937 + mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); 938 + return 0; 832 939 } 833 940 834 941 /* ··· 943 838 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8) 944 839 * But we will just be conservative. 
945 840 */ 946 - req = DIV_ROUND_UP(ecc_level * 14, 8); 947 - if (req >= sas) { 841 + ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8); 842 + if (p->page_size == 512) 843 + mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops); 844 + else 845 + mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops); 846 + 847 + if (ecc->bytes >= sas) { 948 848 dev_err(&host->pdev->dev, 949 849 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n", 950 - req, sas); 951 - return NULL; 850 + ecc->bytes, sas); 851 + return -EINVAL; 952 852 } 953 853 954 - layout->eccbytes = req * sectors; 955 - for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) { 956 - for (j = sas - req; j < sas && idx1 < 957 - MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++) 958 - layout->eccpos[idx1] = i * sas + j; 959 - 960 - /* First sector of each page may have BBI */ 961 - if (i == 0) { 962 - if (cfg->page_size == 512 && (sas - req >= 6)) { 963 - /* Small-page NAND use byte 6 for BBI */ 964 - layout->oobfree[idx2].offset = 0; 965 - layout->oobfree[idx2].length = 5; 966 - idx2++; 967 - if (sas - req > 6) { 968 - layout->oobfree[idx2].offset = 6; 969 - layout->oobfree[idx2].length = 970 - sas - req - 6; 971 - idx2++; 972 - } 973 - } else if (sas > req + 1) { 974 - layout->oobfree[idx2].offset = i * sas + 1; 975 - layout->oobfree[idx2].length = sas - req - 1; 976 - idx2++; 977 - } 978 - } else if (sas > req) { 979 - layout->oobfree[idx2].offset = i * sas; 980 - layout->oobfree[idx2].length = sas - req; 981 - idx2++; 982 - } 983 - /* Leave zero-terminated entry for OOBFREE */ 984 - if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE || 985 - idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) 986 - break; 987 - } 988 - 989 - return layout; 990 - } 991 - 992 - static struct nand_ecclayout *brcmstb_choose_ecc_layout( 993 - struct brcmnand_host *host) 994 - { 995 - struct nand_ecclayout *layout; 996 - struct brcmnand_cfg *p = &host->hwcfg; 997 - unsigned int ecc_level = p->ecc_level; 998 - 999 - if (p->sector_size_1k) 1000 - ecc_level 
<<= 1; 1001 - 1002 - layout = brcmnand_create_layout(ecc_level, host); 1003 - if (!layout) { 1004 - dev_err(&host->pdev->dev, 1005 - "no proper ecc_layout for this NAND cfg\n"); 1006 - return NULL; 1007 - } 1008 - 1009 - return layout; 854 + return 0; 1010 855 } 1011 856 1012 857 static void brcmnand_wp(struct mtd_info *mtd, int wp) ··· 1925 1870 cfg->col_adr_bytes = 2; 1926 1871 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize); 1927 1872 1873 + if (chip->ecc.mode != NAND_ECC_HW) { 1874 + dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", 1875 + chip->ecc.mode); 1876 + return -EINVAL; 1877 + } 1878 + 1879 + if (chip->ecc.algo == NAND_ECC_UNKNOWN) { 1880 + if (chip->ecc.strength == 1 && chip->ecc.size == 512) 1881 + /* Default to Hamming for 1-bit ECC, if unspecified */ 1882 + chip->ecc.algo = NAND_ECC_HAMMING; 1883 + else 1884 + /* Otherwise, BCH */ 1885 + chip->ecc.algo = NAND_ECC_BCH; 1886 + } 1887 + 1888 + if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 || 1889 + chip->ecc.size != 512)) { 1890 + dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", 1891 + chip->ecc.strength, chip->ecc.size); 1892 + return -EINVAL; 1893 + } 1894 + 1928 1895 switch (chip->ecc.size) { 1929 1896 case 512: 1930 - if (chip->ecc.strength == 1) /* Hamming */ 1897 + if (chip->ecc.algo == NAND_ECC_HAMMING) 1931 1898 cfg->ecc_level = 15; 1932 1899 else 1933 1900 cfg->ecc_level = chip->ecc.strength; ··· 2078 2001 */ 2079 2002 chip->options |= NAND_USE_BOUNCE_BUFFER; 2080 2003 2081 - if (of_get_nand_on_flash_bbt(dn)) 2082 - chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 2004 + if (chip->bbt_options & NAND_BBT_USE_FLASH) 2005 + chip->bbt_options |= NAND_BBT_NO_OOB; 2083 2006 2084 2007 if (brcmnand_setup_dev(host)) 2085 2008 return -ENXIO; ··· 2088 2011 /* only use our internal HW threshold */ 2089 2012 mtd->bitflip_threshold = 1; 2090 2013 2091 - chip->ecc.layout = brcmstb_choose_ecc_layout(host); 2092 - if 
(!chip->ecc.layout) 2093 - return -ENXIO; 2014 + ret = brcmstb_choose_ecc_layout(host); 2015 + if (ret) 2016 + return ret; 2094 2017 2095 2018 if (nand_scan_tail(mtd)) 2096 2019 return -ENXIO; ··· 2192 2115 { .compatible = "brcm,brcmnand-v5.0" }, 2193 2116 { .compatible = "brcm,brcmnand-v6.0" }, 2194 2117 { .compatible = "brcm,brcmnand-v6.1" }, 2118 + { .compatible = "brcm,brcmnand-v6.2" }, 2195 2119 { .compatible = "brcm,brcmnand-v7.0" }, 2196 2120 { .compatible = "brcm,brcmnand-v7.1" }, 2197 2121 {},
+32 -12
drivers/mtd/nand/cafe_nand.c
··· 459 459 return max_bitflips; 460 460 } 461 461 462 - static struct nand_ecclayout cafe_oobinfo_2048 = { 463 - .eccbytes = 14, 464 - .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 465 - .oobfree = {{14, 50}} 462 + static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section, 463 + struct mtd_oob_region *oobregion) 464 + { 465 + struct nand_chip *chip = mtd_to_nand(mtd); 466 + 467 + if (section) 468 + return -ERANGE; 469 + 470 + oobregion->offset = 0; 471 + oobregion->length = chip->ecc.total; 472 + 473 + return 0; 474 + } 475 + 476 + static int cafe_ooblayout_free(struct mtd_info *mtd, int section, 477 + struct mtd_oob_region *oobregion) 478 + { 479 + struct nand_chip *chip = mtd_to_nand(mtd); 480 + 481 + if (section) 482 + return -ERANGE; 483 + 484 + oobregion->offset = chip->ecc.total; 485 + oobregion->length = mtd->oobsize - chip->ecc.total; 486 + 487 + return 0; 488 + } 489 + 490 + static const struct mtd_ooblayout_ops cafe_ooblayout_ops = { 491 + .ecc = cafe_ooblayout_ecc, 492 + .free = cafe_ooblayout_free, 466 493 }; 467 494 468 495 /* Ick. 
The BBT code really ought to be able to work this bit out ··· 519 492 .veroffs = 18, 520 493 .maxblocks = 4, 521 494 .pattern = cafe_mirror_pattern_2048 522 - }; 523 - 524 - static struct nand_ecclayout cafe_oobinfo_512 = { 525 - .eccbytes = 14, 526 - .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 527 - .oobfree = {{14, 2}} 528 495 }; 529 496 530 497 static struct nand_bbt_descr cafe_bbt_main_descr_512 = { ··· 764 743 cafe->ctl2 |= 1<<29; /* 2KiB page size */ 765 744 766 745 /* Set up ECC according to the type of chip we found */ 746 + mtd_set_ooblayout(mtd, &cafe_ooblayout_ops); 767 747 if (mtd->writesize == 2048) { 768 - cafe->nand.ecc.layout = &cafe_oobinfo_2048; 769 748 cafe->nand.bbt_td = &cafe_bbt_main_descr_2048; 770 749 cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048; 771 750 } else if (mtd->writesize == 512) { 772 - cafe->nand.ecc.layout = &cafe_oobinfo_512; 773 751 cafe->nand.bbt_td = &cafe_bbt_main_descr_512; 774 752 cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512; 775 753 } else {
+1
drivers/mtd/nand/cmx270_nand.c
··· 187 187 /* 15 us command delay time */ 188 188 this->chip_delay = 20; 189 189 this->ecc.mode = NAND_ECC_SOFT; 190 + this->ecc.algo = NAND_ECC_HAMMING; 190 191 191 192 /* read/write functions */ 192 193 this->read_byte = cmx270_read_byte;
+91 -119
drivers/mtd/nand/davinci_nand.c
··· 34 34 #include <linux/slab.h> 35 35 #include <linux/of_device.h> 36 36 #include <linux/of.h> 37 - #include <linux/of_mtd.h> 38 37 39 38 #include <linux/platform_data/mtd-davinci.h> 40 39 #include <linux/platform_data/mtd-davinci-aemif.h> ··· 53 54 */ 54 55 struct davinci_nand_info { 55 56 struct nand_chip chip; 56 - struct nand_ecclayout ecclayout; 57 57 58 58 struct device *dev; 59 59 struct clk *clk; ··· 478 480 * ten ECC bytes plus the manufacturer's bad block marker byte, and 479 481 * and not overlapping the default BBT markers. 480 482 */ 481 - static struct nand_ecclayout hwecc4_small = { 482 - .eccbytes = 10, 483 - .eccpos = { 0, 1, 2, 3, 4, 484 - /* offset 5 holds the badblock marker */ 485 - 6, 7, 486 - 13, 14, 15, }, 487 - .oobfree = { 488 - {.offset = 8, .length = 5, }, 489 - {.offset = 16, }, 490 - }, 491 - }; 483 + static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section, 484 + struct mtd_oob_region *oobregion) 485 + { 486 + if (section > 2) 487 + return -ERANGE; 492 488 493 - /* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash, 494 - * storing ten ECC bytes plus the manufacturer's bad block marker byte, 495 - * and not overlapping the default BBT markers. 
496 - */ 497 - static struct nand_ecclayout hwecc4_2048 = { 498 - .eccbytes = 40, 499 - .eccpos = { 500 - /* at the end of spare sector */ 501 - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 502 - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 503 - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 504 - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 505 - }, 506 - .oobfree = { 507 - /* 2 bytes at offset 0 hold manufacturer badblock markers */ 508 - {.offset = 2, .length = 22, }, 509 - /* 5 bytes at offset 8 hold BBT markers */ 510 - /* 8 bytes at offset 16 hold JFFS2 clean markers */ 511 - }, 512 - }; 489 + if (!section) { 490 + oobregion->offset = 0; 491 + oobregion->length = 5; 492 + } else if (section == 1) { 493 + oobregion->offset = 6; 494 + oobregion->length = 2; 495 + } else { 496 + oobregion->offset = 13; 497 + oobregion->length = 3; 498 + } 513 499 514 - /* 515 - * An ECC layout for using 4-bit ECC with large-page (4096bytes) flash, 516 - * storing ten ECC bytes plus the manufacturer's bad block marker byte, 517 - * and not overlapping the default BBT markers. 
518 - */ 519 - static struct nand_ecclayout hwecc4_4096 = { 520 - .eccbytes = 80, 521 - .eccpos = { 522 - /* at the end of spare sector */ 523 - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 524 - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 525 - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 526 - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 527 - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 528 - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 529 - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 530 - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 531 - }, 532 - .oobfree = { 533 - /* 2 bytes at offset 0 hold manufacturer badblock markers */ 534 - {.offset = 2, .length = 46, }, 535 - /* 5 bytes at offset 8 hold BBT markers */ 536 - /* 8 bytes at offset 16 hold JFFS2 clean markers */ 537 - }, 500 + return 0; 501 + } 502 + 503 + static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section, 504 + struct mtd_oob_region *oobregion) 505 + { 506 + if (section > 1) 507 + return -ERANGE; 508 + 509 + if (!section) { 510 + oobregion->offset = 8; 511 + oobregion->length = 5; 512 + } else { 513 + oobregion->offset = 16; 514 + oobregion->length = mtd->oobsize - 16; 515 + } 516 + 517 + return 0; 518 + } 519 + 520 + static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = { 521 + .ecc = hwecc4_ooblayout_small_ecc, 522 + .free = hwecc4_ooblayout_small_free, 538 523 }; 539 524 540 525 #if defined(CONFIG_OF) ··· 558 577 "ti,davinci-mask-chipsel", &prop)) 559 578 pdata->mask_chipsel = prop; 560 579 if (!of_property_read_string(pdev->dev.of_node, 561 - "nand-ecc-mode", &mode) || 562 - !of_property_read_string(pdev->dev.of_node, 563 580 "ti,davinci-ecc-mode", &mode)) { 564 581 if (!strncmp("none", mode, 4)) 565 582 pdata->ecc_mode = NAND_ECC_NONE; ··· 570 591 "ti,davinci-ecc-bits", &prop)) 571 592 pdata->ecc_bits = prop; 572 593 573 - prop = of_get_nand_bus_width(pdev->dev.of_node); 574 - if (0 < prop || !of_property_read_u32(pdev->dev.of_node, 575 - "ti,davinci-nand-buswidth", 
&prop)) 576 - if (prop == 16) 577 - pdata->options |= NAND_BUSWIDTH_16; 594 + if (!of_property_read_u32(pdev->dev.of_node, 595 + "ti,davinci-nand-buswidth", &prop) && prop == 16) 596 + pdata->options |= NAND_BUSWIDTH_16; 597 + 578 598 if (of_property_read_bool(pdev->dev.of_node, 579 - "nand-on-flash-bbt") || 580 - of_property_read_bool(pdev->dev.of_node, 581 599 "ti,davinci-nand-use-bbt")) 582 600 pdata->bbt_options = NAND_BBT_USE_FLASH; 583 601 ··· 604 628 void __iomem *base; 605 629 int ret; 606 630 uint32_t val; 607 - nand_ecc_modes_t ecc_mode; 608 631 struct mtd_info *mtd; 609 632 610 633 pdata = nand_davinci_get_pdata(pdev); ··· 687 712 info->chip.write_buf = nand_davinci_write_buf; 688 713 689 714 /* Use board-specific ECC config */ 690 - ecc_mode = pdata->ecc_mode; 715 + info->chip.ecc.mode = pdata->ecc_mode; 691 716 692 717 ret = -EINVAL; 693 - switch (ecc_mode) { 718 + 719 + info->clk = devm_clk_get(&pdev->dev, "aemif"); 720 + if (IS_ERR(info->clk)) { 721 + ret = PTR_ERR(info->clk); 722 + dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); 723 + return ret; 724 + } 725 + 726 + ret = clk_prepare_enable(info->clk); 727 + if (ret < 0) { 728 + dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", 729 + ret); 730 + goto err_clk_enable; 731 + } 732 + 733 + spin_lock_irq(&davinci_nand_lock); 734 + 735 + /* put CSxNAND into NAND mode */ 736 + val = davinci_nand_readl(info, NANDFCR_OFFSET); 737 + val |= BIT(info->core_chipsel); 738 + davinci_nand_writel(info, NANDFCR_OFFSET, val); 739 + 740 + spin_unlock_irq(&davinci_nand_lock); 741 + 742 + /* Scan to find existence of the device(s) */ 743 + ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 
2 : 1, NULL); 744 + if (ret < 0) { 745 + dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); 746 + goto err; 747 + } 748 + 749 + switch (info->chip.ecc.mode) { 694 750 case NAND_ECC_NONE: 751 + pdata->ecc_bits = 0; 752 + break; 695 753 case NAND_ECC_SOFT: 696 754 pdata->ecc_bits = 0; 755 + /* 756 + * This driver expects Hamming based ECC when ecc_mode is set 757 + * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to 758 + * avoid adding an extra ->ecc_algo field to 759 + * davinci_nand_pdata. 760 + */ 761 + info->chip.ecc.algo = NAND_ECC_HAMMING; 697 762 break; 698 763 case NAND_ECC_HW: 699 764 if (pdata->ecc_bits == 4) { ··· 769 754 default: 770 755 return -EINVAL; 771 756 } 772 - info->chip.ecc.mode = ecc_mode; 773 - 774 - info->clk = devm_clk_get(&pdev->dev, "aemif"); 775 - if (IS_ERR(info->clk)) { 776 - ret = PTR_ERR(info->clk); 777 - dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); 778 - return ret; 779 - } 780 - 781 - ret = clk_prepare_enable(info->clk); 782 - if (ret < 0) { 783 - dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", 784 - ret); 785 - goto err_clk_enable; 786 - } 787 - 788 - spin_lock_irq(&davinci_nand_lock); 789 - 790 - /* put CSxNAND into NAND mode */ 791 - val = davinci_nand_readl(info, NANDFCR_OFFSET); 792 - val |= BIT(info->core_chipsel); 793 - davinci_nand_writel(info, NANDFCR_OFFSET, val); 794 - 795 - spin_unlock_irq(&davinci_nand_lock); 796 - 797 - /* Scan to find existence of the device(s) */ 798 - ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL); 799 - if (ret < 0) { 800 - dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); 801 - goto err; 802 - } 803 757 804 758 /* Update ECC layout if needed ... for 1-bit HW ECC, the default 805 759 * is OK, but it allocates 6 bytes when only 3 are needed (for ··· 789 805 * table marker fits in the free bytes. 
790 806 */ 791 807 if (chunks == 1) { 792 - info->ecclayout = hwecc4_small; 793 - info->ecclayout.oobfree[1].length = mtd->oobsize - 16; 794 - goto syndrome_done; 795 - } 796 - if (chunks == 4) { 797 - info->ecclayout = hwecc4_2048; 808 + mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops); 809 + } else if (chunks == 4 || chunks == 8) { 810 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 798 811 info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; 799 - goto syndrome_done; 812 + } else { 813 + ret = -EIO; 814 + goto err; 800 815 } 801 - if (chunks == 8) { 802 - info->ecclayout = hwecc4_4096; 803 - info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; 804 - goto syndrome_done; 805 - } 806 - 807 - ret = -EIO; 808 - goto err; 809 - 810 - syndrome_done: 811 - info->chip.ecc.layout = &info->ecclayout; 812 816 } 813 817 814 818 ret = nand_scan_tail(mtd); ··· 822 850 823 851 err_clk_enable: 824 852 spin_lock_irq(&davinci_nand_lock); 825 - if (ecc_mode == NAND_ECC_HW_SYNDROME) 853 + if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 826 854 ecc4_busy = false; 827 855 spin_unlock_irq(&davinci_nand_lock); 828 856 return ret;
+35 -15
drivers/mtd/nand/denali.c
··· 1374 1374 * correction 1375 1375 */ 1376 1376 #define ECC_8BITS 14 1377 - static struct nand_ecclayout nand_8bit_oob = { 1378 - .eccbytes = 14, 1379 - }; 1380 - 1381 1377 #define ECC_15BITS 26 1382 - static struct nand_ecclayout nand_15bit_oob = { 1383 - .eccbytes = 26, 1378 + 1379 + static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 1380 + struct mtd_oob_region *oobregion) 1381 + { 1382 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1383 + struct nand_chip *chip = mtd_to_nand(mtd); 1384 + 1385 + if (section) 1386 + return -ERANGE; 1387 + 1388 + oobregion->offset = denali->bbtskipbytes; 1389 + oobregion->length = chip->ecc.total; 1390 + 1391 + return 0; 1392 + } 1393 + 1394 + static int denali_ooblayout_free(struct mtd_info *mtd, int section, 1395 + struct mtd_oob_region *oobregion) 1396 + { 1397 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1398 + struct nand_chip *chip = mtd_to_nand(mtd); 1399 + 1400 + if (section) 1401 + return -ERANGE; 1402 + 1403 + oobregion->offset = chip->ecc.total + denali->bbtskipbytes; 1404 + oobregion->length = mtd->oobsize - oobregion->offset; 1405 + 1406 + return 0; 1407 + } 1408 + 1409 + static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 1410 + .ecc = denali_ooblayout_ecc, 1411 + .free = denali_ooblayout_free, 1384 1412 }; 1385 1413 1386 1414 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; ··· 1589 1561 ECC_SECTOR_SIZE)))) { 1590 1562 /* if MLC OOB size is large enough, use 15bit ECC*/ 1591 1563 denali->nand.ecc.strength = 15; 1592 - denali->nand.ecc.layout = &nand_15bit_oob; 1593 1564 denali->nand.ecc.bytes = ECC_15BITS; 1594 1565 iowrite32(15, denali->flash_reg + ECC_CORRECTION); 1595 1566 } else if (mtd->oobsize < (denali->bbtskipbytes + ··· 1598 1571 goto failed_req_irq; 1599 1572 } else { 1600 1573 denali->nand.ecc.strength = 8; 1601 - denali->nand.ecc.layout = &nand_8bit_oob; 1602 1574 denali->nand.ecc.bytes = ECC_8BITS; 1603 1575 iowrite32(8, denali->flash_reg + 
ECC_CORRECTION); 1604 1576 } 1605 1577 1578 + mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1606 1579 denali->nand.ecc.bytes *= denali->devnum; 1607 1580 denali->nand.ecc.strength *= denali->devnum; 1608 - denali->nand.ecc.layout->eccbytes *= 1609 - mtd->writesize / ECC_SECTOR_SIZE; 1610 - denali->nand.ecc.layout->oobfree[0].offset = 1611 - denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes; 1612 - denali->nand.ecc.layout->oobfree[0].length = 1613 - mtd->oobsize - denali->nand.ecc.layout->eccbytes - 1614 - denali->bbtskipbytes; 1615 1581 1616 1582 /* 1617 1583 * Let driver know the total blocks number and how many blocks
+45 -15
drivers/mtd/nand/diskonchip.c
··· 950 950 951 951 //u_char mydatabuf[528]; 952 952 953 - /* The strange out-of-order .oobfree list below is a (possibly unneeded) 954 - * attempt to retain compatibility. It used to read: 955 - * .oobfree = { {8, 8} } 956 - * Since that leaves two bytes unusable, it was changed. But the following 957 - * scheme might affect existing jffs2 installs by moving the cleanmarker: 958 - * .oobfree = { {6, 10} } 959 - * jffs2 seems to handle the above gracefully, but the current scheme seems 960 - * safer. The only problem with it is that any code that parses oobfree must 961 - * be able to handle out-of-order segments. 962 - */ 963 - static struct nand_ecclayout doc200x_oobinfo = { 964 - .eccbytes = 6, 965 - .eccpos = {0, 1, 2, 3, 4, 5}, 966 - .oobfree = {{8, 8}, {6, 2}} 953 + static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section, 954 + struct mtd_oob_region *oobregion) 955 + { 956 + if (section) 957 + return -ERANGE; 958 + 959 + oobregion->offset = 0; 960 + oobregion->length = 6; 961 + 962 + return 0; 963 + } 964 + 965 + static int doc200x_ooblayout_free(struct mtd_info *mtd, int section, 966 + struct mtd_oob_region *oobregion) 967 + { 968 + if (section > 1) 969 + return -ERANGE; 970 + 971 + /* 972 + * The strange out-of-order free bytes definition is a (possibly 973 + * unneeded) attempt to retain compatibility. It used to read: 974 + * .oobfree = { {8, 8} } 975 + * Since that leaves two bytes unusable, it was changed. But the 976 + * following scheme might affect existing jffs2 installs by moving the 977 + * cleanmarker: 978 + * .oobfree = { {6, 10} } 979 + * jffs2 seems to handle the above gracefully, but the current scheme 980 + * seems safer. The only problem with it is that any code retrieving 981 + * free bytes position must be able to handle out-of-order segments. 
982 + */ 983 + if (!section) { 984 + oobregion->offset = 8; 985 + oobregion->length = 8; 986 + } else { 987 + oobregion->offset = 6; 988 + oobregion->length = 2; 989 + } 990 + 991 + return 0; 992 + } 993 + 994 + static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = { 995 + .ecc = doc200x_ooblayout_ecc, 996 + .free = doc200x_ooblayout_free, 967 997 }; 968 998 969 999 /* Find the (I)NFTL Media Header, and optionally also the mirror media header. ··· 1567 1537 nand->bbt_md = nand->bbt_td + 1; 1568 1538 1569 1539 mtd->owner = THIS_MODULE; 1540 + mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops); 1570 1541 1571 1542 nand_set_controller_data(nand, doc); 1572 1543 nand->select_chip = doc200x_select_chip; ··· 1579 1548 nand->ecc.calculate = doc200x_calculate_ecc; 1580 1549 nand->ecc.correct = doc200x_correct_data; 1581 1550 1582 - nand->ecc.layout = &doc200x_oobinfo; 1583 1551 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1584 1552 nand->ecc.size = 512; 1585 1553 nand->ecc.bytes = 6;
+28 -5
drivers/mtd/nand/docg4.c
··· 222 222 * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14. 223 223 * Byte 15 (the last) is used by the driver as a "page written" flag. 224 224 */ 225 - static struct nand_ecclayout docg4_oobinfo = { 226 - .eccbytes = 9, 227 - .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, 228 - .oobfree = { {.offset = 2, .length = 5} } 225 + static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section, 226 + struct mtd_oob_region *oobregion) 227 + { 228 + if (section) 229 + return -ERANGE; 230 + 231 + oobregion->offset = 7; 232 + oobregion->length = 9; 233 + 234 + return 0; 235 + } 236 + 237 + static int docg4_ooblayout_free(struct mtd_info *mtd, int section, 238 + struct mtd_oob_region *oobregion) 239 + { 240 + if (section) 241 + return -ERANGE; 242 + 243 + oobregion->offset = 2; 244 + oobregion->length = 5; 245 + 246 + return 0; 247 + } 248 + 249 + static const struct mtd_ooblayout_ops docg4_ooblayout_ops = { 250 + .ecc = docg4_ooblayout_ecc, 251 + .free = docg4_ooblayout_free, 229 252 }; 230 253 231 254 /* ··· 1232 1209 mtd->writesize = DOCG4_PAGE_SIZE; 1233 1210 mtd->erasesize = DOCG4_BLOCK_SIZE; 1234 1211 mtd->oobsize = DOCG4_OOB_SIZE; 1212 + mtd_set_ooblayout(mtd, &docg4_ooblayout_ops); 1235 1213 nand->chipsize = DOCG4_CHIP_SIZE; 1236 1214 nand->chip_shift = DOCG4_CHIP_SHIFT; 1237 1215 nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT; ··· 1241 1217 nand->pagemask = 0x3ffff; 1242 1218 nand->badblockpos = NAND_LARGE_BADBLOCK_POS; 1243 1219 nand->badblockbits = 8; 1244 - nand->ecc.layout = &docg4_oobinfo; 1245 1220 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1246 1221 nand->ecc.size = DOCG4_PAGE_SIZE; 1247 1222 nand->ecc.prepad = 8;
+48 -36
drivers/mtd/nand/fsl_elbc_nand.c
··· 79 79 80 80 /* These map to the positions used by the FCM hardware ECC generator */ 81 81 82 - /* Small Page FLASH with FMR[ECCM] = 0 */ 83 - static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { 84 - .eccbytes = 3, 85 - .eccpos = {6, 7, 8}, 86 - .oobfree = { {0, 5}, {9, 7} }, 87 - }; 82 + static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section, 83 + struct mtd_oob_region *oobregion) 84 + { 85 + struct nand_chip *chip = mtd_to_nand(mtd); 86 + struct fsl_elbc_mtd *priv = nand_get_controller_data(chip); 88 87 89 - /* Small Page FLASH with FMR[ECCM] = 1 */ 90 - static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { 91 - .eccbytes = 3, 92 - .eccpos = {8, 9, 10}, 93 - .oobfree = { {0, 5}, {6, 2}, {11, 5} }, 94 - }; 88 + if (section >= chip->ecc.steps) 89 + return -ERANGE; 95 90 96 - /* Large Page FLASH with FMR[ECCM] = 0 */ 97 - static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { 98 - .eccbytes = 12, 99 - .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, 100 - .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, 101 - }; 91 + oobregion->offset = (16 * section) + 6; 92 + if (priv->fmr & FMR_ECCM) 93 + oobregion->offset += 2; 102 94 103 - /* Large Page FLASH with FMR[ECCM] = 1 */ 104 - static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { 105 - .eccbytes = 12, 106 - .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, 107 - .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, 95 + oobregion->length = chip->ecc.bytes; 96 + 97 + return 0; 98 + } 99 + 100 + static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section, 101 + struct mtd_oob_region *oobregion) 102 + { 103 + struct nand_chip *chip = mtd_to_nand(mtd); 104 + struct fsl_elbc_mtd *priv = nand_get_controller_data(chip); 105 + 106 + if (section > chip->ecc.steps) 107 + return -ERANGE; 108 + 109 + if (!section) { 110 + oobregion->offset = 0; 111 + if (mtd->writesize > 512) 112 + oobregion->offset++; 113 + oobregion->length = (priv->fmr & FMR_ECCM) ? 
7 : 5; 114 + } else { 115 + oobregion->offset = (16 * section) - 116 + ((priv->fmr & FMR_ECCM) ? 5 : 7); 117 + if (section < chip->ecc.steps) 118 + oobregion->length = 13; 119 + else 120 + oobregion->length = mtd->oobsize - oobregion->offset; 121 + } 122 + 123 + return 0; 124 + } 125 + 126 + static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = { 127 + .ecc = fsl_elbc_ooblayout_ecc, 128 + .free = fsl_elbc_ooblayout_free, 108 129 }; 109 130 110 131 /* ··· 678 657 chip->ecc.bytes); 679 658 dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n", 680 659 chip->ecc.total); 681 - dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", 682 - chip->ecc.layout); 660 + dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n", 661 + mtd->ooblayout); 683 662 dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); 684 663 dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size); 685 664 dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n", ··· 696 675 } else if (mtd->writesize == 2048) { 697 676 priv->page_size = 1; 698 677 setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); 699 - /* adjust ecc setup if needed */ 700 - if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 701 - BR_DECC_CHK_GEN) { 702 - chip->ecc.size = 512; 703 - chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 704 - &fsl_elbc_oob_lp_eccm1 : 705 - &fsl_elbc_oob_lp_eccm0; 706 - } 707 678 } else { 708 679 dev_err(priv->dev, 709 680 "fsl_elbc_init: page size %d is not supported\n", ··· 793 780 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 794 781 BR_DECC_CHK_GEN) { 795 782 chip->ecc.mode = NAND_ECC_HW; 796 - /* put in small page settings and adjust later if needed */ 797 - chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 
798 - &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 783 + mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); 799 784 chip->ecc.size = 512; 800 785 chip->ecc.bytes = 3; 801 786 chip->ecc.strength = 1; 802 787 } else { 803 788 /* otherwise fall back to default software ECC */ 804 789 chip->ecc.mode = NAND_ECC_SOFT; 790 + chip->ecc.algo = NAND_ECC_HAMMING; 805 791 } 806 792 807 793 return 0;
+115 -202
drivers/mtd/nand/fsl_ifc_nand.c
··· 67 67 68 68 static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl; 69 69 70 - /* 512-byte page with 4-bit ECC, 8-bit */ 71 - static struct nand_ecclayout oob_512_8bit_ecc4 = { 72 - .eccbytes = 8, 73 - .eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, 74 - .oobfree = { {0, 5}, {6, 2} }, 75 - }; 76 - 77 - /* 512-byte page with 4-bit ECC, 16-bit */ 78 - static struct nand_ecclayout oob_512_16bit_ecc4 = { 79 - .eccbytes = 8, 80 - .eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, 81 - .oobfree = { {2, 6}, }, 82 - }; 83 - 84 - /* 2048-byte page size with 4-bit ECC */ 85 - static struct nand_ecclayout oob_2048_ecc4 = { 86 - .eccbytes = 32, 87 - .eccpos = { 88 - 8, 9, 10, 11, 12, 13, 14, 15, 89 - 16, 17, 18, 19, 20, 21, 22, 23, 90 - 24, 25, 26, 27, 28, 29, 30, 31, 91 - 32, 33, 34, 35, 36, 37, 38, 39, 92 - }, 93 - .oobfree = { {2, 6}, {40, 24} }, 94 - }; 95 - 96 - /* 4096-byte page size with 4-bit ECC */ 97 - static struct nand_ecclayout oob_4096_ecc4 = { 98 - .eccbytes = 64, 99 - .eccpos = { 100 - 8, 9, 10, 11, 12, 13, 14, 15, 101 - 16, 17, 18, 19, 20, 21, 22, 23, 102 - 24, 25, 26, 27, 28, 29, 30, 31, 103 - 32, 33, 34, 35, 36, 37, 38, 39, 104 - 40, 41, 42, 43, 44, 45, 46, 47, 105 - 48, 49, 50, 51, 52, 53, 54, 55, 106 - 56, 57, 58, 59, 60, 61, 62, 63, 107 - 64, 65, 66, 67, 68, 69, 70, 71, 108 - }, 109 - .oobfree = { {2, 6}, {72, 56} }, 110 - }; 111 - 112 - /* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */ 113 - static struct nand_ecclayout oob_4096_ecc8 = { 114 - .eccbytes = 128, 115 - .eccpos = { 116 - 8, 9, 10, 11, 12, 13, 14, 15, 117 - 16, 17, 18, 19, 20, 21, 22, 23, 118 - 24, 25, 26, 27, 28, 29, 30, 31, 119 - 32, 33, 34, 35, 36, 37, 38, 39, 120 - 40, 41, 42, 43, 44, 45, 46, 47, 121 - 48, 49, 50, 51, 52, 53, 54, 55, 122 - 56, 57, 58, 59, 60, 61, 62, 63, 123 - 64, 65, 66, 67, 68, 69, 70, 71, 124 - 72, 73, 74, 75, 76, 77, 78, 79, 125 - 80, 81, 82, 83, 84, 85, 86, 87, 126 - 88, 89, 90, 91, 92, 93, 94, 95, 127 - 96, 97, 98, 99, 100, 101, 102, 103, 128 - 104, 105, 106, 107, 108, 
109, 110, 111, 129 - 112, 113, 114, 115, 116, 117, 118, 119, 130 - 120, 121, 122, 123, 124, 125, 126, 127, 131 - 128, 129, 130, 131, 132, 133, 134, 135, 132 - }, 133 - .oobfree = { {2, 6}, {136, 82} }, 134 - }; 135 - 136 - /* 8192-byte page size with 4-bit ECC */ 137 - static struct nand_ecclayout oob_8192_ecc4 = { 138 - .eccbytes = 128, 139 - .eccpos = { 140 - 8, 9, 10, 11, 12, 13, 14, 15, 141 - 16, 17, 18, 19, 20, 21, 22, 23, 142 - 24, 25, 26, 27, 28, 29, 30, 31, 143 - 32, 33, 34, 35, 36, 37, 38, 39, 144 - 40, 41, 42, 43, 44, 45, 46, 47, 145 - 48, 49, 50, 51, 52, 53, 54, 55, 146 - 56, 57, 58, 59, 60, 61, 62, 63, 147 - 64, 65, 66, 67, 68, 69, 70, 71, 148 - 72, 73, 74, 75, 76, 77, 78, 79, 149 - 80, 81, 82, 83, 84, 85, 86, 87, 150 - 88, 89, 90, 91, 92, 93, 94, 95, 151 - 96, 97, 98, 99, 100, 101, 102, 103, 152 - 104, 105, 106, 107, 108, 109, 110, 111, 153 - 112, 113, 114, 115, 116, 117, 118, 119, 154 - 120, 121, 122, 123, 124, 125, 126, 127, 155 - 128, 129, 130, 131, 132, 133, 134, 135, 156 - }, 157 - .oobfree = { {2, 6}, {136, 208} }, 158 - }; 159 - 160 - /* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */ 161 - static struct nand_ecclayout oob_8192_ecc8 = { 162 - .eccbytes = 256, 163 - .eccpos = { 164 - 8, 9, 10, 11, 12, 13, 14, 15, 165 - 16, 17, 18, 19, 20, 21, 22, 23, 166 - 24, 25, 26, 27, 28, 29, 30, 31, 167 - 32, 33, 34, 35, 36, 37, 38, 39, 168 - 40, 41, 42, 43, 44, 45, 46, 47, 169 - 48, 49, 50, 51, 52, 53, 54, 55, 170 - 56, 57, 58, 59, 60, 61, 62, 63, 171 - 64, 65, 66, 67, 68, 69, 70, 71, 172 - 72, 73, 74, 75, 76, 77, 78, 79, 173 - 80, 81, 82, 83, 84, 85, 86, 87, 174 - 88, 89, 90, 91, 92, 93, 94, 95, 175 - 96, 97, 98, 99, 100, 101, 102, 103, 176 - 104, 105, 106, 107, 108, 109, 110, 111, 177 - 112, 113, 114, 115, 116, 117, 118, 119, 178 - 120, 121, 122, 123, 124, 125, 126, 127, 179 - 128, 129, 130, 131, 132, 133, 134, 135, 180 - 136, 137, 138, 139, 140, 141, 142, 143, 181 - 144, 145, 146, 147, 148, 149, 150, 151, 182 - 152, 153, 154, 155, 156, 
157, 158, 159, 183 - 160, 161, 162, 163, 164, 165, 166, 167, 184 - 168, 169, 170, 171, 172, 173, 174, 175, 185 - 176, 177, 178, 179, 180, 181, 182, 183, 186 - 184, 185, 186, 187, 188, 189, 190, 191, 187 - 192, 193, 194, 195, 196, 197, 198, 199, 188 - 200, 201, 202, 203, 204, 205, 206, 207, 189 - 208, 209, 210, 211, 212, 213, 214, 215, 190 - 216, 217, 218, 219, 220, 221, 222, 223, 191 - 224, 225, 226, 227, 228, 229, 230, 231, 192 - 232, 233, 234, 235, 236, 237, 238, 239, 193 - 240, 241, 242, 243, 244, 245, 246, 247, 194 - 248, 249, 250, 251, 252, 253, 254, 255, 195 - 256, 257, 258, 259, 260, 261, 262, 263, 196 - }, 197 - .oobfree = { {2, 6}, {264, 80} }, 198 - }; 199 - 200 70 /* 201 71 * Generic flash bbt descriptors 202 72 */ ··· 93 223 .pattern = mirror_pattern, 94 224 }; 95 225 226 + static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section, 227 + struct mtd_oob_region *oobregion) 228 + { 229 + struct nand_chip *chip = mtd_to_nand(mtd); 230 + 231 + if (section) 232 + return -ERANGE; 233 + 234 + oobregion->offset = 8; 235 + oobregion->length = chip->ecc.total; 236 + 237 + return 0; 238 + } 239 + 240 + static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section, 241 + struct mtd_oob_region *oobregion) 242 + { 243 + struct nand_chip *chip = mtd_to_nand(mtd); 244 + 245 + if (section > 1) 246 + return -ERANGE; 247 + 248 + if (mtd->writesize == 512 && 249 + !(chip->options & NAND_BUSWIDTH_16)) { 250 + if (!section) { 251 + oobregion->offset = 0; 252 + oobregion->length = 5; 253 + } else { 254 + oobregion->offset = 6; 255 + oobregion->length = 2; 256 + } 257 + 258 + return 0; 259 + } 260 + 261 + if (!section) { 262 + oobregion->offset = 2; 263 + oobregion->length = 6; 264 + } else { 265 + oobregion->offset = chip->ecc.total + 8; 266 + oobregion->length = mtd->oobsize - oobregion->offset; 267 + } 268 + 269 + return 0; 270 + } 271 + 272 + static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = { 273 + .ecc = fsl_ifc_ooblayout_ecc, 274 + .free = 
fsl_ifc_ooblayout_free, 275 + }; 276 + 96 277 /* 97 278 * Set up the IFC hardware block and page address fields, and the ifc nand 98 279 * structure addr field to point to the correct IFC buffer in memory ··· 153 232 struct nand_chip *chip = mtd_to_nand(mtd); 154 233 struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); 155 234 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 156 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 235 + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 157 236 int buf_num; 158 237 159 238 ifc_nand_ctrl->page = page_addr; ··· 178 257 u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); 179 258 u32 __iomem *mainarea = (u32 __iomem *)addr; 180 259 u8 __iomem *oob = addr + mtd->writesize; 181 - int i; 260 + struct mtd_oob_region oobregion = { }; 261 + int i, section = 0; 182 262 183 263 for (i = 0; i < mtd->writesize / 4; i++) { 184 264 if (__raw_readl(&mainarea[i]) != 0xffffffff) 185 265 return 0; 186 266 } 187 267 188 - for (i = 0; i < chip->ecc.layout->eccbytes; i++) { 189 - int pos = chip->ecc.layout->eccpos[i]; 268 + mtd_ooblayout_ecc(mtd, section++, &oobregion); 269 + while (oobregion.length) { 270 + for (i = 0; i < oobregion.length; i++) { 271 + if (__raw_readb(&oob[oobregion.offset + i]) != 0xff) 272 + return 0; 273 + } 190 274 191 - if (__raw_readb(&oob[pos]) != 0xff) 192 - return 0; 275 + mtd_ooblayout_ecc(mtd, section++, &oobregion); 193 276 } 194 277 195 278 return 1; ··· 220 295 struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); 221 296 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 222 297 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; 223 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 298 + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 224 299 u32 eccstat[4]; 225 300 int i; 226 301 ··· 296 371 { 297 372 struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); 298 373 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 299 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 374 + struct fsl_ifc_runtime __iomem *ifc = 
ctrl->rregs; 300 375 301 376 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ 302 377 if (mtd->writesize > 512) { ··· 336 411 struct nand_chip *chip = mtd_to_nand(mtd); 337 412 struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); 338 413 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 339 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 414 + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 340 415 341 416 /* clear the read buffer */ 342 417 ifc_nand_ctrl->read_bytes = 0; ··· 648 723 { 649 724 struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); 650 725 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 651 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 726 + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 652 727 u32 nand_fsr; 653 728 654 729 /* Use READ_STATUS command, but wait for the device to be ready */ ··· 733 808 chip->ecc.bytes); 734 809 dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__, 735 810 chip->ecc.total); 736 - dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__, 737 - chip->ecc.layout); 811 + dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__, 812 + mtd->ooblayout); 738 813 dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags); 739 814 dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size); 740 815 dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__, ··· 750 825 static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) 751 826 { 752 827 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 753 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 828 + struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; 829 + struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; 754 830 uint32_t csor = 0, csor_8k = 0, csor_ext = 0; 755 831 uint32_t cs = priv->bank; 756 832 757 833 /* Save CSOR and CSOR_ext */ 758 - csor = ifc_in32(&ifc->csor_cs[cs].csor); 759 - csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext); 834 + csor = ifc_in32(&ifc_global->csor_cs[cs].csor); 835 + csor_ext = 
ifc_in32(&ifc_global->csor_cs[cs].csor_ext); 760 836 761 837 /* chage PageSize 8K and SpareSize 1K*/ 762 838 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; 763 - ifc_out32(csor_8k, &ifc->csor_cs[cs].csor); 764 - ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext); 839 + ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); 840 + ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); 765 841 766 842 /* READID */ 767 843 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 768 - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 769 - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), 770 - &ifc->ifc_nand.nand_fir0); 844 + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 845 + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), 846 + &ifc_runtime->ifc_nand.nand_fir0); 771 847 ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, 772 - &ifc->ifc_nand.nand_fcr0); 773 - ifc_out32(0x0, &ifc->ifc_nand.row3); 848 + &ifc_runtime->ifc_nand.nand_fcr0); 849 + ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); 774 850 775 - ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr); 851 + ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); 776 852 777 853 /* Program ROW0/COL0 */ 778 - ifc_out32(0x0, &ifc->ifc_nand.row0); 779 - ifc_out32(0x0, &ifc->ifc_nand.col0); 854 + ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); 855 + ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); 780 856 781 857 /* set the chip select for NAND Transaction */ 782 - ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); 858 + ifc_out32(cs << IFC_NAND_CSEL_SHIFT, 859 + &ifc_runtime->ifc_nand.nand_csel); 783 860 784 861 /* start read seq */ 785 - ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); 862 + ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, 863 + &ifc_runtime->ifc_nand.nandseq_strt); 786 864 787 865 /* wait for command complete flag or timeout */ 788 866 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, ··· 795 867 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); 796 868 797 869 /* Restore CSOR and CSOR_ext */ 798 - 
ifc_out32(csor, &ifc->csor_cs[cs].csor); 799 - ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext); 870 + ifc_out32(csor, &ifc_global->csor_cs[cs].csor); 871 + ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); 800 872 } 801 873 802 874 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) 803 875 { 804 876 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 805 - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 877 + struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; 878 + struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; 806 879 struct nand_chip *chip = &priv->chip; 807 880 struct mtd_info *mtd = nand_to_mtd(&priv->chip); 808 - struct nand_ecclayout *layout; 809 881 u32 csor; 810 882 811 883 /* Fill in fsl_ifc_mtd structure */ ··· 814 886 815 887 /* fill in nand_chip structure */ 816 888 /* set up function call table */ 817 - if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) 889 + if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) 890 + & CSPR_PORT_SIZE_16) 818 891 chip->read_byte = fsl_ifc_read_byte16; 819 892 else 820 893 chip->read_byte = fsl_ifc_read_byte; ··· 829 900 chip->bbt_td = &bbt_main_descr; 830 901 chip->bbt_md = &bbt_mirror_descr; 831 902 832 - ifc_out32(0x0, &ifc->ifc_nand.ncfgr); 903 + ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); 833 904 834 905 /* set up nand options */ 835 906 chip->bbt_options = NAND_BBT_USE_FLASH; 836 907 chip->options = NAND_NO_SUBPAGE_WRITE; 837 908 838 - if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { 909 + if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) 910 + & CSPR_PORT_SIZE_16) { 839 911 chip->read_byte = fsl_ifc_read_byte16; 840 912 chip->options |= NAND_BUSWIDTH_16; 841 913 } else { ··· 849 919 chip->ecc.read_page = fsl_ifc_read_page; 850 920 chip->ecc.write_page = fsl_ifc_write_page; 851 921 852 - csor = ifc_in32(&ifc->csor_cs[priv->bank].csor); 853 - 854 - /* Hardware generates ECC per 512 Bytes */ 855 - chip->ecc.size = 512; 856 - chip->ecc.bytes = 8; 857 - 
chip->ecc.strength = 4; 922 + csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); 858 923 859 924 switch (csor & CSOR_NAND_PGS_MASK) { 860 925 case CSOR_NAND_PGS_512: 861 - if (chip->options & NAND_BUSWIDTH_16) { 862 - layout = &oob_512_16bit_ecc4; 863 - } else { 864 - layout = &oob_512_8bit_ecc4; 865 - 926 + if (!(chip->options & NAND_BUSWIDTH_16)) { 866 927 /* Avoid conflict with bad block marker */ 867 928 bbt_main_descr.offs = 0; 868 929 bbt_mirror_descr.offs = 0; ··· 863 942 break; 864 943 865 944 case CSOR_NAND_PGS_2K: 866 - layout = &oob_2048_ecc4; 867 945 priv->bufnum_mask = 3; 868 946 break; 869 947 870 948 case CSOR_NAND_PGS_4K: 871 - if ((csor & CSOR_NAND_ECC_MODE_MASK) == 872 - CSOR_NAND_ECC_MODE_4) { 873 - layout = &oob_4096_ecc4; 874 - } else { 875 - layout = &oob_4096_ecc8; 876 - chip->ecc.bytes = 16; 877 - chip->ecc.strength = 8; 878 - } 879 - 880 949 priv->bufnum_mask = 1; 881 950 break; 882 951 883 952 case CSOR_NAND_PGS_8K: 884 - if ((csor & CSOR_NAND_ECC_MODE_MASK) == 885 - CSOR_NAND_ECC_MODE_4) { 886 - layout = &oob_8192_ecc4; 887 - } else { 888 - layout = &oob_8192_ecc8; 889 - chip->ecc.bytes = 16; 890 - chip->ecc.strength = 8; 891 - } 892 - 893 953 priv->bufnum_mask = 0; 894 - break; 954 + break; 895 955 896 956 default: 897 957 dev_err(priv->dev, "bad csor %#x: bad page size\n", csor); ··· 882 980 /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */ 883 981 if (csor & CSOR_NAND_ECC_DEC_EN) { 884 982 chip->ecc.mode = NAND_ECC_HW; 885 - chip->ecc.layout = layout; 983 + mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops); 984 + 985 + /* Hardware generates ECC per 512 Bytes */ 986 + chip->ecc.size = 512; 987 + if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) { 988 + chip->ecc.bytes = 8; 989 + chip->ecc.strength = 4; 990 + } else { 991 + chip->ecc.bytes = 16; 992 + chip->ecc.strength = 8; 993 + } 886 994 } else { 887 995 chip->ecc.mode = NAND_ECC_SOFT; 996 + chip->ecc.algo = NAND_ECC_HAMMING; 888 997 } 889 998 890 999 if 
(ctrl->version == FSL_IFC_VERSION_1_1_0) ··· 920 1007 return 0; 921 1008 } 922 1009 923 - static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, 1010 + static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, 924 1011 phys_addr_t addr) 925 1012 { 926 - u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr); 1013 + u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); 927 1014 928 1015 if (!(cspr & CSPR_V)) 929 1016 return 0; ··· 937 1024 938 1025 static int fsl_ifc_nand_probe(struct platform_device *dev) 939 1026 { 940 - struct fsl_ifc_regs __iomem *ifc; 1027 + struct fsl_ifc_runtime __iomem *ifc; 941 1028 struct fsl_ifc_mtd *priv; 942 1029 struct resource res; 943 1030 static const char *part_probe_types[] ··· 947 1034 struct device_node *node = dev->dev.of_node; 948 1035 struct mtd_info *mtd; 949 1036 950 - if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) 1037 + if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) 951 1038 return -ENODEV; 952 - ifc = fsl_ifc_ctrl_dev->regs; 1039 + ifc = fsl_ifc_ctrl_dev->rregs; 953 1040 954 1041 /* get, allocate and map the memory resource */ 955 1042 ret = of_address_to_resource(node, 0, &res); ··· 960 1047 961 1048 /* find which chip select it is connected to */ 962 1049 for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { 963 - if (match_bank(ifc, bank, res.start)) 1050 + if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) 964 1051 break; 965 1052 } 966 1053
+1
drivers/mtd/nand/fsl_upm.c
··· 170 170 fun->chip.read_buf = fun_read_buf; 171 171 fun->chip.write_buf = fun_write_buf; 172 172 fun->chip.ecc.mode = NAND_ECC_SOFT; 173 + fun->chip.ecc.algo = NAND_ECC_HAMMING; 173 174 if (fun->mchip_count > 1) 174 175 fun->chip.select_chip = fun_select_chip; 175 176
+85 -247
drivers/mtd/nand/fsmc_nand.c
··· 39 39 #include <linux/amba/bus.h> 40 40 #include <mtd/mtd-abi.h> 41 41 42 - static struct nand_ecclayout fsmc_ecc1_128_layout = { 43 - .eccbytes = 24, 44 - .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, 45 - 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, 46 - .oobfree = { 47 - {.offset = 8, .length = 8}, 48 - {.offset = 24, .length = 8}, 49 - {.offset = 40, .length = 8}, 50 - {.offset = 56, .length = 8}, 51 - {.offset = 72, .length = 8}, 52 - {.offset = 88, .length = 8}, 53 - {.offset = 104, .length = 8}, 54 - {.offset = 120, .length = 8} 55 - } 56 - }; 42 + static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section, 43 + struct mtd_oob_region *oobregion) 44 + { 45 + struct nand_chip *chip = mtd_to_nand(mtd); 57 46 58 - static struct nand_ecclayout fsmc_ecc1_64_layout = { 59 - .eccbytes = 12, 60 - .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52}, 61 - .oobfree = { 62 - {.offset = 8, .length = 8}, 63 - {.offset = 24, .length = 8}, 64 - {.offset = 40, .length = 8}, 65 - {.offset = 56, .length = 8}, 66 - } 67 - }; 47 + if (section >= chip->ecc.steps) 48 + return -ERANGE; 68 49 69 - static struct nand_ecclayout fsmc_ecc1_16_layout = { 70 - .eccbytes = 3, 71 - .eccpos = {2, 3, 4}, 72 - .oobfree = { 73 - {.offset = 8, .length = 8}, 74 - } 75 - }; 50 + oobregion->offset = (section * 16) + 2; 51 + oobregion->length = 3; 76 52 77 - /* 78 - * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes 79 - * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46 80 - * bytes are free for use. 
81 - */ 82 - static struct nand_ecclayout fsmc_ecc4_256_layout = { 83 - .eccbytes = 208, 84 - .eccpos = { 2, 3, 4, 5, 6, 7, 8, 85 - 9, 10, 11, 12, 13, 14, 86 - 18, 19, 20, 21, 22, 23, 24, 87 - 25, 26, 27, 28, 29, 30, 88 - 34, 35, 36, 37, 38, 39, 40, 89 - 41, 42, 43, 44, 45, 46, 90 - 50, 51, 52, 53, 54, 55, 56, 91 - 57, 58, 59, 60, 61, 62, 92 - 66, 67, 68, 69, 70, 71, 72, 93 - 73, 74, 75, 76, 77, 78, 94 - 82, 83, 84, 85, 86, 87, 88, 95 - 89, 90, 91, 92, 93, 94, 96 - 98, 99, 100, 101, 102, 103, 104, 97 - 105, 106, 107, 108, 109, 110, 98 - 114, 115, 116, 117, 118, 119, 120, 99 - 121, 122, 123, 124, 125, 126, 100 - 130, 131, 132, 133, 134, 135, 136, 101 - 137, 138, 139, 140, 141, 142, 102 - 146, 147, 148, 149, 150, 151, 152, 103 - 153, 154, 155, 156, 157, 158, 104 - 162, 163, 164, 165, 166, 167, 168, 105 - 169, 170, 171, 172, 173, 174, 106 - 178, 179, 180, 181, 182, 183, 184, 107 - 185, 186, 187, 188, 189, 190, 108 - 194, 195, 196, 197, 198, 199, 200, 109 - 201, 202, 203, 204, 205, 206, 110 - 210, 211, 212, 213, 214, 215, 216, 111 - 217, 218, 219, 220, 221, 222, 112 - 226, 227, 228, 229, 230, 231, 232, 113 - 233, 234, 235, 236, 237, 238, 114 - 242, 243, 244, 245, 246, 247, 248, 115 - 249, 250, 251, 252, 253, 254 116 - }, 117 - .oobfree = { 118 - {.offset = 15, .length = 3}, 119 - {.offset = 31, .length = 3}, 120 - {.offset = 47, .length = 3}, 121 - {.offset = 63, .length = 3}, 122 - {.offset = 79, .length = 3}, 123 - {.offset = 95, .length = 3}, 124 - {.offset = 111, .length = 3}, 125 - {.offset = 127, .length = 3}, 126 - {.offset = 143, .length = 3}, 127 - {.offset = 159, .length = 3}, 128 - {.offset = 175, .length = 3}, 129 - {.offset = 191, .length = 3}, 130 - {.offset = 207, .length = 3}, 131 - {.offset = 223, .length = 3}, 132 - {.offset = 239, .length = 3}, 133 - {.offset = 255, .length = 1} 134 - } 135 - }; 53 + return 0; 54 + } 136 55 137 - /* 138 - * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 
13*8 bytes 139 - * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118 140 - * bytes are free for use. 141 - */ 142 - static struct nand_ecclayout fsmc_ecc4_224_layout = { 143 - .eccbytes = 104, 144 - .eccpos = { 2, 3, 4, 5, 6, 7, 8, 145 - 9, 10, 11, 12, 13, 14, 146 - 18, 19, 20, 21, 22, 23, 24, 147 - 25, 26, 27, 28, 29, 30, 148 - 34, 35, 36, 37, 38, 39, 40, 149 - 41, 42, 43, 44, 45, 46, 150 - 50, 51, 52, 53, 54, 55, 56, 151 - 57, 58, 59, 60, 61, 62, 152 - 66, 67, 68, 69, 70, 71, 72, 153 - 73, 74, 75, 76, 77, 78, 154 - 82, 83, 84, 85, 86, 87, 88, 155 - 89, 90, 91, 92, 93, 94, 156 - 98, 99, 100, 101, 102, 103, 104, 157 - 105, 106, 107, 108, 109, 110, 158 - 114, 115, 116, 117, 118, 119, 120, 159 - 121, 122, 123, 124, 125, 126 160 - }, 161 - .oobfree = { 162 - {.offset = 15, .length = 3}, 163 - {.offset = 31, .length = 3}, 164 - {.offset = 47, .length = 3}, 165 - {.offset = 63, .length = 3}, 166 - {.offset = 79, .length = 3}, 167 - {.offset = 95, .length = 3}, 168 - {.offset = 111, .length = 3}, 169 - {.offset = 127, .length = 97} 170 - } 171 - }; 56 + static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section, 57 + struct mtd_oob_region *oobregion) 58 + { 59 + struct nand_chip *chip = mtd_to_nand(mtd); 172 60 173 - /* 174 - * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes 175 - * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22 176 - * bytes are free for use. 
177 - */ 178 - static struct nand_ecclayout fsmc_ecc4_128_layout = { 179 - .eccbytes = 104, 180 - .eccpos = { 2, 3, 4, 5, 6, 7, 8, 181 - 9, 10, 11, 12, 13, 14, 182 - 18, 19, 20, 21, 22, 23, 24, 183 - 25, 26, 27, 28, 29, 30, 184 - 34, 35, 36, 37, 38, 39, 40, 185 - 41, 42, 43, 44, 45, 46, 186 - 50, 51, 52, 53, 54, 55, 56, 187 - 57, 58, 59, 60, 61, 62, 188 - 66, 67, 68, 69, 70, 71, 72, 189 - 73, 74, 75, 76, 77, 78, 190 - 82, 83, 84, 85, 86, 87, 88, 191 - 89, 90, 91, 92, 93, 94, 192 - 98, 99, 100, 101, 102, 103, 104, 193 - 105, 106, 107, 108, 109, 110, 194 - 114, 115, 116, 117, 118, 119, 120, 195 - 121, 122, 123, 124, 125, 126 196 - }, 197 - .oobfree = { 198 - {.offset = 15, .length = 3}, 199 - {.offset = 31, .length = 3}, 200 - {.offset = 47, .length = 3}, 201 - {.offset = 63, .length = 3}, 202 - {.offset = 79, .length = 3}, 203 - {.offset = 95, .length = 3}, 204 - {.offset = 111, .length = 3}, 205 - {.offset = 127, .length = 1} 206 - } 207 - }; 61 + if (section >= chip->ecc.steps) 62 + return -ERANGE; 208 63 209 - /* 210 - * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of 211 - * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10 212 - * bytes are free for use. 213 - */ 214 - static struct nand_ecclayout fsmc_ecc4_64_layout = { 215 - .eccbytes = 52, 216 - .eccpos = { 2, 3, 4, 5, 6, 7, 8, 217 - 9, 10, 11, 12, 13, 14, 218 - 18, 19, 20, 21, 22, 23, 24, 219 - 25, 26, 27, 28, 29, 30, 220 - 34, 35, 36, 37, 38, 39, 40, 221 - 41, 42, 43, 44, 45, 46, 222 - 50, 51, 52, 53, 54, 55, 56, 223 - 57, 58, 59, 60, 61, 62, 224 - }, 225 - .oobfree = { 226 - {.offset = 15, .length = 3}, 227 - {.offset = 31, .length = 3}, 228 - {.offset = 47, .length = 3}, 229 - {.offset = 63, .length = 1}, 230 - } 231 - }; 64 + oobregion->offset = (section * 16) + 8; 232 65 233 - /* 234 - * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of 235 - * OOB size is reserved for ECC, Byte no. 
4 & 5 reserved for bad block and One 236 - * byte is free for use. 237 - */ 238 - static struct nand_ecclayout fsmc_ecc4_16_layout = { 239 - .eccbytes = 13, 240 - .eccpos = { 0, 1, 2, 3, 6, 7, 8, 241 - 9, 10, 11, 12, 13, 14 242 - }, 243 - .oobfree = { 244 - {.offset = 15, .length = 1}, 245 - } 66 + if (section < chip->ecc.steps - 1) 67 + oobregion->length = 8; 68 + else 69 + oobregion->length = mtd->oobsize - oobregion->offset; 70 + 71 + return 0; 72 + } 73 + 74 + static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = { 75 + .ecc = fsmc_ecc1_ooblayout_ecc, 76 + .free = fsmc_ecc1_ooblayout_free, 246 77 }; 247 78 248 79 /* ··· 81 250 * There are 13 bytes of ecc for every 512 byte block and it has to be read 82 251 * consecutively and immediately after the 512 byte data block for hardware to 83 252 * generate the error bit offsets in 512 byte data. 84 - * Managing the ecc bytes in the following way makes it easier for software to 85 - * read ecc bytes consecutive to data bytes. This way is similar to 86 - * oobfree structure maintained already in generic nand driver 87 253 */ 88 - static struct fsmc_eccplace fsmc_ecc4_lp_place = { 89 - .eccplace = { 90 - {.offset = 2, .length = 13}, 91 - {.offset = 18, .length = 13}, 92 - {.offset = 34, .length = 13}, 93 - {.offset = 50, .length = 13}, 94 - {.offset = 66, .length = 13}, 95 - {.offset = 82, .length = 13}, 96 - {.offset = 98, .length = 13}, 97 - {.offset = 114, .length = 13} 98 - } 99 - }; 254 + static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section, 255 + struct mtd_oob_region *oobregion) 256 + { 257 + struct nand_chip *chip = mtd_to_nand(mtd); 100 258 101 - static struct fsmc_eccplace fsmc_ecc4_sp_place = { 102 - .eccplace = { 103 - {.offset = 0, .length = 4}, 104 - {.offset = 6, .length = 9} 105 - } 259 + if (section >= chip->ecc.steps) 260 + return -ERANGE; 261 + 262 + oobregion->length = chip->ecc.bytes; 263 + 264 + if (!section && mtd->writesize <= 512) 265 + oobregion->offset = 0; 266 + 
else 267 + oobregion->offset = (section * 16) + 2; 268 + 269 + return 0; 270 + } 271 + 272 + static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section, 273 + struct mtd_oob_region *oobregion) 274 + { 275 + struct nand_chip *chip = mtd_to_nand(mtd); 276 + 277 + if (section >= chip->ecc.steps) 278 + return -ERANGE; 279 + 280 + oobregion->offset = (section * 16) + 15; 281 + 282 + if (section < chip->ecc.steps - 1) 283 + oobregion->length = 3; 284 + else 285 + oobregion->length = mtd->oobsize - oobregion->offset; 286 + 287 + return 0; 288 + } 289 + 290 + static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = { 291 + .ecc = fsmc_ecc4_ooblayout_ecc, 292 + .free = fsmc_ecc4_ooblayout_free, 106 293 }; 107 294 108 295 /** ··· 132 283 * @partitions: Partition info for a NAND Flash. 133 284 * @nr_partitions: Total number of partition of a NAND flash. 134 285 * 135 - * @ecc_place: ECC placing locations in oobfree type format. 136 286 * @bank: Bank number for probed device. 137 287 * @clk: Clock structure for FSMC. 
138 288 * ··· 151 303 struct mtd_partition *partitions; 152 304 unsigned int nr_partitions; 153 305 154 - struct fsmc_eccplace *ecc_place; 155 306 unsigned int bank; 156 307 struct device *dev; 157 308 enum access_mode mode; ··· 557 710 static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 558 711 uint8_t *buf, int oob_required, int page) 559 712 { 560 - struct fsmc_nand_data *host = mtd_to_fsmc(mtd); 561 - struct fsmc_eccplace *ecc_place = host->ecc_place; 562 713 int i, j, s, stat, eccsize = chip->ecc.size; 563 714 int eccbytes = chip->ecc.bytes; 564 715 int eccsteps = chip->ecc.steps; ··· 579 734 chip->read_buf(mtd, p, eccsize); 580 735 581 736 for (j = 0; j < eccbytes;) { 582 - off = ecc_place->eccplace[group].offset; 583 - len = ecc_place->eccplace[group].length; 584 - group++; 737 + struct mtd_oob_region oobregion; 738 + int ret; 739 + 740 + ret = mtd_ooblayout_ecc(mtd, group++, &oobregion); 741 + if (ret) 742 + return ret; 743 + 744 + off = oobregion.offset; 745 + len = oobregion.length; 585 746 586 747 /* 587 748 * length is intentionally kept a higher multiple of 2 ··· 935 1084 if (AMBA_REV_BITS(host->pid) >= 8) { 936 1085 switch (mtd->oobsize) { 937 1086 case 16: 938 - nand->ecc.layout = &fsmc_ecc4_16_layout; 939 - host->ecc_place = &fsmc_ecc4_sp_place; 940 - break; 941 1087 case 64: 942 - nand->ecc.layout = &fsmc_ecc4_64_layout; 943 - host->ecc_place = &fsmc_ecc4_lp_place; 944 - break; 945 1088 case 128: 946 - nand->ecc.layout = &fsmc_ecc4_128_layout; 947 - host->ecc_place = &fsmc_ecc4_lp_place; 948 - break; 949 1089 case 224: 950 - nand->ecc.layout = &fsmc_ecc4_224_layout; 951 - host->ecc_place = &fsmc_ecc4_lp_place; 952 - break; 953 1090 case 256: 954 - nand->ecc.layout = &fsmc_ecc4_256_layout; 955 - host->ecc_place = &fsmc_ecc4_lp_place; 956 1091 break; 957 1092 default: 958 1093 dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", ··· 946 1109 ret = -EINVAL; 947 1110 goto err_probe; 948 1111 } 1112 + 1113 + 
mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops); 949 1114 } else { 950 1115 switch (nand->ecc.mode) { 951 1116 case NAND_ECC_HW: ··· 958 1119 nand->ecc.strength = 1; 959 1120 break; 960 1121 961 - case NAND_ECC_SOFT_BCH: 962 - dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n"); 963 - break; 1122 + case NAND_ECC_SOFT: 1123 + if (nand->ecc.algo == NAND_ECC_BCH) { 1124 + dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n"); 1125 + break; 1126 + } 964 1127 965 1128 default: 966 1129 dev_err(&pdev->dev, "Unsupported ECC mode!\n"); ··· 973 1132 * Don't set layout for BCH4 SW ECC. This will be 974 1133 * generated later in nand_bch_init() later. 975 1134 */ 976 - if (nand->ecc.mode != NAND_ECC_SOFT_BCH) { 1135 + if (nand->ecc.mode == NAND_ECC_HW) { 977 1136 switch (mtd->oobsize) { 978 1137 case 16: 979 - nand->ecc.layout = &fsmc_ecc1_16_layout; 980 - break; 981 1138 case 64: 982 - nand->ecc.layout = &fsmc_ecc1_64_layout; 983 - break; 984 1139 case 128: 985 - nand->ecc.layout = &fsmc_ecc1_128_layout; 1140 + mtd_set_ooblayout(mtd, 1141 + &fsmc_ecc1_ooblayout_ops); 986 1142 break; 987 1143 default: 988 1144 dev_warn(&pdev->dev,
+1
drivers/mtd/nand/gpio.c
··· 273 273 nand_set_flash_node(chip, pdev->dev.of_node); 274 274 chip->IO_ADDR_W = chip->IO_ADDR_R; 275 275 chip->ecc.mode = NAND_ECC_SOFT; 276 + chip->ecc.algo = NAND_ECC_HAMMING; 276 277 chip->options = gpiomtd->plat.options; 277 278 chip->chip_delay = gpiomtd->plat.chip_delay; 278 279 chip->cmd_ctrl = gpio_nand_cmd_ctrl;
+129 -32
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
··· 25 25 #include <linux/mtd/partitions.h> 26 26 #include <linux/of.h> 27 27 #include <linux/of_device.h> 28 - #include <linux/of_mtd.h> 29 28 #include "gpmi-nand.h" 30 29 #include "bch-regs.h" 31 30 ··· 46 47 * We may change the layout if we can get the ECC info from the datasheet, 47 48 * else we will use all the (page + OOB). 48 49 */ 49 - static struct nand_ecclayout gpmi_hw_ecclayout = { 50 - .eccbytes = 0, 51 - .eccpos = { 0, }, 52 - .oobfree = { {.offset = 0, .length = 0} } 50 + static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, 51 + struct mtd_oob_region *oobregion) 52 + { 53 + struct nand_chip *chip = mtd_to_nand(mtd); 54 + struct gpmi_nand_data *this = nand_get_controller_data(chip); 55 + struct bch_geometry *geo = &this->bch_geometry; 56 + 57 + if (section) 58 + return -ERANGE; 59 + 60 + oobregion->offset = 0; 61 + oobregion->length = geo->page_size - mtd->writesize; 62 + 63 + return 0; 64 + } 65 + 66 + static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, 67 + struct mtd_oob_region *oobregion) 68 + { 69 + struct nand_chip *chip = mtd_to_nand(mtd); 70 + struct gpmi_nand_data *this = nand_get_controller_data(chip); 71 + struct bch_geometry *geo = &this->bch_geometry; 72 + 73 + if (section) 74 + return -ERANGE; 75 + 76 + /* The available oob size we have. 
*/ 77 + if (geo->page_size < mtd->writesize + mtd->oobsize) { 78 + oobregion->offset = geo->page_size - mtd->writesize; 79 + oobregion->length = mtd->oobsize - oobregion->offset; 80 + } 81 + 82 + return 0; 83 + } 84 + 85 + static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { 86 + .ecc = gpmi_ooblayout_ecc, 87 + .free = gpmi_ooblayout_free, 53 88 }; 54 89 55 90 static const struct gpmi_devdata gpmi_devdata_imx23 = { ··· 174 141 struct bch_geometry *geo = &this->bch_geometry; 175 142 struct nand_chip *chip = &this->nand; 176 143 struct mtd_info *mtd = nand_to_mtd(chip); 177 - struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree; 178 144 unsigned int block_mark_bit_offset; 179 145 180 146 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) ··· 260 228 */ 261 229 geo->page_size = mtd->writesize + geo->metadata_size + 262 230 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; 263 - 264 - /* The available oob size we have. */ 265 - if (geo->page_size < mtd->writesize + mtd->oobsize) { 266 - of->offset = geo->page_size - mtd->writesize; 267 - of->length = mtd->oobsize - of->offset; 268 - } 269 231 270 232 geo->payload_size = mtd->writesize; 271 233 ··· 823 797 824 798 this->cmd_buffer = NULL; 825 799 this->data_buffer_dma = NULL; 800 + this->raw_buffer = NULL; 826 801 this->page_buffer_virt = NULL; 827 802 this->page_buffer_size = 0; 828 803 } ··· 1064 1037 /* Loop over status bytes, accumulating ECC status. 
*/ 1065 1038 status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 1066 1039 1040 + read_page_swap_end(this, buf, nfc_geo->payload_size, 1041 + this->payload_virt, this->payload_phys, 1042 + nfc_geo->payload_size, 1043 + payload_virt, payload_phys); 1044 + 1067 1045 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 1068 1046 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 1069 1047 continue; 1070 1048 1071 1049 if (*status == STATUS_UNCORRECTABLE) { 1050 + int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len; 1051 + u8 *eccbuf = this->raw_buffer; 1052 + int offset, bitoffset; 1053 + int eccbytes; 1054 + int flips; 1055 + 1056 + /* Read ECC bytes into our internal raw_buffer */ 1057 + offset = nfc_geo->metadata_size * 8; 1058 + offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1); 1059 + offset -= eccbits; 1060 + bitoffset = offset % 8; 1061 + eccbytes = DIV_ROUND_UP(offset + eccbits, 8); 1062 + offset /= 8; 1063 + eccbytes -= offset; 1064 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1); 1065 + chip->read_buf(mtd, eccbuf, eccbytes); 1066 + 1067 + /* 1068 + * ECC data are not byte aligned and we may have 1069 + * in-band data in the first and last byte of 1070 + * eccbuf. Set non-eccbits to one so that 1071 + * nand_check_erased_ecc_chunk() does not count them 1072 + * as bitflips. 1073 + */ 1074 + if (bitoffset) 1075 + eccbuf[0] |= GENMASK(bitoffset - 1, 0); 1076 + 1077 + bitoffset = (bitoffset + eccbits) % 8; 1078 + if (bitoffset) 1079 + eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset); 1080 + 1081 + /* 1082 + * The ECC hardware has an uncorrectable ECC status 1083 + * code in case we have bitflips in an erased page. As 1084 + * nothing was written into this subpage the ECC is 1085 + * obviously wrong and we can not trust it. We assume 1086 + * at this point that we are reading an erased page and 1087 + * try to correct the bitflips in buffer up to 1088 + * ecc_strength bitflips. 
If this is a page with random 1089 + * data, we exceed this number of bitflips and have a 1090 + * ECC failure. Otherwise we use the corrected buffer. 1091 + */ 1092 + if (i == 0) { 1093 + /* The first block includes metadata */ 1094 + flips = nand_check_erased_ecc_chunk( 1095 + buf + i * nfc_geo->ecc_chunk_size, 1096 + nfc_geo->ecc_chunk_size, 1097 + eccbuf, eccbytes, 1098 + auxiliary_virt, 1099 + nfc_geo->metadata_size, 1100 + nfc_geo->ecc_strength); 1101 + } else { 1102 + flips = nand_check_erased_ecc_chunk( 1103 + buf + i * nfc_geo->ecc_chunk_size, 1104 + nfc_geo->ecc_chunk_size, 1105 + eccbuf, eccbytes, 1106 + NULL, 0, 1107 + nfc_geo->ecc_strength); 1108 + } 1109 + 1110 + if (flips > 0) { 1111 + max_bitflips = max_t(unsigned int, max_bitflips, 1112 + flips); 1113 + mtd->ecc_stats.corrected += flips; 1114 + continue; 1115 + } 1116 + 1072 1117 mtd->ecc_stats.failed++; 1073 1118 continue; 1074 1119 } 1120 + 1075 1121 mtd->ecc_stats.corrected += *status; 1076 1122 max_bitflips = max_t(unsigned int, max_bitflips, *status); 1077 1123 } ··· 1163 1063 memset(chip->oob_poi, ~0, mtd->oobsize); 1164 1064 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; 1165 1065 } 1166 - 1167 - read_page_swap_end(this, buf, nfc_geo->payload_size, 1168 - this->payload_virt, this->payload_phys, 1169 - nfc_geo->payload_size, 1170 - payload_virt, payload_phys); 1171 1066 1172 1067 return max_bitflips; 1173 1068 } ··· 1422 1327 static int 1423 1328 gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) 1424 1329 { 1425 - struct nand_oobfree *of = mtd->ecclayout->oobfree; 1330 + struct mtd_oob_region of = { }; 1426 1331 int status = 0; 1427 1332 1428 1333 /* Do we have available oob area? 
*/ 1429 - if (!of->length) 1334 + mtd_ooblayout_free(mtd, 0, &of); 1335 + if (!of.length) 1430 1336 return -EPERM; 1431 1337 1432 1338 if (!nand_is_slc(chip)) 1433 1339 return -EPERM; 1434 1340 1435 - chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page); 1436 - chip->write_buf(mtd, chip->oob_poi + of->offset, of->length); 1341 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page); 1342 + chip->write_buf(mtd, chip->oob_poi + of.offset, of.length); 1437 1343 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1438 1344 1439 1345 status = chip->waitfunc(mtd, chip); ··· 1936 1840 static int gpmi_init_last(struct gpmi_nand_data *this) 1937 1841 { 1938 1842 struct nand_chip *chip = &this->nand; 1843 + struct mtd_info *mtd = nand_to_mtd(chip); 1939 1844 struct nand_ecc_ctrl *ecc = &chip->ecc; 1940 1845 struct bch_geometry *bch_geo = &this->bch_geometry; 1941 1846 int ret; ··· 1958 1861 ecc->mode = NAND_ECC_HW; 1959 1862 ecc->size = bch_geo->ecc_chunk_size; 1960 1863 ecc->strength = bch_geo->ecc_strength; 1961 - ecc->layout = &gpmi_hw_ecclayout; 1864 + mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops); 1962 1865 1963 1866 /* 1964 1867 * We only enable the subpage read when: ··· 2011 1914 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ 2012 1915 this->swap_block_mark = !GPMI_IS_MX23(this); 2013 1916 2014 - if (of_get_nand_on_flash_bbt(this->dev->of_node)) { 2015 - chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 2016 - 2017 - if (of_property_read_bool(this->dev->of_node, 2018 - "fsl,no-blockmark-swap")) 2019 - this->swap_block_mark = false; 2020 - } 2021 - dev_dbg(this->dev, "Blockmark swapping %sabled\n", 2022 - this->swap_block_mark ? "en" : "dis"); 2023 - 2024 1917 /* 2025 1918 * Allocate a temporary DMA buffer for reading ID in the 2026 1919 * nand_scan_ident(). ··· 2024 1937 ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 
2 : 1, NULL); 2025 1938 if (ret) 2026 1939 goto err_out; 1940 + 1941 + if (chip->bbt_options & NAND_BBT_USE_FLASH) { 1942 + chip->bbt_options |= NAND_BBT_NO_OOB; 1943 + 1944 + if (of_property_read_bool(this->dev->of_node, 1945 + "fsl,no-blockmark-swap")) 1946 + this->swap_block_mark = false; 1947 + } 1948 + dev_dbg(this->dev, "Blockmark swapping %sabled\n", 1949 + this->swap_block_mark ? "en" : "dis"); 2027 1950 2028 1951 ret = gpmi_init_last(this); 2029 1952 if (ret)
+26 -14
drivers/mtd/nand/hisi504_nand.c
··· 19 19 * GNU General Public License for more details. 20 20 */ 21 21 #include <linux/of.h> 22 - #include <linux/of_mtd.h> 23 22 #include <linux/mtd/mtd.h> 24 23 #include <linux/sizes.h> 25 24 #include <linux/clk.h> ··· 630 631 hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN); 631 632 } 632 633 633 - static struct nand_ecclayout nand_ecc_2K_16bits = { 634 - .oobfree = { {2, 6} }, 634 + static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section, 635 + struct mtd_oob_region *oobregion) 636 + { 637 + /* FIXME: add ECC bytes position */ 638 + return -ENOTSUPP; 639 + } 640 + 641 + static int hisi_ooblayout_free(struct mtd_info *mtd, int section, 642 + struct mtd_oob_region *oobregion) 643 + { 644 + if (section) 645 + return -ERANGE; 646 + 647 + oobregion->offset = 2; 648 + oobregion->length = 6; 649 + 650 + return 0; 651 + } 652 + 653 + static const struct mtd_ooblayout_ops hisi_ooblayout_ops = { 654 + .ecc = hisi_ooblayout_ecc, 655 + .free = hisi_ooblayout_free, 635 656 }; 636 657 637 658 static int hisi_nfc_ecc_probe(struct hinfc_host *host) ··· 661 642 struct device *dev = host->dev; 662 643 struct nand_chip *chip = &host->chip; 663 644 struct mtd_info *mtd = nand_to_mtd(chip); 664 - struct device_node *np = host->dev->of_node; 665 645 666 - size = of_get_nand_ecc_step_size(np); 667 - strength = of_get_nand_ecc_strength(np); 646 + size = chip->ecc.size; 647 + strength = chip->ecc.strength; 668 648 if (size != 1024) { 669 649 dev_err(dev, "error ecc size: %d\n", size); 670 650 return -EINVAL; ··· 686 668 case 16: 687 669 ecc_bits = 6; 688 670 if (mtd->writesize == 2048) 689 - chip->ecc.layout = &nand_ecc_2K_16bits; 671 + mtd_set_ooblayout(mtd, &hisi_ooblayout_ops); 690 672 691 673 /* TODO: add more page size support */ 692 674 break; ··· 713 695 714 696 static int hisi_nfc_probe(struct platform_device *pdev) 715 697 { 716 - int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP; 698 + int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP; 717 699 
struct device *dev = &pdev->dev; 718 700 struct hinfc_host *host; 719 701 struct nand_chip *chip; ··· 764 746 chip->write_buf = hisi_nfc_write_buf; 765 747 chip->read_buf = hisi_nfc_read_buf; 766 748 chip->chip_delay = HINFC504_CHIP_DELAY; 767 - 768 - chip->ecc.mode = of_get_nand_ecc_mode(np); 769 - 770 - buswidth = of_get_nand_bus_width(np); 771 - if (buswidth == 16) 772 - chip->options |= NAND_BUSWIDTH_16; 773 749 774 750 hisi_nfc_host_init(host); 775 751
+1 -2
drivers/mtd/nand/jz4740_nand.c
··· 221 221 struct jz_nand *nand = mtd_to_jz_nand(mtd); 222 222 int i, error_count, index; 223 223 uint32_t reg, status, error; 224 - uint32_t t; 225 224 unsigned int timeout = 1000; 226 225 227 226 for (i = 0; i < 9; ++i) ··· 475 476 } 476 477 477 478 if (pdata && pdata->ident_callback) { 478 - pdata->ident_callback(pdev, chip, &pdata->partitions, 479 + pdata->ident_callback(pdev, mtd, &pdata->partitions, 479 480 &pdata->num_partitions); 480 481 } 481 482
-1
drivers/mtd/nand/jz4780_bch.c
··· 287 287 bch = platform_get_drvdata(pdev); 288 288 clk_prepare_enable(bch->clk); 289 289 290 - bch->dev = &pdev->dev; 291 290 return bch; 292 291 } 293 292
+5 -16
drivers/mtd/nand/jz4780_nand.c
··· 17 17 #include <linux/of.h> 18 18 #include <linux/of_address.h> 19 19 #include <linux/gpio/consumer.h> 20 - #include <linux/of_mtd.h> 21 20 #include <linux/platform_device.h> 22 21 #include <linux/slab.h> 23 22 #include <linux/mtd/mtd.h> ··· 54 55 struct jz4780_nand_chip { 55 56 struct nand_chip chip; 56 57 struct list_head chip_list; 57 - 58 - struct nand_ecclayout ecclayout; 59 58 60 59 struct gpio_desc *busy_gpio; 61 60 struct gpio_desc *wp_gpio; ··· 162 165 struct nand_chip *chip = &nand->chip; 163 166 struct mtd_info *mtd = nand_to_mtd(chip); 164 167 struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller); 165 - struct nand_ecclayout *layout = &nand->ecclayout; 166 - u32 start, i; 168 + int eccbytes; 167 169 168 170 chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) * 169 171 (chip->ecc.strength / 8); ··· 179 183 chip->ecc.correct = jz4780_nand_ecc_correct; 180 184 /* fall through */ 181 185 case NAND_ECC_SOFT: 182 - case NAND_ECC_SOFT_BCH: 183 186 dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n", 184 187 (nfc->bch) ? "hardware BCH" : "software ECC", 185 188 chip->ecc.strength, chip->ecc.size, chip->ecc.bytes); ··· 196 201 return 0; 197 202 198 203 /* Generate ECC layout. ECC codes are right aligned in the OOB area. 
*/ 199 - layout->eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes; 204 + eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes; 200 205 201 - if (layout->eccbytes > mtd->oobsize - 2) { 206 + if (eccbytes > mtd->oobsize - 2) { 202 207 dev_err(dev, 203 208 "invalid ECC config: required %d ECC bytes, but only %d are available", 204 - layout->eccbytes, mtd->oobsize - 2); 209 + eccbytes, mtd->oobsize - 2); 205 210 return -EINVAL; 206 211 } 207 212 208 - start = mtd->oobsize - layout->eccbytes; 209 - for (i = 0; i < layout->eccbytes; i++) 210 - layout->eccpos[i] = start + i; 213 + mtd->ooblayout = &nand_ooblayout_lp_ops; 211 214 212 - layout->oobfree[0].offset = 2; 213 - layout->oobfree[0].length = mtd->oobsize - layout->eccbytes - 2; 214 - 215 - chip->ecc.layout = layout; 216 215 return 0; 217 216 } 218 217
+33 -18
drivers/mtd/nand/lpc32xx_mlc.c
··· 35 35 #include <linux/completion.h> 36 36 #include <linux/interrupt.h> 37 37 #include <linux/of.h> 38 - #include <linux/of_mtd.h> 39 38 #include <linux/of_gpio.h> 40 39 #include <linux/mtd/lpc32xx_mlc.h> 41 40 #include <linux/io.h> ··· 138 139 unsigned num_parts; 139 140 }; 140 141 141 - static struct nand_ecclayout lpc32xx_nand_oob = { 142 - .eccbytes = 40, 143 - .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 144 - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 145 - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 146 - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, 147 - .oobfree = { 148 - { .offset = 0, 149 - .length = 6, }, 150 - { .offset = 16, 151 - .length = 6, }, 152 - { .offset = 32, 153 - .length = 6, }, 154 - { .offset = 48, 155 - .length = 6, }, 156 - }, 142 + static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section, 143 + struct mtd_oob_region *oobregion) 144 + { 145 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 146 + 147 + if (section >= nand_chip->ecc.steps) 148 + return -ERANGE; 149 + 150 + oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes; 151 + oobregion->length = nand_chip->ecc.bytes; 152 + 153 + return 0; 154 + } 155 + 156 + static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section, 157 + struct mtd_oob_region *oobregion) 158 + { 159 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 160 + 161 + if (section >= nand_chip->ecc.steps) 162 + return -ERANGE; 163 + 164 + oobregion->offset = 16 * section; 165 + oobregion->length = 16 - nand_chip->ecc.bytes; 166 + 167 + return 0; 168 + } 169 + 170 + static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = { 171 + .ecc = lpc32xx_ooblayout_ecc, 172 + .free = lpc32xx_ooblayout_free, 157 173 }; 158 174 159 175 static struct nand_bbt_descr lpc32xx_nand_bbt = { ··· 727 713 nand_chip->ecc.write_oob = lpc32xx_write_oob; 728 714 nand_chip->ecc.read_oob = lpc32xx_read_oob; 729 715 nand_chip->ecc.strength = 4; 716 + nand_chip->ecc.bytes = 10; 730 717 nand_chip->waitfunc = 
lpc32xx_waitfunc; 731 718 732 719 nand_chip->options = NAND_NO_SUBPAGE_WRITE; ··· 766 751 767 752 nand_chip->ecc.mode = NAND_ECC_HW; 768 753 nand_chip->ecc.size = 512; 769 - nand_chip->ecc.layout = &lpc32xx_nand_oob; 754 + mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops); 770 755 host->mlcsubpages = mtd->writesize / 512; 771 756 772 757 /* initially clear interrupt status */
+56 -27
drivers/mtd/nand/lpc32xx_slc.c
··· 35 35 #include <linux/mtd/nand_ecc.h> 36 36 #include <linux/gpio.h> 37 37 #include <linux/of.h> 38 - #include <linux/of_mtd.h> 39 38 #include <linux/of_gpio.h> 40 39 #include <linux/mtd/lpc32xx_slc.h> 41 40 ··· 145 146 * NAND ECC Layout for small page NAND devices 146 147 * Note: For large and huge page devices, the default layouts are used 147 148 */ 148 - static struct nand_ecclayout lpc32xx_nand_oob_16 = { 149 - .eccbytes = 6, 150 - .eccpos = {10, 11, 12, 13, 14, 15}, 151 - .oobfree = { 152 - { .offset = 0, .length = 4 }, 153 - { .offset = 6, .length = 4 }, 154 - }, 149 + static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section, 150 + struct mtd_oob_region *oobregion) 151 + { 152 + if (section) 153 + return -ERANGE; 154 + 155 + oobregion->length = 6; 156 + oobregion->offset = 10; 157 + 158 + return 0; 159 + } 160 + 161 + static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section, 162 + struct mtd_oob_region *oobregion) 163 + { 164 + if (section > 1) 165 + return -ERANGE; 166 + 167 + if (!section) { 168 + oobregion->offset = 0; 169 + oobregion->length = 4; 170 + } else { 171 + oobregion->offset = 6; 172 + oobregion->length = 4; 173 + } 174 + 175 + return 0; 176 + } 177 + 178 + static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = { 179 + .ecc = lpc32xx_ooblayout_ecc, 180 + .free = lpc32xx_ooblayout_free, 155 181 }; 156 182 157 183 static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; ··· 218 194 uint32_t rwidth; 219 195 uint32_t rhold; 220 196 uint32_t rsetup; 221 - bool use_bbt; 222 197 int wp_gpio; 223 198 struct mtd_partition *parts; 224 199 unsigned num_parts; ··· 627 604 int oob_required, int page) 628 605 { 629 606 struct lpc32xx_nand_host *host = nand_get_controller_data(chip); 630 - int stat, i, status; 607 + struct mtd_oob_region oobregion = { }; 608 + int stat, i, status, error; 631 609 uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE]; 632 610 633 611 /* Issue read command */ ··· 644 620 lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) 
host->ecc_buf, chip->ecc.steps); 645 621 646 622 /* Pointer to ECC data retrieved from NAND spare area */ 647 - oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0]; 623 + error = mtd_ooblayout_ecc(mtd, 0, &oobregion); 624 + if (error) 625 + return error; 626 + 627 + oobecc = chip->oob_poi + oobregion.offset; 648 628 649 629 for (i = 0; i < chip->ecc.steps; i++) { 650 630 stat = chip->ecc.correct(mtd, buf, oobecc, ··· 694 666 int oob_required, int page) 695 667 { 696 668 struct lpc32xx_nand_host *host = nand_get_controller_data(chip); 697 - uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0]; 669 + struct mtd_oob_region oobregion = { }; 670 + uint8_t *pb; 698 671 int error; 699 672 700 673 /* Write data, calculate ECC on outbound data */ ··· 707 678 * The calculated ECC needs some manual work done to it before 708 679 * committing it to NAND. Process the calculated ECC and place 709 680 * the resultant values directly into the OOB buffer. */ 681 + error = mtd_ooblayout_ecc(mtd, 0, &oobregion); 682 + if (error) 683 + return error; 684 + 685 + pb = chip->oob_poi + oobregion.offset; 710 686 lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps); 711 687 712 688 /* Write ECC data to device */ ··· 781 747 return NULL; 782 748 } 783 749 784 - ncfg->use_bbt = of_get_nand_on_flash_bbt(np); 785 750 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0); 786 751 787 752 return ncfg; ··· 908 875 * custom BBT marker layout. 
909 876 */ 910 877 if (mtd->writesize <= 512) 911 - chip->ecc.layout = &lpc32xx_nand_oob_16; 878 + mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops); 912 879 913 880 /* These sizes remain the same regardless of page size */ 914 881 chip->ecc.size = 256; 915 882 chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES; 916 883 chip->ecc.prepad = chip->ecc.postpad = 0; 917 884 918 - /* Avoid extra scan if using BBT, setup BBT support */ 919 - if (host->ncfg->use_bbt) { 920 - chip->bbt_options |= NAND_BBT_USE_FLASH; 921 - 922 - /* 923 - * Use a custom BBT marker setup for small page FLASH that 924 - * won't interfere with the ECC layout. Large and huge page 925 - * FLASH use the standard layout. 926 - */ 927 - if (mtd->writesize <= 512) { 928 - chip->bbt_td = &bbt_smallpage_main_descr; 929 - chip->bbt_md = &bbt_smallpage_mirror_descr; 930 - } 885 + /* 886 + * Use a custom BBT marker setup for small page FLASH that 887 + * won't interfere with the ECC layout. Large and huge page 888 + * FLASH use the standard layout. 889 + */ 890 + if ((chip->bbt_options & NAND_BBT_USE_FLASH) && 891 + mtd->writesize <= 512) { 892 + chip->bbt_td = &bbt_smallpage_main_descr; 893 + chip->bbt_md = &bbt_smallpage_mirror_descr; 931 894 } 932 895 933 896 /*
+1
drivers/mtd/nand/mpc5121_nfc.c
··· 710 710 chip->select_chip = mpc5121_nfc_select_chip; 711 711 chip->bbt_options = NAND_BBT_USE_FLASH; 712 712 chip->ecc.mode = NAND_ECC_SOFT; 713 + chip->ecc.algo = NAND_ECC_HAMMING; 713 714 714 715 /* Support external chip-select logic on ADS5121 board */ 715 716 if (of_machine_is_compatible("fsl,mpc5121ads")) {
+128 -129
drivers/mtd/nand/mxc_nand.c
··· 34 34 #include <linux/completion.h> 35 35 #include <linux/of.h> 36 36 #include <linux/of_device.h> 37 - #include <linux/of_mtd.h> 38 37 39 38 #include <asm/mach/flash.h> 40 39 #include <linux/platform_data/mtd-mxc_nand.h> ··· 148 149 int (*check_int)(struct mxc_nand_host *); 149 150 void (*irq_control)(struct mxc_nand_host *, int); 150 151 u32 (*get_ecc_status)(struct mxc_nand_host *); 151 - struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k; 152 + const struct mtd_ooblayout_ops *ooblayout; 152 153 void (*select_chip)(struct mtd_info *mtd, int chip); 153 154 int (*correct_data)(struct mtd_info *mtd, u_char *dat, 154 155 u_char *read_ecc, u_char *calc_ecc); ··· 197 198 198 199 const struct mxc_nand_devtype_data *devtype_data; 199 200 struct mxc_nand_platform_data pdata; 200 - }; 201 - 202 - /* OOB placement block for use with hardware ecc generation */ 203 - static struct nand_ecclayout nandv1_hw_eccoob_smallpage = { 204 - .eccbytes = 5, 205 - .eccpos = {6, 7, 8, 9, 10}, 206 - .oobfree = {{0, 5}, {12, 4}, } 207 - }; 208 - 209 - static struct nand_ecclayout nandv1_hw_eccoob_largepage = { 210 - .eccbytes = 20, 211 - .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 212 - 38, 39, 40, 41, 42, 54, 55, 56, 57, 58}, 213 - .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, } 214 - }; 215 - 216 - /* OOB description for 512 byte pages with 16 byte OOB */ 217 - static struct nand_ecclayout nandv2_hw_eccoob_smallpage = { 218 - .eccbytes = 1 * 9, 219 - .eccpos = { 220 - 7, 8, 9, 10, 11, 12, 13, 14, 15 221 - }, 222 - .oobfree = { 223 - {.offset = 0, .length = 5} 224 - } 225 - }; 226 - 227 - /* OOB description for 2048 byte pages with 64 byte OOB */ 228 - static struct nand_ecclayout nandv2_hw_eccoob_largepage = { 229 - .eccbytes = 4 * 9, 230 - .eccpos = { 231 - 7, 8, 9, 10, 11, 12, 13, 14, 15, 232 - 23, 24, 25, 26, 27, 28, 29, 30, 31, 233 - 39, 40, 41, 42, 43, 44, 45, 46, 47, 234 - 55, 56, 57, 58, 59, 60, 61, 62, 63 235 - }, 236 - .oobfree = { 237 - 
{.offset = 2, .length = 4}, 238 - {.offset = 16, .length = 7}, 239 - {.offset = 32, .length = 7}, 240 - {.offset = 48, .length = 7} 241 - } 242 - }; 243 - 244 - /* OOB description for 4096 byte pages with 128 byte OOB */ 245 - static struct nand_ecclayout nandv2_hw_eccoob_4k = { 246 - .eccbytes = 8 * 9, 247 - .eccpos = { 248 - 7, 8, 9, 10, 11, 12, 13, 14, 15, 249 - 23, 24, 25, 26, 27, 28, 29, 30, 31, 250 - 39, 40, 41, 42, 43, 44, 45, 46, 47, 251 - 55, 56, 57, 58, 59, 60, 61, 62, 63, 252 - 71, 72, 73, 74, 75, 76, 77, 78, 79, 253 - 87, 88, 89, 90, 91, 92, 93, 94, 95, 254 - 103, 104, 105, 106, 107, 108, 109, 110, 111, 255 - 119, 120, 121, 122, 123, 124, 125, 126, 127, 256 - }, 257 - .oobfree = { 258 - {.offset = 2, .length = 4}, 259 - {.offset = 16, .length = 7}, 260 - {.offset = 32, .length = 7}, 261 - {.offset = 48, .length = 7}, 262 - {.offset = 64, .length = 7}, 263 - {.offset = 80, .length = 7}, 264 - {.offset = 96, .length = 7}, 265 - {.offset = 112, .length = 7}, 266 - } 267 201 }; 268 202 269 203 static const char * const part_probes[] = { ··· 874 942 } 875 943 } 876 944 945 + static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section, 946 + struct mtd_oob_region *oobregion) 947 + { 948 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 949 + 950 + if (section >= nand_chip->ecc.steps) 951 + return -ERANGE; 952 + 953 + oobregion->offset = (section * 16) + 6; 954 + oobregion->length = nand_chip->ecc.bytes; 955 + 956 + return 0; 957 + } 958 + 959 + static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section, 960 + struct mtd_oob_region *oobregion) 961 + { 962 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 963 + 964 + if (section > nand_chip->ecc.steps) 965 + return -ERANGE; 966 + 967 + if (!section) { 968 + if (mtd->writesize <= 512) { 969 + oobregion->offset = 0; 970 + oobregion->length = 5; 971 + } else { 972 + oobregion->offset = 2; 973 + oobregion->length = 4; 974 + } 975 + } else { 976 + oobregion->offset = ((section - 1) * 16) + 977 + 
nand_chip->ecc.bytes + 6; 978 + if (section < nand_chip->ecc.steps) 979 + oobregion->length = (section * 16) + 6 - 980 + oobregion->offset; 981 + else 982 + oobregion->length = mtd->oobsize - oobregion->offset; 983 + } 984 + 985 + return 0; 986 + } 987 + 988 + static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = { 989 + .ecc = mxc_v1_ooblayout_ecc, 990 + .free = mxc_v1_ooblayout_free, 991 + }; 992 + 993 + static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section, 994 + struct mtd_oob_region *oobregion) 995 + { 996 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 997 + int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; 998 + 999 + if (section >= nand_chip->ecc.steps) 1000 + return -ERANGE; 1001 + 1002 + oobregion->offset = (section * stepsize) + 7; 1003 + oobregion->length = nand_chip->ecc.bytes; 1004 + 1005 + return 0; 1006 + } 1007 + 1008 + static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section, 1009 + struct mtd_oob_region *oobregion) 1010 + { 1011 + struct nand_chip *nand_chip = mtd_to_nand(mtd); 1012 + int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; 1013 + 1014 + if (section > nand_chip->ecc.steps) 1015 + return -ERANGE; 1016 + 1017 + if (!section) { 1018 + if (mtd->writesize <= 512) { 1019 + oobregion->offset = 0; 1020 + oobregion->length = 5; 1021 + } else { 1022 + oobregion->offset = 2; 1023 + oobregion->length = 4; 1024 + } 1025 + } else { 1026 + oobregion->offset = section * stepsize; 1027 + oobregion->length = 7; 1028 + } 1029 + 1030 + return 0; 1031 + } 1032 + 1033 + static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = { 1034 + .ecc = mxc_v2_ooblayout_ecc, 1035 + .free = mxc_v2_ooblayout_free, 1036 + }; 1037 + 877 1038 /* 878 1039 * v2 and v3 type controllers can do 4bit or 8bit ecc depending 879 1040 * on how much oob the nand chip has. 
For 8bit ecc we need at least ··· 982 957 return 4; 983 958 else 984 959 return 8; 985 - } 986 - 987 - static void ecc_8bit_layout_4k(struct nand_ecclayout *layout) 988 - { 989 - int i, j; 990 - 991 - layout->eccbytes = 8*18; 992 - for (i = 0; i < 8; i++) 993 - for (j = 0; j < 18; j++) 994 - layout->eccpos[i*18 + j] = i*26 + j + 7; 995 - 996 - layout->oobfree[0].offset = 2; 997 - layout->oobfree[0].length = 4; 998 - for (i = 1; i < 8; i++) { 999 - layout->oobfree[i].offset = i*26; 1000 - layout->oobfree[i].length = 7; 1001 - } 1002 960 } 1003 961 1004 962 static void preset_v1(struct mtd_info *mtd) ··· 1277 1269 .check_int = check_int_v1_v2, 1278 1270 .irq_control = irq_control_v1_v2, 1279 1271 .get_ecc_status = get_ecc_status_v1, 1280 - .ecclayout_512 = &nandv1_hw_eccoob_smallpage, 1281 - .ecclayout_2k = &nandv1_hw_eccoob_largepage, 1282 - .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ 1272 + .ooblayout = &mxc_v1_ooblayout_ops, 1283 1273 .select_chip = mxc_nand_select_chip_v1_v3, 1284 1274 .correct_data = mxc_nand_correct_data_v1, 1285 1275 .irqpending_quirk = 1, ··· 1300 1294 .check_int = check_int_v1_v2, 1301 1295 .irq_control = irq_control_v1_v2, 1302 1296 .get_ecc_status = get_ecc_status_v1, 1303 - .ecclayout_512 = &nandv1_hw_eccoob_smallpage, 1304 - .ecclayout_2k = &nandv1_hw_eccoob_largepage, 1305 - .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ 1297 + .ooblayout = &mxc_v1_ooblayout_ops, 1306 1298 .select_chip = mxc_nand_select_chip_v1_v3, 1307 1299 .correct_data = mxc_nand_correct_data_v1, 1308 1300 .irqpending_quirk = 0, ··· 1324 1320 .check_int = check_int_v1_v2, 1325 1321 .irq_control = irq_control_v1_v2, 1326 1322 .get_ecc_status = get_ecc_status_v2, 1327 - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, 1328 - .ecclayout_2k = &nandv2_hw_eccoob_largepage, 1329 - .ecclayout_4k = &nandv2_hw_eccoob_4k, 1323 + .ooblayout = &mxc_v2_ooblayout_ops, 1330 1324 .select_chip = mxc_nand_select_chip_v2, 1331 1325 .correct_data = 
mxc_nand_correct_data_v2_v3, 1332 1326 .irqpending_quirk = 0, ··· 1348 1346 .check_int = check_int_v3, 1349 1347 .irq_control = irq_control_v3, 1350 1348 .get_ecc_status = get_ecc_status_v3, 1351 - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, 1352 - .ecclayout_2k = &nandv2_hw_eccoob_largepage, 1353 - .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ 1349 + .ooblayout = &mxc_v2_ooblayout_ops, 1354 1350 .select_chip = mxc_nand_select_chip_v1_v3, 1355 1351 .correct_data = mxc_nand_correct_data_v2_v3, 1356 1352 .irqpending_quirk = 0, ··· 1373 1373 .check_int = check_int_v3, 1374 1374 .irq_control = irq_control_v3, 1375 1375 .get_ecc_status = get_ecc_status_v3, 1376 - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, 1377 - .ecclayout_2k = &nandv2_hw_eccoob_largepage, 1378 - .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ 1376 + .ooblayout = &mxc_v2_ooblayout_ops, 1379 1377 .select_chip = mxc_nand_select_chip_v1_v3, 1380 1378 .correct_data = mxc_nand_correct_data_v2_v3, 1381 1379 .irqpending_quirk = 0, ··· 1459 1461 static int __init mxcnd_probe_dt(struct mxc_nand_host *host) 1460 1462 { 1461 1463 struct device_node *np = host->dev->of_node; 1462 - struct mxc_nand_platform_data *pdata = &host->pdata; 1463 1464 const struct of_device_id *of_id = 1464 1465 of_match_device(mxcnd_dt_ids, host->dev); 1465 - int buswidth; 1466 1466 1467 1467 if (!np) 1468 1468 return 1; 1469 - 1470 - if (of_get_nand_ecc_mode(np) >= 0) 1471 - pdata->hw_ecc = 1; 1472 - 1473 - pdata->flash_bbt = of_get_nand_on_flash_bbt(np); 1474 - 1475 - buswidth = of_get_nand_bus_width(np); 1476 - if (buswidth < 0) 1477 - return buswidth; 1478 - 1479 - pdata->width = buswidth / 8; 1480 1469 1481 1470 host->devtype_data = of_id->data; 1482 1471 ··· 1561 1576 1562 1577 this->select_chip = host->devtype_data->select_chip; 1563 1578 this->ecc.size = 512; 1564 - this->ecc.layout = host->devtype_data->ecclayout_512; 1579 + mtd_set_ooblayout(mtd, host->devtype_data->ooblayout); 1565 
1580 1566 1581 if (host->pdata.hw_ecc) { 1567 - this->ecc.calculate = mxc_nand_calculate_ecc; 1568 - this->ecc.hwctl = mxc_nand_enable_hwecc; 1569 - this->ecc.correct = host->devtype_data->correct_data; 1570 1582 this->ecc.mode = NAND_ECC_HW; 1571 1583 } else { 1572 1584 this->ecc.mode = NAND_ECC_SOFT; 1585 + this->ecc.algo = NAND_ECC_HAMMING; 1573 1586 } 1574 1587 1575 1588 /* NAND bus width determines access functions used by upper layer */ 1576 1589 if (host->pdata.width == 2) 1577 1590 this->options |= NAND_BUSWIDTH_16; 1578 1591 1579 - if (host->pdata.flash_bbt) { 1580 - this->bbt_td = &bbt_main_descr; 1581 - this->bbt_md = &bbt_mirror_descr; 1582 - /* update flash based bbt */ 1592 + /* update flash based bbt */ 1593 + if (host->pdata.flash_bbt) 1583 1594 this->bbt_options |= NAND_BBT_USE_FLASH; 1584 - } 1585 1595 1586 1596 init_completion(&host->op_completion); 1587 1597 ··· 1617 1637 goto escan; 1618 1638 } 1619 1639 1640 + switch (this->ecc.mode) { 1641 + case NAND_ECC_HW: 1642 + this->ecc.calculate = mxc_nand_calculate_ecc; 1643 + this->ecc.hwctl = mxc_nand_enable_hwecc; 1644 + this->ecc.correct = host->devtype_data->correct_data; 1645 + break; 1646 + 1647 + case NAND_ECC_SOFT: 1648 + break; 1649 + 1650 + default: 1651 + err = -EINVAL; 1652 + goto escan; 1653 + } 1654 + 1655 + if (this->bbt_options & NAND_BBT_USE_FLASH) { 1656 + this->bbt_td = &bbt_main_descr; 1657 + this->bbt_md = &bbt_mirror_descr; 1658 + } 1659 + 1620 1660 /* allocate the right size buffer now */ 1621 1661 devm_kfree(&pdev->dev, (void *)host->data_buf); 1622 1662 host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize, ··· 1649 1649 /* Call preset again, with correct writesize this time */ 1650 1650 host->devtype_data->preset(mtd); 1651 1651 1652 - if (mtd->writesize == 2048) 1653 - this->ecc.layout = host->devtype_data->ecclayout_2k; 1654 - else if (mtd->writesize == 4096) { 1655 - this->ecc.layout = host->devtype_data->ecclayout_4k; 1656 - if (get_eccsize(mtd) == 8) 
1657 - ecc_8bit_layout_4k(this->ecc.layout); 1652 + if (!this->ecc.bytes) { 1653 + if (host->eccsize == 8) 1654 + this->ecc.bytes = 18; 1655 + else if (host->eccsize == 4) 1656 + this->ecc.bytes = 9; 1658 1657 } 1659 1658 1660 1659 /*
+406 -233
drivers/mtd/nand/nand_base.c
··· 45 45 #include <linux/bitops.h> 46 46 #include <linux/io.h> 47 47 #include <linux/mtd/partitions.h> 48 - #include <linux/of_mtd.h> 49 - 50 - /* Define default oob placement schemes for large and small page devices */ 51 - static struct nand_ecclayout nand_oob_8 = { 52 - .eccbytes = 3, 53 - .eccpos = {0, 1, 2}, 54 - .oobfree = { 55 - {.offset = 3, 56 - .length = 2}, 57 - {.offset = 6, 58 - .length = 2} } 59 - }; 60 - 61 - static struct nand_ecclayout nand_oob_16 = { 62 - .eccbytes = 6, 63 - .eccpos = {0, 1, 2, 3, 6, 7}, 64 - .oobfree = { 65 - {.offset = 8, 66 - . length = 8} } 67 - }; 68 - 69 - static struct nand_ecclayout nand_oob_64 = { 70 - .eccbytes = 24, 71 - .eccpos = { 72 - 40, 41, 42, 43, 44, 45, 46, 47, 73 - 48, 49, 50, 51, 52, 53, 54, 55, 74 - 56, 57, 58, 59, 60, 61, 62, 63}, 75 - .oobfree = { 76 - {.offset = 2, 77 - .length = 38} } 78 - }; 79 - 80 - static struct nand_ecclayout nand_oob_128 = { 81 - .eccbytes = 48, 82 - .eccpos = { 83 - 80, 81, 82, 83, 84, 85, 86, 87, 84 - 88, 89, 90, 91, 92, 93, 94, 95, 85 - 96, 97, 98, 99, 100, 101, 102, 103, 86 - 104, 105, 106, 107, 108, 109, 110, 111, 87 - 112, 113, 114, 115, 116, 117, 118, 119, 88 - 120, 121, 122, 123, 124, 125, 126, 127}, 89 - .oobfree = { 90 - {.offset = 2, 91 - .length = 78} } 92 - }; 48 + #include <linux/of.h> 93 49 94 50 static int nand_get_device(struct mtd_info *mtd, int new_state); 95 51 96 52 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 97 53 struct mtd_oob_ops *ops); 54 + 55 + /* Define default oob placement schemes for large and small page devices */ 56 + static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, 57 + struct mtd_oob_region *oobregion) 58 + { 59 + struct nand_chip *chip = mtd_to_nand(mtd); 60 + struct nand_ecc_ctrl *ecc = &chip->ecc; 61 + 62 + if (section > 1) 63 + return -ERANGE; 64 + 65 + if (!section) { 66 + oobregion->offset = 0; 67 + oobregion->length = 4; 68 + } else { 69 + oobregion->offset = 6; 70 + oobregion->length = ecc->total - 4; 
71 + } 72 + 73 + return 0; 74 + } 75 + 76 + static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section, 77 + struct mtd_oob_region *oobregion) 78 + { 79 + if (section > 1) 80 + return -ERANGE; 81 + 82 + if (mtd->oobsize == 16) { 83 + if (section) 84 + return -ERANGE; 85 + 86 + oobregion->length = 8; 87 + oobregion->offset = 8; 88 + } else { 89 + oobregion->length = 2; 90 + if (!section) 91 + oobregion->offset = 3; 92 + else 93 + oobregion->offset = 6; 94 + } 95 + 96 + return 0; 97 + } 98 + 99 + const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = { 100 + .ecc = nand_ooblayout_ecc_sp, 101 + .free = nand_ooblayout_free_sp, 102 + }; 103 + EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops); 104 + 105 + static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, 106 + struct mtd_oob_region *oobregion) 107 + { 108 + struct nand_chip *chip = mtd_to_nand(mtd); 109 + struct nand_ecc_ctrl *ecc = &chip->ecc; 110 + 111 + if (section) 112 + return -ERANGE; 113 + 114 + oobregion->length = ecc->total; 115 + oobregion->offset = mtd->oobsize - oobregion->length; 116 + 117 + return 0; 118 + } 119 + 120 + static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section, 121 + struct mtd_oob_region *oobregion) 122 + { 123 + struct nand_chip *chip = mtd_to_nand(mtd); 124 + struct nand_ecc_ctrl *ecc = &chip->ecc; 125 + 126 + if (section) 127 + return -ERANGE; 128 + 129 + oobregion->length = mtd->oobsize - ecc->total - 2; 130 + oobregion->offset = 2; 131 + 132 + return 0; 133 + } 134 + 135 + const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = { 136 + .ecc = nand_ooblayout_ecc_lp, 137 + .free = nand_ooblayout_free_lp, 138 + }; 139 + EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops); 98 140 99 141 static int check_offs_len(struct mtd_info *mtd, 100 142 loff_t ofs, uint64_t len) ··· 1321 1279 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1322 1280 uint8_t *buf, int oob_required, int page) 1323 1281 { 1324 - int i, eccsize = chip->ecc.size; 1282 + int i, 
eccsize = chip->ecc.size, ret; 1325 1283 int eccbytes = chip->ecc.bytes; 1326 1284 int eccsteps = chip->ecc.steps; 1327 1285 uint8_t *p = buf; 1328 1286 uint8_t *ecc_calc = chip->buffers->ecccalc; 1329 1287 uint8_t *ecc_code = chip->buffers->ecccode; 1330 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1331 1288 unsigned int max_bitflips = 0; 1332 1289 1333 1290 chip->ecc.read_page_raw(mtd, chip, buf, 1, page); ··· 1334 1293 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1335 1294 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1336 1295 1337 - for (i = 0; i < chip->ecc.total; i++) 1338 - ecc_code[i] = chip->oob_poi[eccpos[i]]; 1296 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 1297 + chip->ecc.total); 1298 + if (ret) 1299 + return ret; 1339 1300 1340 1301 eccsteps = chip->ecc.steps; 1341 1302 p = buf; ··· 1369 1326 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi, 1370 1327 int page) 1371 1328 { 1372 - int start_step, end_step, num_steps; 1373 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1329 + int start_step, end_step, num_steps, ret; 1374 1330 uint8_t *p; 1375 1331 int data_col_addr, i, gaps = 0; 1376 1332 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 1377 1333 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1378 - int index; 1334 + int index, section = 0; 1379 1335 unsigned int max_bitflips = 0; 1336 + struct mtd_oob_region oobregion = { }; 1380 1337 1381 1338 /* Column address within the page aligned to ECC size (256bytes) */ 1382 1339 start_step = data_offs / chip->ecc.size; ··· 1404 1361 * The performance is faster if we position offsets according to 1405 1362 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 
1406 1363 */ 1407 - for (i = 0; i < eccfrag_len - 1; i++) { 1408 - if (eccpos[i + index] + 1 != eccpos[i + index + 1]) { 1409 - gaps = 1; 1410 - break; 1411 - } 1412 - } 1364 + ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion); 1365 + if (ret) 1366 + return ret; 1367 + 1368 + if (oobregion.length < eccfrag_len) 1369 + gaps = 1; 1370 + 1413 1371 if (gaps) { 1414 1372 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 1415 1373 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 1419 1375 * Send the command to read the particular ECC bytes take care 1420 1376 * about buswidth alignment in read_buf. 1421 1377 */ 1422 - aligned_pos = eccpos[index] & ~(busw - 1); 1378 + aligned_pos = oobregion.offset & ~(busw - 1); 1423 1379 aligned_len = eccfrag_len; 1424 - if (eccpos[index] & (busw - 1)) 1380 + if (oobregion.offset & (busw - 1)) 1425 1381 aligned_len++; 1426 - if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1)) 1382 + if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 1383 + (busw - 1)) 1427 1384 aligned_len++; 1428 1385 1429 1386 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 1430 - mtd->writesize + aligned_pos, -1); 1387 + mtd->writesize + aligned_pos, -1); 1431 1388 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len); 1432 1389 } 1433 1390 1434 - for (i = 0; i < eccfrag_len; i++) 1435 - chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]]; 1391 + ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode, 1392 + chip->oob_poi, index, eccfrag_len); 1393 + if (ret) 1394 + return ret; 1436 1395 1437 1396 p = bufpoi + data_col_addr; 1438 1397 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { ··· 1476 1429 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1477 1430 uint8_t *buf, int oob_required, int page) 1478 1431 { 1479 - int i, eccsize = chip->ecc.size; 1432 + int i, eccsize = chip->ecc.size, ret; 1480 1433 int eccbytes = chip->ecc.bytes; 1481 1434 int eccsteps 
= chip->ecc.steps; 1482 1435 uint8_t *p = buf; 1483 1436 uint8_t *ecc_calc = chip->buffers->ecccalc; 1484 1437 uint8_t *ecc_code = chip->buffers->ecccode; 1485 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1486 1438 unsigned int max_bitflips = 0; 1487 1439 1488 1440 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { ··· 1491 1445 } 1492 1446 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1493 1447 1494 - for (i = 0; i < chip->ecc.total; i++) 1495 - ecc_code[i] = chip->oob_poi[eccpos[i]]; 1448 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 1449 + chip->ecc.total); 1450 + if (ret) 1451 + return ret; 1496 1452 1497 1453 eccsteps = chip->ecc.steps; 1498 1454 p = buf; ··· 1539 1491 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1540 1492 struct nand_chip *chip, uint8_t *buf, int oob_required, int page) 1541 1493 { 1542 - int i, eccsize = chip->ecc.size; 1494 + int i, eccsize = chip->ecc.size, ret; 1543 1495 int eccbytes = chip->ecc.bytes; 1544 1496 int eccsteps = chip->ecc.steps; 1545 1497 uint8_t *p = buf; 1546 1498 uint8_t *ecc_code = chip->buffers->ecccode; 1547 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1548 1499 uint8_t *ecc_calc = chip->buffers->ecccalc; 1549 1500 unsigned int max_bitflips = 0; 1550 1501 ··· 1552 1505 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1553 1506 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 1554 1507 1555 - for (i = 0; i < chip->ecc.total; i++) 1556 - ecc_code[i] = chip->oob_poi[eccpos[i]]; 1508 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 1509 + chip->ecc.total); 1510 + if (ret) 1511 + return ret; 1557 1512 1558 1513 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1559 1514 int stat; ··· 1656 1607 1657 1608 /** 1658 1609 * nand_transfer_oob - [INTERN] Transfer oob to client buffer 1659 - * @chip: nand chip structure 1610 + * @mtd: mtd info structure 1660 1611 * @oob: oob destination address 1661 1612 * @ops: oob ops structure 1662 
1613 * @len: size of oob to transfer 1663 1614 */ 1664 - static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1615 + static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob, 1665 1616 struct mtd_oob_ops *ops, size_t len) 1666 1617 { 1618 + struct nand_chip *chip = mtd_to_nand(mtd); 1619 + int ret; 1620 + 1667 1621 switch (ops->mode) { 1668 1622 1669 1623 case MTD_OPS_PLACE_OOB: ··· 1674 1622 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 1675 1623 return oob + len; 1676 1624 1677 - case MTD_OPS_AUTO_OOB: { 1678 - struct nand_oobfree *free = chip->ecc.layout->oobfree; 1679 - uint32_t boffs = 0, roffs = ops->ooboffs; 1680 - size_t bytes = 0; 1625 + case MTD_OPS_AUTO_OOB: 1626 + ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi, 1627 + ops->ooboffs, len); 1628 + BUG_ON(ret); 1629 + return oob + len; 1681 1630 1682 - for (; free->length && len; free++, len -= bytes) { 1683 - /* Read request not from offset 0? */ 1684 - if (unlikely(roffs)) { 1685 - if (roffs >= free->length) { 1686 - roffs -= free->length; 1687 - continue; 1688 - } 1689 - boffs = free->offset + roffs; 1690 - bytes = min_t(size_t, len, 1691 - (free->length - roffs)); 1692 - roffs = 0; 1693 - } else { 1694 - bytes = min_t(size_t, len, free->length); 1695 - boffs = free->offset; 1696 - } 1697 - memcpy(oob, chip->oob_poi + boffs, bytes); 1698 - oob += bytes; 1699 - } 1700 - return oob; 1701 - } 1702 1631 default: 1703 1632 BUG(); 1704 1633 } ··· 1813 1780 int toread = min(oobreadlen, max_oobsize); 1814 1781 1815 1782 if (toread) { 1816 - oob = nand_transfer_oob(chip, 1783 + oob = nand_transfer_oob(mtd, 1817 1784 oob, ops, toread); 1818 1785 oobreadlen -= toread; 1819 1786 } ··· 1926 1893 * @chip: nand chip info structure 1927 1894 * @page: page number to read 1928 1895 */ 1929 - static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1930 - int page) 1896 + int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) 1931 1897 { 
1932 1898 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1933 1899 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1934 1900 return 0; 1935 1901 } 1902 + EXPORT_SYMBOL(nand_read_oob_std); 1936 1903 1937 1904 /** 1938 1905 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC ··· 1941 1908 * @chip: nand chip info structure 1942 1909 * @page: page number to read 1943 1910 */ 1944 - static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1945 - int page) 1911 + int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1912 + int page) 1946 1913 { 1947 1914 int length = mtd->oobsize; 1948 1915 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; ··· 1970 1937 1971 1938 return 0; 1972 1939 } 1940 + EXPORT_SYMBOL(nand_read_oob_syndrome); 1973 1941 1974 1942 /** 1975 1943 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function ··· 1978 1944 * @chip: nand chip info structure 1979 1945 * @page: page number to write 1980 1946 */ 1981 - static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1982 - int page) 1947 + int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) 1983 1948 { 1984 1949 int status = 0; 1985 1950 const uint8_t *buf = chip->oob_poi; ··· 1993 1960 1994 1961 return status & NAND_STATUS_FAIL ? 
-EIO : 0; 1995 1962 } 1963 + EXPORT_SYMBOL(nand_write_oob_std); 1996 1964 1997 1965 /** 1998 1966 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC ··· 2002 1968 * @chip: nand chip info structure 2003 1969 * @page: page number to write 2004 1970 */ 2005 - static int nand_write_oob_syndrome(struct mtd_info *mtd, 2006 - struct nand_chip *chip, int page) 1971 + int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1972 + int page) 2007 1973 { 2008 1974 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 2009 1975 int eccsize = chip->ecc.size, length = mtd->oobsize; ··· 2053 2019 2054 2020 return status & NAND_STATUS_FAIL ? -EIO : 0; 2055 2021 } 2022 + EXPORT_SYMBOL(nand_write_oob_syndrome); 2056 2023 2057 2024 /** 2058 2025 * nand_do_read_oob - [INTERN] NAND read out-of-band ··· 2113 2078 break; 2114 2079 2115 2080 len = min(len, readlen); 2116 - buf = nand_transfer_oob(chip, buf, ops, len); 2081 + buf = nand_transfer_oob(mtd, buf, ops, len); 2117 2082 2118 2083 if (chip->options & NAND_NEED_READRDY) { 2119 2084 /* Apply delay or wait for ready/busy pin */ ··· 2272 2237 const uint8_t *buf, int oob_required, 2273 2238 int page) 2274 2239 { 2275 - int i, eccsize = chip->ecc.size; 2240 + int i, eccsize = chip->ecc.size, ret; 2276 2241 int eccbytes = chip->ecc.bytes; 2277 2242 int eccsteps = chip->ecc.steps; 2278 2243 uint8_t *ecc_calc = chip->buffers->ecccalc; 2279 2244 const uint8_t *p = buf; 2280 - uint32_t *eccpos = chip->ecc.layout->eccpos; 2281 2245 2282 2246 /* Software ECC calculation */ 2283 2247 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 2284 2248 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 2285 2249 2286 - for (i = 0; i < chip->ecc.total; i++) 2287 - chip->oob_poi[eccpos[i]] = ecc_calc[i]; 2250 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 2251 + chip->ecc.total); 2252 + if (ret) 2253 + return ret; 2288 2254 2289 2255 return chip->ecc.write_page_raw(mtd, 
chip, buf, 1, page); 2290 2256 } ··· 2302 2266 const uint8_t *buf, int oob_required, 2303 2267 int page) 2304 2268 { 2305 - int i, eccsize = chip->ecc.size; 2269 + int i, eccsize = chip->ecc.size, ret; 2306 2270 int eccbytes = chip->ecc.bytes; 2307 2271 int eccsteps = chip->ecc.steps; 2308 2272 uint8_t *ecc_calc = chip->buffers->ecccalc; 2309 2273 const uint8_t *p = buf; 2310 - uint32_t *eccpos = chip->ecc.layout->eccpos; 2311 2274 2312 2275 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2313 2276 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); ··· 2314 2279 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 2315 2280 } 2316 2281 2317 - for (i = 0; i < chip->ecc.total; i++) 2318 - chip->oob_poi[eccpos[i]] = ecc_calc[i]; 2282 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 2283 + chip->ecc.total); 2284 + if (ret) 2285 + return ret; 2319 2286 2320 2287 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 2321 2288 ··· 2345 2308 int ecc_size = chip->ecc.size; 2346 2309 int ecc_bytes = chip->ecc.bytes; 2347 2310 int ecc_steps = chip->ecc.steps; 2348 - uint32_t *eccpos = chip->ecc.layout->eccpos; 2349 2311 uint32_t start_step = offset / ecc_size; 2350 2312 uint32_t end_step = (offset + data_len - 1) / ecc_size; 2351 2313 int oob_bytes = mtd->oobsize / ecc_steps; 2352 - int step, i; 2314 + int step, ret; 2353 2315 2354 2316 for (step = 0; step < ecc_steps; step++) { 2355 2317 /* configure controller for WRITE access */ ··· 2376 2340 /* copy calculated ECC for whole page to chip->buffer->oob */ 2377 2341 /* this include masked-value(0xFF) for unwritten subpages */ 2378 2342 ecc_calc = chip->buffers->ecccalc; 2379 - for (i = 0; i < chip->ecc.total; i++) 2380 - chip->oob_poi[eccpos[i]] = ecc_calc[i]; 2343 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 2344 + chip->ecc.total); 2345 + if (ret) 2346 + return ret; 2381 2347 2382 2348 /* write OOB buffer to NAND device */ 2383 2349 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 2516 
2478 struct mtd_oob_ops *ops) 2517 2479 { 2518 2480 struct nand_chip *chip = mtd_to_nand(mtd); 2481 + int ret; 2519 2482 2520 2483 /* 2521 2484 * Initialise to all 0xFF, to avoid the possibility of left over OOB ··· 2531 2492 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 2532 2493 return oob + len; 2533 2494 2534 - case MTD_OPS_AUTO_OOB: { 2535 - struct nand_oobfree *free = chip->ecc.layout->oobfree; 2536 - uint32_t boffs = 0, woffs = ops->ooboffs; 2537 - size_t bytes = 0; 2495 + case MTD_OPS_AUTO_OOB: 2496 + ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi, 2497 + ops->ooboffs, len); 2498 + BUG_ON(ret); 2499 + return oob + len; 2538 2500 2539 - for (; free->length && len; free++, len -= bytes) { 2540 - /* Write request not from offset 0? */ 2541 - if (unlikely(woffs)) { 2542 - if (woffs >= free->length) { 2543 - woffs -= free->length; 2544 - continue; 2545 - } 2546 - boffs = free->offset + woffs; 2547 - bytes = min_t(size_t, len, 2548 - (free->length - woffs)); 2549 - woffs = 0; 2550 - } else { 2551 - bytes = min_t(size_t, len, free->length); 2552 - boffs = free->offset; 2553 - } 2554 - memcpy(chip->oob_poi + boffs, oob, bytes); 2555 - oob += bytes; 2556 - } 2557 - return oob; 2558 - } 2559 2501 default: 2560 2502 BUG(); 2561 2503 } ··· 3971 3951 return type; 3972 3952 } 3973 3953 3954 + static const char * const nand_ecc_modes[] = { 3955 + [NAND_ECC_NONE] = "none", 3956 + [NAND_ECC_SOFT] = "soft", 3957 + [NAND_ECC_HW] = "hw", 3958 + [NAND_ECC_HW_SYNDROME] = "hw_syndrome", 3959 + [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", 3960 + }; 3961 + 3962 + static int of_get_nand_ecc_mode(struct device_node *np) 3963 + { 3964 + const char *pm; 3965 + int err, i; 3966 + 3967 + err = of_property_read_string(np, "nand-ecc-mode", &pm); 3968 + if (err < 0) 3969 + return err; 3970 + 3971 + for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) 3972 + if (!strcasecmp(pm, nand_ecc_modes[i])) 3973 + return i; 3974 + 3975 + /* 3976 + * For backward compatibility we support few 
obsoleted values that don't 3977 + * have their mappings into nand_ecc_modes_t anymore (they were merged 3978 + * with other enums). 3979 + */ 3980 + if (!strcasecmp(pm, "soft_bch")) 3981 + return NAND_ECC_SOFT; 3982 + 3983 + return -ENODEV; 3984 + } 3985 + 3986 + static const char * const nand_ecc_algos[] = { 3987 + [NAND_ECC_HAMMING] = "hamming", 3988 + [NAND_ECC_BCH] = "bch", 3989 + }; 3990 + 3991 + static int of_get_nand_ecc_algo(struct device_node *np) 3992 + { 3993 + const char *pm; 3994 + int err, i; 3995 + 3996 + err = of_property_read_string(np, "nand-ecc-algo", &pm); 3997 + if (!err) { 3998 + for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++) 3999 + if (!strcasecmp(pm, nand_ecc_algos[i])) 4000 + return i; 4001 + return -ENODEV; 4002 + } 4003 + 4004 + /* 4005 + * For backward compatibility we also read "nand-ecc-mode" checking 4006 + * for some obsoleted values that were specifying ECC algorithm. 4007 + */ 4008 + err = of_property_read_string(np, "nand-ecc-mode", &pm); 4009 + if (err < 0) 4010 + return err; 4011 + 4012 + if (!strcasecmp(pm, "soft")) 4013 + return NAND_ECC_HAMMING; 4014 + else if (!strcasecmp(pm, "soft_bch")) 4015 + return NAND_ECC_BCH; 4016 + 4017 + return -ENODEV; 4018 + } 4019 + 4020 + static int of_get_nand_ecc_step_size(struct device_node *np) 4021 + { 4022 + int ret; 4023 + u32 val; 4024 + 4025 + ret = of_property_read_u32(np, "nand-ecc-step-size", &val); 4026 + return ret ? ret : val; 4027 + } 4028 + 4029 + static int of_get_nand_ecc_strength(struct device_node *np) 4030 + { 4031 + int ret; 4032 + u32 val; 4033 + 4034 + ret = of_property_read_u32(np, "nand-ecc-strength", &val); 4035 + return ret ? 
ret : val; 4036 + } 4037 + 4038 + static int of_get_nand_bus_width(struct device_node *np) 4039 + { 4040 + u32 val; 4041 + 4042 + if (of_property_read_u32(np, "nand-bus-width", &val)) 4043 + return 8; 4044 + 4045 + switch (val) { 4046 + case 8: 4047 + case 16: 4048 + return val; 4049 + default: 4050 + return -EIO; 4051 + } 4052 + } 4053 + 4054 + static bool of_get_nand_on_flash_bbt(struct device_node *np) 4055 + { 4056 + return of_property_read_bool(np, "nand-on-flash-bbt"); 4057 + } 4058 + 3974 4059 static int nand_dt_init(struct nand_chip *chip) 3975 4060 { 3976 4061 struct device_node *dn = nand_get_flash_node(chip); 3977 - int ecc_mode, ecc_strength, ecc_step; 4062 + int ecc_mode, ecc_algo, ecc_strength, ecc_step; 3978 4063 3979 4064 if (!dn) 3980 4065 return 0; ··· 4091 3966 chip->bbt_options |= NAND_BBT_USE_FLASH; 4092 3967 4093 3968 ecc_mode = of_get_nand_ecc_mode(dn); 3969 + ecc_algo = of_get_nand_ecc_algo(dn); 4094 3970 ecc_strength = of_get_nand_ecc_strength(dn); 4095 3971 ecc_step = of_get_nand_ecc_step_size(dn); 4096 3972 ··· 4103 3977 4104 3978 if (ecc_mode >= 0) 4105 3979 chip->ecc.mode = ecc_mode; 3980 + 3981 + if (ecc_algo >= 0) 3982 + chip->ecc.algo = ecc_algo; 4106 3983 4107 3984 if (ecc_strength >= 0) 4108 3985 chip->ecc.strength = ecc_strength; ··· 4183 4054 } 4184 4055 EXPORT_SYMBOL(nand_scan_ident); 4185 4056 4057 + static int nand_set_ecc_soft_ops(struct mtd_info *mtd) 4058 + { 4059 + struct nand_chip *chip = mtd_to_nand(mtd); 4060 + struct nand_ecc_ctrl *ecc = &chip->ecc; 4061 + 4062 + if (WARN_ON(ecc->mode != NAND_ECC_SOFT)) 4063 + return -EINVAL; 4064 + 4065 + switch (ecc->algo) { 4066 + case NAND_ECC_HAMMING: 4067 + ecc->calculate = nand_calculate_ecc; 4068 + ecc->correct = nand_correct_data; 4069 + ecc->read_page = nand_read_page_swecc; 4070 + ecc->read_subpage = nand_read_subpage; 4071 + ecc->write_page = nand_write_page_swecc; 4072 + ecc->read_page_raw = nand_read_page_raw; 4073 + ecc->write_page_raw = nand_write_page_raw; 4074 + 
ecc->read_oob = nand_read_oob_std; 4075 + ecc->write_oob = nand_write_oob_std; 4076 + if (!ecc->size) 4077 + ecc->size = 256; 4078 + ecc->bytes = 3; 4079 + ecc->strength = 1; 4080 + return 0; 4081 + case NAND_ECC_BCH: 4082 + if (!mtd_nand_has_bch()) { 4083 + WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n"); 4084 + return -EINVAL; 4085 + } 4086 + ecc->calculate = nand_bch_calculate_ecc; 4087 + ecc->correct = nand_bch_correct_data; 4088 + ecc->read_page = nand_read_page_swecc; 4089 + ecc->read_subpage = nand_read_subpage; 4090 + ecc->write_page = nand_write_page_swecc; 4091 + ecc->read_page_raw = nand_read_page_raw; 4092 + ecc->write_page_raw = nand_write_page_raw; 4093 + ecc->read_oob = nand_read_oob_std; 4094 + ecc->write_oob = nand_write_oob_std; 4095 + /* 4096 + * Board driver should supply ecc.size and ecc.strength 4097 + * values to select how many bits are correctable. 4098 + * Otherwise, default to 4 bits for large page devices. 4099 + */ 4100 + if (!ecc->size && (mtd->oobsize >= 64)) { 4101 + ecc->size = 512; 4102 + ecc->strength = 4; 4103 + } 4104 + 4105 + /* 4106 + * if no ecc placement scheme was provided pickup the default 4107 + * large page one. 4108 + */ 4109 + if (!mtd->ooblayout) { 4110 + /* handle large page devices only */ 4111 + if (mtd->oobsize < 64) { 4112 + WARN(1, "OOB layout is required when using software BCH on small pages\n"); 4113 + return -EINVAL; 4114 + } 4115 + 4116 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 4117 + } 4118 + 4119 + /* See nand_bch_init() for details. */ 4120 + ecc->bytes = 0; 4121 + ecc->priv = nand_bch_init(mtd); 4122 + if (!ecc->priv) { 4123 + WARN(1, "BCH ECC initialization failed!\n"); 4124 + return -EINVAL; 4125 + } 4126 + return 0; 4127 + default: 4128 + WARN(1, "Unsupported ECC algorithm!\n"); 4129 + return -EINVAL; 4130 + } 4131 + } 4132 + 4186 4133 /* 4187 4134 * Check if the chip configuration meet the datasheet requirements. 
4188 4135 ··· 4303 4098 */ 4304 4099 int nand_scan_tail(struct mtd_info *mtd) 4305 4100 { 4306 - int i; 4307 4101 struct nand_chip *chip = mtd_to_nand(mtd); 4308 4102 struct nand_ecc_ctrl *ecc = &chip->ecc; 4309 4103 struct nand_buffers *nbuf; 4104 + int ret; 4310 4105 4311 4106 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 4312 - BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 4313 - !(chip->bbt_options & NAND_BBT_USE_FLASH)); 4107 + if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 4108 + !(chip->bbt_options & NAND_BBT_USE_FLASH))) 4109 + return -EINVAL; 4314 4110 4315 4111 if (!(chip->options & NAND_OWN_BUFFERS)) { 4316 4112 nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize ··· 4334 4128 /* 4335 4129 * If no default placement scheme is given, select an appropriate one. 4336 4130 */ 4337 - if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) { 4131 + if (!mtd->ooblayout && 4132 + !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) { 4338 4133 switch (mtd->oobsize) { 4339 4134 case 8: 4340 - ecc->layout = &nand_oob_8; 4341 - break; 4342 4135 case 16: 4343 - ecc->layout = &nand_oob_16; 4136 + mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops); 4344 4137 break; 4345 4138 case 64: 4346 - ecc->layout = &nand_oob_64; 4347 - break; 4348 4139 case 128: 4349 - ecc->layout = &nand_oob_128; 4140 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); 4350 4141 break; 4351 4142 default: 4352 - pr_warn("No oob scheme defined for oobsize %d\n", 4353 - mtd->oobsize); 4354 - BUG(); 4143 + WARN(1, "No oob scheme defined for oobsize %d\n", 4144 + mtd->oobsize); 4145 + ret = -EINVAL; 4146 + goto err_free; 4355 4147 } 4356 4148 } 4357 4149 ··· 4365 4161 case NAND_ECC_HW_OOB_FIRST: 4366 4162 /* Similar to NAND_ECC_HW, but a separate read_page handle */ 4367 4163 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) { 4368 - pr_warn("No ECC functions supplied; hardware ECC not possible\n"); 4369 - BUG(); 4164 + WARN(1, "No ECC functions supplied; hardware 
ECC not possible\n"); 4165 + ret = -EINVAL; 4166 + goto err_free; 4370 4167 } 4371 4168 if (!ecc->read_page) 4372 4169 ecc->read_page = nand_read_page_hwecc_oob_first; ··· 4397 4192 ecc->read_page == nand_read_page_hwecc || 4398 4193 !ecc->write_page || 4399 4194 ecc->write_page == nand_write_page_hwecc)) { 4400 - pr_warn("No ECC functions supplied; hardware ECC not possible\n"); 4401 - BUG(); 4195 + WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); 4196 + ret = -EINVAL; 4197 + goto err_free; 4402 4198 } 4403 4199 /* Use standard syndrome read/write page function? */ 4404 4200 if (!ecc->read_page) ··· 4417 4211 4418 4212 if (mtd->writesize >= ecc->size) { 4419 4213 if (!ecc->strength) { 4420 - pr_warn("Driver must set ecc.strength when using hardware ECC\n"); 4421 - BUG(); 4214 + WARN(1, "Driver must set ecc.strength when using hardware ECC\n"); 4215 + ret = -EINVAL; 4216 + goto err_free; 4422 4217 } 4423 4218 break; 4424 4219 } 4425 4220 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n", 4426 4221 ecc->size, mtd->writesize); 4427 4222 ecc->mode = NAND_ECC_SOFT; 4223 + ecc->algo = NAND_ECC_HAMMING; 4428 4224 4429 4225 case NAND_ECC_SOFT: 4430 - ecc->calculate = nand_calculate_ecc; 4431 - ecc->correct = nand_correct_data; 4432 - ecc->read_page = nand_read_page_swecc; 4433 - ecc->read_subpage = nand_read_subpage; 4434 - ecc->write_page = nand_write_page_swecc; 4435 - ecc->read_page_raw = nand_read_page_raw; 4436 - ecc->write_page_raw = nand_write_page_raw; 4437 - ecc->read_oob = nand_read_oob_std; 4438 - ecc->write_oob = nand_write_oob_std; 4439 - if (!ecc->size) 4440 - ecc->size = 256; 4441 - ecc->bytes = 3; 4442 - ecc->strength = 1; 4443 - break; 4444 - 4445 - case NAND_ECC_SOFT_BCH: 4446 - if (!mtd_nand_has_bch()) { 4447 - pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n"); 4448 - BUG(); 4449 - } 4450 - ecc->calculate = nand_bch_calculate_ecc; 4451 - ecc->correct = nand_bch_correct_data; 4452 - ecc->read_page = 
nand_read_page_swecc; 4453 - ecc->read_subpage = nand_read_subpage; 4454 - ecc->write_page = nand_write_page_swecc; 4455 - ecc->read_page_raw = nand_read_page_raw; 4456 - ecc->write_page_raw = nand_write_page_raw; 4457 - ecc->read_oob = nand_read_oob_std; 4458 - ecc->write_oob = nand_write_oob_std; 4459 - /* 4460 - * Board driver should supply ecc.size and ecc.strength values 4461 - * to select how many bits are correctable. Otherwise, default 4462 - * to 4 bits for large page devices. 4463 - */ 4464 - if (!ecc->size && (mtd->oobsize >= 64)) { 4465 - ecc->size = 512; 4466 - ecc->strength = 4; 4467 - } 4468 - 4469 - /* See nand_bch_init() for details. */ 4470 - ecc->bytes = 0; 4471 - ecc->priv = nand_bch_init(mtd); 4472 - if (!ecc->priv) { 4473 - pr_warn("BCH ECC initialization failed!\n"); 4474 - BUG(); 4226 + ret = nand_set_ecc_soft_ops(mtd); 4227 + if (ret) { 4228 + ret = -EINVAL; 4229 + goto err_free; 4475 4230 } 4476 4231 break; 4477 4232 ··· 4450 4283 break; 4451 4284 4452 4285 default: 4453 - pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode); 4454 - BUG(); 4286 + WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode); 4287 + ret = -EINVAL; 4288 + goto err_free; 4455 4289 } 4456 4290 4457 4291 /* For many systems, the standard OOB write also works for raw */ ··· 4461 4293 if (!ecc->write_oob_raw) 4462 4294 ecc->write_oob_raw = ecc->write_oob; 4463 4295 4464 - /* 4465 - * The number of bytes available for a client to place data into 4466 - * the out of band area. 
4467 - */ 4468 - mtd->oobavail = 0; 4469 - if (ecc->layout) { 4470 - for (i = 0; ecc->layout->oobfree[i].length; i++) 4471 - mtd->oobavail += ecc->layout->oobfree[i].length; 4472 - } 4473 - 4474 - /* ECC sanity check: warn if it's too weak */ 4475 - if (!nand_ecc_strength_good(mtd)) 4476 - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", 4477 - mtd->name); 4296 + /* propagate ecc info to mtd_info */ 4297 + mtd->ecc_strength = ecc->strength; 4298 + mtd->ecc_step_size = ecc->size; 4478 4299 4479 4300 /* 4480 4301 * Set the number of read / write steps for one page depending on ECC ··· 4471 4314 */ 4472 4315 ecc->steps = mtd->writesize / ecc->size; 4473 4316 if (ecc->steps * ecc->size != mtd->writesize) { 4474 - pr_warn("Invalid ECC parameters\n"); 4475 - BUG(); 4317 + WARN(1, "Invalid ECC parameters\n"); 4318 + ret = -EINVAL; 4319 + goto err_free; 4476 4320 } 4477 4321 ecc->total = ecc->steps * ecc->bytes; 4322 + 4323 + /* 4324 + * The number of bytes available for a client to place data into 4325 + * the out of band area. 4326 + */ 4327 + ret = mtd_ooblayout_count_freebytes(mtd); 4328 + if (ret < 0) 4329 + ret = 0; 4330 + 4331 + mtd->oobavail = ret; 4332 + 4333 + /* ECC sanity check: warn if it's too weak */ 4334 + if (!nand_ecc_strength_good(mtd)) 4335 + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", 4336 + mtd->name); 4478 4337 4479 4338 /* Allow subpage writes up to ecc.steps. 
Not possible for MLC flash */ 4480 4339 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { ··· 4516 4343 /* Large page NAND with SOFT_ECC should support subpage reads */ 4517 4344 switch (ecc->mode) { 4518 4345 case NAND_ECC_SOFT: 4519 - case NAND_ECC_SOFT_BCH: 4520 4346 if (chip->page_shift > 9) 4521 4347 chip->options |= NAND_SUBPAGE_READ; 4522 4348 break; ··· 4547 4375 mtd->_block_markbad = nand_block_markbad; 4548 4376 mtd->writebufsize = mtd->writesize; 4549 4377 4550 - /* propagate ecc info to mtd_info */ 4551 - mtd->ecclayout = ecc->layout; 4552 - mtd->ecc_strength = ecc->strength; 4553 - mtd->ecc_step_size = ecc->size; 4554 4378 /* 4555 4379 * Initialize bitflip_threshold to its default prior scan_bbt() call. 4556 4380 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be ··· 4561 4393 4562 4394 /* Build bad block table */ 4563 4395 return chip->scan_bbt(mtd); 4396 + err_free: 4397 + if (!(chip->options & NAND_OWN_BUFFERS)) 4398 + kfree(chip->buffers); 4399 + return ret; 4564 4400 } 4565 4401 EXPORT_SYMBOL(nand_scan_tail); 4566 4402 ··· 4608 4436 { 4609 4437 struct nand_chip *chip = mtd_to_nand(mtd); 4610 4438 4611 - if (chip->ecc.mode == NAND_ECC_SOFT_BCH) 4439 + if (chip->ecc.mode == NAND_ECC_SOFT && 4440 + chip->ecc.algo == NAND_ECC_BCH) 4612 4441 nand_bch_free((struct nand_bch_control *)chip->ecc.priv); 4613 4442 4614 4443 mtd_device_unregister(mtd);
+16 -32
drivers/mtd/nand/nand_bch.c
··· 32 32 /** 33 33 * struct nand_bch_control - private NAND BCH control structure 34 34 * @bch: BCH control structure 35 - * @ecclayout: private ecc layout for this BCH configuration 36 35 * @errloc: error location array 37 36 * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid 38 37 */ 39 38 struct nand_bch_control { 40 39 struct bch_control *bch; 41 - struct nand_ecclayout ecclayout; 42 40 unsigned int *errloc; 43 41 unsigned char *eccmask; 44 42 }; ··· 122 124 { 123 125 struct nand_chip *nand = mtd_to_nand(mtd); 124 126 unsigned int m, t, eccsteps, i; 125 - struct nand_ecclayout *layout = nand->ecc.layout; 126 127 struct nand_bch_control *nbc = NULL; 127 128 unsigned char *erased_page; 128 129 unsigned int eccsize = nand->ecc.size; ··· 158 161 159 162 eccsteps = mtd->writesize/eccsize; 160 163 161 - /* if no ecc placement scheme was provided, build one */ 162 - if (!layout) { 163 - 164 - /* handle large page devices only */ 165 - if (mtd->oobsize < 64) { 166 - printk(KERN_WARNING "must provide an oob scheme for " 167 - "oobsize %d\n", mtd->oobsize); 168 - goto fail; 169 - } 170 - 171 - layout = &nbc->ecclayout; 172 - layout->eccbytes = eccsteps*eccbytes; 173 - 174 - /* reserve 2 bytes for bad block marker */ 175 - if (layout->eccbytes+2 > mtd->oobsize) { 176 - printk(KERN_WARNING "no suitable oob scheme available " 177 - "for oobsize %d eccbytes %u\n", mtd->oobsize, 178 - eccbytes); 179 - goto fail; 180 - } 181 - /* put ecc bytes at oob tail */ 182 - for (i = 0; i < layout->eccbytes; i++) 183 - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; 184 - 185 - layout->oobfree[0].offset = 2; 186 - layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; 187 - 188 - nand->ecc.layout = layout; 164 + /* Check that we have an oob layout description. 
*/ 165 + if (!mtd->ooblayout) { 166 + pr_warn("missing oob scheme"); 167 + goto fail; 189 168 } 190 169 191 170 /* sanity checks */ ··· 169 196 printk(KERN_WARNING "eccsize %u is too large\n", eccsize); 170 197 goto fail; 171 198 } 172 - if (layout->eccbytes != (eccsteps*eccbytes)) { 199 + 200 + /* 201 + * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(), 202 + * which is called by mtd_ooblayout_count_eccbytes(). 203 + * Make sure they are properly initialized before calling 204 + * mtd_ooblayout_count_eccbytes(). 205 + * FIXME: we should probably rework the sequencing in nand_scan_tail() 206 + * to avoid setting those fields twice. 207 + */ 208 + nand->ecc.steps = eccsteps; 209 + nand->ecc.total = eccsteps * eccbytes; 210 + if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) { 173 211 printk(KERN_WARNING "invalid ecc layout\n"); 174 212 goto fail; 175 213 }
+6 -4
drivers/mtd/nand/nandsim.c
··· 569 569 * 570 570 * RETURNS: 0 if success, -ENOMEM if memory alloc fails. 571 571 */ 572 - static int alloc_device(struct nandsim *ns) 572 + static int __init alloc_device(struct nandsim *ns) 573 573 { 574 574 struct file *cfile; 575 575 int i, err; ··· 654 654 } 655 655 } 656 656 657 - static char *get_partition_name(int i) 657 + static char __init *get_partition_name(int i) 658 658 { 659 659 return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); 660 660 } ··· 664 664 * 665 665 * RETURNS: 0 if success, -ERRNO if failure. 666 666 */ 667 - static int init_nandsim(struct mtd_info *mtd) 667 + static int __init init_nandsim(struct mtd_info *mtd) 668 668 { 669 669 struct nand_chip *chip = mtd_to_nand(mtd); 670 670 struct nandsim *ns = nand_get_controller_data(chip); ··· 2261 2261 chip->read_buf = ns_nand_read_buf; 2262 2262 chip->read_word = ns_nand_read_word; 2263 2263 chip->ecc.mode = NAND_ECC_SOFT; 2264 + chip->ecc.algo = NAND_ECC_HAMMING; 2264 2265 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ 2265 2266 /* and 'badblocks' parameters to work */ 2266 2267 chip->options |= NAND_SKIP_BBTSCAN; ··· 2339 2338 retval = -EINVAL; 2340 2339 goto error; 2341 2340 } 2342 - chip->ecc.mode = NAND_ECC_SOFT_BCH; 2341 + chip->ecc.mode = NAND_ECC_SOFT; 2342 + chip->ecc.algo = NAND_ECC_BCH; 2343 2343 chip->ecc.size = 512; 2344 2344 chip->ecc.strength = bch; 2345 2345 chip->ecc.bytes = eccbytes;
+1
drivers/mtd/nand/nuc900_nand.c
··· 261 261 chip->chip_delay = 50; 262 262 chip->options = 0; 263 263 chip->ecc.mode = NAND_ECC_SOFT; 264 + chip->ecc.algo = NAND_ECC_HAMMING; 264 265 265 266 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 266 267 nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+299 -151
drivers/mtd/nand/omap2.c
··· 12 12 #include <linux/dmaengine.h> 13 13 #include <linux/dma-mapping.h> 14 14 #include <linux/delay.h> 15 + #include <linux/gpio/consumer.h> 15 16 #include <linux/module.h> 16 17 #include <linux/interrupt.h> 17 18 #include <linux/jiffies.h> ··· 29 28 #include <linux/mtd/nand_bch.h> 30 29 #include <linux/platform_data/elm.h> 31 30 31 + #include <linux/omap-gpmc.h> 32 32 #include <linux/platform_data/mtd-nand-omap2.h> 33 33 34 34 #define DRIVER_NAME "omap2-nand" ··· 153 151 }; 154 152 155 153 struct omap_nand_info { 156 - struct omap_nand_platform_data *pdata; 157 154 struct nand_chip nand; 158 155 struct platform_device *pdev; 159 156 160 157 int gpmc_cs; 161 - unsigned long phys_base; 158 + bool dev_ready; 159 + enum nand_io xfer_type; 160 + int devsize; 162 161 enum omap_ecc ecc_opt; 162 + struct device_node *elm_of_node; 163 + 164 + unsigned long phys_base; 163 165 struct completion comp; 164 166 struct dma_chan *dma; 165 167 int gpmc_irq_fifo; ··· 174 168 } iomode; 175 169 u_char *buf; 176 170 int buf_len; 171 + /* Interface to GPMC */ 177 172 struct gpmc_nand_regs reg; 178 - /* generated at runtime depending on ECC algorithm and layout selected */ 179 - struct nand_ecclayout oobinfo; 173 + struct gpmc_nand_ops *ops; 174 + bool flash_bbt; 180 175 /* fields specific for BCHx_HW ECC scheme */ 181 176 struct device *elm_dev; 182 - struct device_node *of_node; 177 + /* NAND ready gpio */ 178 + struct gpio_desc *ready_gpiod; 183 179 }; 184 180 185 181 static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd) ··· 216 208 */ 217 209 val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) | 218 210 PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH | 219 - (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write)); 211 + (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1)); 220 212 writel(val, info->reg.gpmc_prefetch_config1); 221 213 222 214 /* Start the prefetch engine */ ··· 296 288 { 297 289 struct omap_nand_info *info = mtd_to_omap(mtd); 298 290 u_char *p = (u_char 
*)buf; 299 - u32 status = 0; 291 + bool status; 300 292 301 293 while (len--) { 302 294 iowrite8(*p++, info->nand.IO_ADDR_W); 303 295 /* wait until buffer is available for write */ 304 296 do { 305 - status = readl(info->reg.gpmc_status) & 306 - STATUS_BUFF_EMPTY; 297 + status = info->ops->nand_writebuffer_empty(); 307 298 } while (!status); 308 299 } 309 300 } ··· 330 323 { 331 324 struct omap_nand_info *info = mtd_to_omap(mtd); 332 325 u16 *p = (u16 *) buf; 333 - u32 status = 0; 326 + bool status; 334 327 /* FIXME try bursts of writesw() or DMA ... */ 335 328 len >>= 1; 336 329 ··· 338 331 iowrite16(*p++, info->nand.IO_ADDR_W); 339 332 /* wait until buffer is available for write */ 340 333 do { 341 - status = readl(info->reg.gpmc_status) & 342 - STATUS_BUFF_EMPTY; 334 + status = info->ops->nand_writebuffer_empty(); 343 335 } while (!status); 344 336 } 345 337 } ··· 473 467 int ret; 474 468 u32 val; 475 469 476 - if (addr >= high_memory) { 477 - struct page *p1; 478 - 479 - if (((size_t)addr & PAGE_MASK) != 480 - ((size_t)(addr + len - 1) & PAGE_MASK)) 481 - goto out_copy; 482 - p1 = vmalloc_to_page(addr); 483 - if (!p1) 484 - goto out_copy; 485 - addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 486 - } 470 + if (!virt_addr_valid(addr)) 471 + goto out_copy; 487 472 488 473 sg_init_one(&sg, addr, len); 489 474 n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); ··· 494 497 tx->callback_param = &info->comp; 495 498 dmaengine_submit(tx); 496 499 500 + init_completion(&info->comp); 501 + 502 + /* setup and start DMA using dma_addr */ 503 + dma_async_issue_pending(info->dma); 504 + 497 505 /* configure and start prefetch transfer */ 498 506 ret = omap_prefetch_enable(info->gpmc_cs, 499 507 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info); ··· 506 504 /* PFPW engine is busy, use cpu copy method */ 507 505 goto out_copy_unmap; 508 506 509 - init_completion(&info->comp); 510 - dma_async_issue_pending(info->dma); 511 - 512 - /* setup and start DMA using 
dma_addr */ 513 507 wait_for_completion(&info->comp); 514 508 tim = 0; 515 509 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); ··· 1015 1017 } 1016 1018 1017 1019 /** 1018 - * omap_dev_ready - calls the platform specific dev_ready function 1020 + * omap_dev_ready - checks the NAND Ready GPIO line 1019 1021 * @mtd: MTD device structure 1022 + * 1023 + * Returns true if ready and false if busy. 1020 1024 */ 1021 1025 static int omap_dev_ready(struct mtd_info *mtd) 1022 1026 { 1023 - unsigned int val = 0; 1024 1027 struct omap_nand_info *info = mtd_to_omap(mtd); 1025 1028 1026 - val = readl(info->reg.gpmc_status); 1027 - 1028 - if ((val & 0x100) == 0x100) { 1029 - return 1; 1030 - } else { 1031 - return 0; 1032 - } 1029 + return gpiod_get_value(info->ready_gpiod); 1033 1030 } 1034 1031 1035 1032 /** ··· 1488 1495 static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, 1489 1496 const uint8_t *buf, int oob_required, int page) 1490 1497 { 1491 - int i; 1498 + int ret; 1492 1499 uint8_t *ecc_calc = chip->buffers->ecccalc; 1493 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1494 1500 1495 1501 /* Enable GPMC ecc engine */ 1496 1502 chip->ecc.hwctl(mtd, NAND_ECC_WRITE); ··· 1500 1508 /* Update ecc vector from GPMC result registers */ 1501 1509 chip->ecc.calculate(mtd, buf, &ecc_calc[0]); 1502 1510 1503 - for (i = 0; i < chip->ecc.total; i++) 1504 - chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1511 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 1512 + chip->ecc.total); 1513 + if (ret) 1514 + return ret; 1505 1515 1506 1516 /* Write ecc vector to OOB area */ 1507 1517 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 1530 1536 { 1531 1537 uint8_t *ecc_calc = chip->buffers->ecccalc; 1532 1538 uint8_t *ecc_code = chip->buffers->ecccode; 1533 - uint32_t *eccpos = chip->ecc.layout->eccpos; 1534 - uint8_t *oob = &chip->oob_poi[eccpos[0]]; 1535 - uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0]; 1536 - 
int stat; 1539 + int stat, ret; 1537 1540 unsigned int max_bitflips = 0; 1538 1541 1539 1542 /* Enable GPMC ecc engine */ ··· 1540 1549 chip->read_buf(mtd, buf, mtd->writesize); 1541 1550 1542 1551 /* Read oob bytes */ 1543 - chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1); 1544 - chip->read_buf(mtd, oob, chip->ecc.total); 1552 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 1553 + mtd->writesize + BADBLOCK_MARKER_LENGTH, -1); 1554 + chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH, 1555 + chip->ecc.total); 1545 1556 1546 1557 /* Calculate ecc bytes */ 1547 1558 chip->ecc.calculate(mtd, buf, ecc_calc); 1548 1559 1549 - memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total); 1560 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 1561 + chip->ecc.total); 1562 + if (ret) 1563 + return ret; 1550 1564 1551 1565 stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc); 1552 1566 ··· 1626 1630 "CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); 1627 1631 return false; 1628 1632 } 1629 - if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) { 1633 + if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) { 1630 1634 dev_err(&info->pdev->dev, "ELM not available\n"); 1631 1635 return false; 1632 1636 } ··· 1634 1638 return true; 1635 1639 } 1636 1640 1641 + static const char * const nand_xfer_types[] = { 1642 + [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", 1643 + [NAND_OMAP_POLLED] = "polled", 1644 + [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", 1645 + [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", 1646 + }; 1647 + 1648 + static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info) 1649 + { 1650 + struct device_node *child = dev->of_node; 1651 + int i; 1652 + const char *s; 1653 + u32 cs; 1654 + 1655 + if (of_property_read_u32(child, "reg", &cs) < 0) { 1656 + dev_err(dev, "reg not found in DT\n"); 1657 + return -EINVAL; 1658 + } 1659 + 1660 + info->gpmc_cs = cs; 1661 + 1662 + /* detect availability of ELM module. 
Won't be present pre-OMAP4 */ 1663 + info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); 1664 + if (!info->elm_of_node) 1665 + dev_dbg(dev, "ti,elm-id not in DT\n"); 1666 + 1667 + /* select ecc-scheme for NAND */ 1668 + if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { 1669 + dev_err(dev, "ti,nand-ecc-opt not found\n"); 1670 + return -EINVAL; 1671 + } 1672 + 1673 + if (!strcmp(s, "sw")) { 1674 + info->ecc_opt = OMAP_ECC_HAM1_CODE_SW; 1675 + } else if (!strcmp(s, "ham1") || 1676 + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) { 1677 + info->ecc_opt = OMAP_ECC_HAM1_CODE_HW; 1678 + } else if (!strcmp(s, "bch4")) { 1679 + if (info->elm_of_node) 1680 + info->ecc_opt = OMAP_ECC_BCH4_CODE_HW; 1681 + else 1682 + info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; 1683 + } else if (!strcmp(s, "bch8")) { 1684 + if (info->elm_of_node) 1685 + info->ecc_opt = OMAP_ECC_BCH8_CODE_HW; 1686 + else 1687 + info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; 1688 + } else if (!strcmp(s, "bch16")) { 1689 + info->ecc_opt = OMAP_ECC_BCH16_CODE_HW; 1690 + } else { 1691 + dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n"); 1692 + return -EINVAL; 1693 + } 1694 + 1695 + /* select data transfer mode */ 1696 + if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) { 1697 + for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) { 1698 + if (!strcasecmp(s, nand_xfer_types[i])) { 1699 + info->xfer_type = i; 1700 + return 0; 1701 + } 1702 + } 1703 + 1704 + dev_err(dev, "unrecognized value for ti,nand-xfer-type\n"); 1705 + return -EINVAL; 1706 + } 1707 + 1708 + return 0; 1709 + } 1710 + 1711 + static int omap_ooblayout_ecc(struct mtd_info *mtd, int section, 1712 + struct mtd_oob_region *oobregion) 1713 + { 1714 + struct omap_nand_info *info = mtd_to_omap(mtd); 1715 + struct nand_chip *chip = &info->nand; 1716 + int off = BADBLOCK_MARKER_LENGTH; 1717 + 1718 + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && 1719 + !(chip->options & NAND_BUSWIDTH_16)) 1720 + off = 1; 1721 + 
1722 + if (section) 1723 + return -ERANGE; 1724 + 1725 + oobregion->offset = off; 1726 + oobregion->length = chip->ecc.total; 1727 + 1728 + return 0; 1729 + } 1730 + 1731 + static int omap_ooblayout_free(struct mtd_info *mtd, int section, 1732 + struct mtd_oob_region *oobregion) 1733 + { 1734 + struct omap_nand_info *info = mtd_to_omap(mtd); 1735 + struct nand_chip *chip = &info->nand; 1736 + int off = BADBLOCK_MARKER_LENGTH; 1737 + 1738 + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && 1739 + !(chip->options & NAND_BUSWIDTH_16)) 1740 + off = 1; 1741 + 1742 + if (section) 1743 + return -ERANGE; 1744 + 1745 + off += chip->ecc.total; 1746 + if (off >= mtd->oobsize) 1747 + return -ERANGE; 1748 + 1749 + oobregion->offset = off; 1750 + oobregion->length = mtd->oobsize - off; 1751 + 1752 + return 0; 1753 + } 1754 + 1755 + static const struct mtd_ooblayout_ops omap_ooblayout_ops = { 1756 + .ecc = omap_ooblayout_ecc, 1757 + .free = omap_ooblayout_free, 1758 + }; 1759 + 1760 + static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section, 1761 + struct mtd_oob_region *oobregion) 1762 + { 1763 + struct nand_chip *chip = mtd_to_nand(mtd); 1764 + int off = BADBLOCK_MARKER_LENGTH; 1765 + 1766 + if (section >= chip->ecc.steps) 1767 + return -ERANGE; 1768 + 1769 + /* 1770 + * When SW correction is employed, one OMAP specific marker byte is 1771 + * reserved after each ECC step. 1772 + */ 1773 + oobregion->offset = off + (section * (chip->ecc.bytes + 1)); 1774 + oobregion->length = chip->ecc.bytes; 1775 + 1776 + return 0; 1777 + } 1778 + 1779 + static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section, 1780 + struct mtd_oob_region *oobregion) 1781 + { 1782 + struct nand_chip *chip = mtd_to_nand(mtd); 1783 + int off = BADBLOCK_MARKER_LENGTH; 1784 + 1785 + if (section) 1786 + return -ERANGE; 1787 + 1788 + /* 1789 + * When SW correction is employed, one OMAP specific marker byte is 1790 + * reserved after each ECC step. 
1791 + */ 1792 + off += ((chip->ecc.bytes + 1) * chip->ecc.steps); 1793 + if (off >= mtd->oobsize) 1794 + return -ERANGE; 1795 + 1796 + oobregion->offset = off; 1797 + oobregion->length = mtd->oobsize - off; 1798 + 1799 + return 0; 1800 + } 1801 + 1802 + static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = { 1803 + .ecc = omap_sw_ooblayout_ecc, 1804 + .free = omap_sw_ooblayout_free, 1805 + }; 1806 + 1637 1807 static int omap_nand_probe(struct platform_device *pdev) 1638 1808 { 1639 1809 struct omap_nand_info *info; 1640 - struct omap_nand_platform_data *pdata; 1810 + struct omap_nand_platform_data *pdata = NULL; 1641 1811 struct mtd_info *mtd; 1642 1812 struct nand_chip *nand_chip; 1643 - struct nand_ecclayout *ecclayout; 1644 1813 int err; 1645 - int i; 1646 1814 dma_cap_mask_t mask; 1647 1815 unsigned sig; 1648 - unsigned oob_index; 1649 1816 struct resource *res; 1650 - 1651 - pdata = dev_get_platdata(&pdev->dev); 1652 - if (pdata == NULL) { 1653 - dev_err(&pdev->dev, "platform data missing\n"); 1654 - return -ENODEV; 1655 - } 1817 + struct device *dev = &pdev->dev; 1818 + int min_oobbytes = BADBLOCK_MARKER_LENGTH; 1819 + int oobbytes_per_step; 1656 1820 1657 1821 info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info), 1658 1822 GFP_KERNEL); 1659 1823 if (!info) 1660 1824 return -ENOMEM; 1661 1825 1662 - platform_set_drvdata(pdev, info); 1826 + info->pdev = pdev; 1663 1827 1664 - info->pdev = pdev; 1665 - info->gpmc_cs = pdata->cs; 1666 - info->reg = pdata->reg; 1667 - info->of_node = pdata->of_node; 1668 - info->ecc_opt = pdata->ecc_opt; 1828 + if (dev->of_node) { 1829 + if (omap_get_dt_info(dev, info)) 1830 + return -EINVAL; 1831 + } else { 1832 + pdata = dev_get_platdata(&pdev->dev); 1833 + if (!pdata) { 1834 + dev_err(&pdev->dev, "platform data missing\n"); 1835 + return -EINVAL; 1836 + } 1837 + 1838 + info->gpmc_cs = pdata->cs; 1839 + info->reg = pdata->reg; 1840 + info->ecc_opt = pdata->ecc_opt; 1841 + if (pdata->dev_ready) 1842 + 
dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n"); 1843 + 1844 + info->xfer_type = pdata->xfer_type; 1845 + info->devsize = pdata->devsize; 1846 + info->elm_of_node = pdata->elm_of_node; 1847 + info->flash_bbt = pdata->flash_bbt; 1848 + } 1849 + 1850 + platform_set_drvdata(pdev, info); 1851 + info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); 1852 + if (!info->ops) { 1853 + dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); 1854 + return -ENODEV; 1855 + } 1856 + 1669 1857 nand_chip = &info->nand; 1670 1858 mtd = nand_to_mtd(nand_chip); 1671 1859 mtd->dev.parent = &pdev->dev; 1672 1860 nand_chip->ecc.priv = NULL; 1673 - nand_set_flash_node(nand_chip, pdata->of_node); 1861 + nand_set_flash_node(nand_chip, dev->of_node); 1674 1862 1675 1863 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1676 1864 nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); ··· 1868 1688 nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R; 1869 1689 nand_chip->cmd_ctrl = omap_hwcontrol; 1870 1690 1691 + info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb", 1692 + GPIOD_IN); 1693 + if (IS_ERR(info->ready_gpiod)) { 1694 + dev_err(dev, "failed to get ready gpio\n"); 1695 + return PTR_ERR(info->ready_gpiod); 1696 + } 1697 + 1871 1698 /* 1872 1699 * If RDY/BSY line is connected to OMAP then use the omap ready 1873 1700 * function and the generic nand_wait function which reads the status ··· 1882 1695 * chip delay which is slightly more than tR (AC Timing) of the NAND 1883 1696 * device and read status register until you get a failure or success 1884 1697 */ 1885 - if (pdata->dev_ready) { 1698 + if (info->ready_gpiod) { 1886 1699 nand_chip->dev_ready = omap_dev_ready; 1887 1700 nand_chip->chip_delay = 0; 1888 1701 } else { ··· 1890 1703 nand_chip->chip_delay = 50; 1891 1704 } 1892 1705 1893 - if (pdata->flash_bbt) 1894 - nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 1895 - else 1896 - nand_chip->options |= NAND_SKIP_BBTSCAN; 1706 + if 
(info->flash_bbt) 1707 + nand_chip->bbt_options |= NAND_BBT_USE_FLASH; 1897 1708 1898 1709 /* scan NAND device connected to chip controller */ 1899 - nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16; 1710 + nand_chip->options |= info->devsize & NAND_BUSWIDTH_16; 1900 1711 if (nand_scan_ident(mtd, 1, NULL)) { 1901 - dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n"); 1712 + dev_err(&info->pdev->dev, 1713 + "scan failed, may be bus-width mismatch\n"); 1902 1714 err = -ENXIO; 1903 1715 goto return_error; 1904 1716 } 1905 1717 1718 + if (nand_chip->bbt_options & NAND_BBT_USE_FLASH) 1719 + nand_chip->bbt_options |= NAND_BBT_NO_OOB; 1720 + else 1721 + nand_chip->options |= NAND_SKIP_BBTSCAN; 1722 + 1906 1723 /* re-populate low-level callbacks based on xfer modes */ 1907 - switch (pdata->xfer_type) { 1724 + switch (info->xfer_type) { 1908 1725 case NAND_OMAP_PREFETCH_POLLED: 1909 1726 nand_chip->read_buf = omap_read_buf_pref; 1910 1727 nand_chip->write_buf = omap_write_buf_pref; ··· 1988 1797 1989 1798 default: 1990 1799 dev_err(&pdev->dev, 1991 - "xfer_type(%d) not supported!\n", pdata->xfer_type); 1800 + "xfer_type(%d) not supported!\n", info->xfer_type); 1992 1801 err = -EINVAL; 1993 1802 goto return_error; 1994 1803 } ··· 2000 1809 2001 1810 /* 2002 1811 * Bail out earlier to let NAND_ECC_SOFT code create its own 2003 - * ecclayout instead of using ours. 1812 + * ooblayout instead of using ours. 
2004 1813 */ 2005 1814 if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) { 2006 1815 nand_chip->ecc.mode = NAND_ECC_SOFT; 1816 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 2007 1817 goto scan_tail; 2008 1818 } 2009 1819 2010 1820 /* populate MTD interface based on ECC scheme */ 2011 - ecclayout = &info->oobinfo; 2012 - nand_chip->ecc.layout = ecclayout; 2013 1821 switch (info->ecc_opt) { 2014 1822 case OMAP_ECC_HAM1_CODE_HW: 2015 1823 pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); ··· 2019 1829 nand_chip->ecc.calculate = omap_calculate_ecc; 2020 1830 nand_chip->ecc.hwctl = omap_enable_hwecc; 2021 1831 nand_chip->ecc.correct = omap_correct_data; 2022 - /* define ECC layout */ 2023 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2024 - (mtd->writesize / 2025 - nand_chip->ecc.size); 2026 - if (nand_chip->options & NAND_BUSWIDTH_16) 2027 - oob_index = BADBLOCK_MARKER_LENGTH; 2028 - else 2029 - oob_index = 1; 2030 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 2031 - ecclayout->eccpos[i] = oob_index; 2032 - /* no reserved-marker in ecclayout for this ecc-scheme */ 2033 - ecclayout->oobfree->offset = 2034 - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1832 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 1833 + oobbytes_per_step = nand_chip->ecc.bytes; 1834 + 1835 + if (!(nand_chip->options & NAND_BUSWIDTH_16)) 1836 + min_oobbytes = 1; 1837 + 2035 1838 break; 2036 1839 2037 1840 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: ··· 2036 1853 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2037 1854 nand_chip->ecc.correct = nand_bch_correct_data; 2038 1855 nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2039 - /* define ECC layout */ 2040 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2041 - (mtd->writesize / 2042 - nand_chip->ecc.size); 2043 - oob_index = BADBLOCK_MARKER_LENGTH; 2044 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { 2045 - ecclayout->eccpos[i] = oob_index; 2046 - if (((i + 1) % nand_chip->ecc.bytes) == 0) 2047 - oob_index++; 2048 - } 2049 - /* include 
reserved-marker in ecclayout->oobfree calculation */ 2050 - ecclayout->oobfree->offset = 1 + 2051 - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1856 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); 1857 + /* Reserve one byte for the OMAP marker */ 1858 + oobbytes_per_step = nand_chip->ecc.bytes + 1; 2052 1859 /* software bch library is used for locating errors */ 2053 1860 nand_chip->ecc.priv = nand_bch_init(mtd); 2054 1861 if (!nand_chip->ecc.priv) { ··· 2060 1887 nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2061 1888 nand_chip->ecc.read_page = omap_read_page_bch; 2062 1889 nand_chip->ecc.write_page = omap_write_page_bch; 2063 - /* define ECC layout */ 2064 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2065 - (mtd->writesize / 2066 - nand_chip->ecc.size); 2067 - oob_index = BADBLOCK_MARKER_LENGTH; 2068 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 2069 - ecclayout->eccpos[i] = oob_index; 2070 - /* reserved marker already included in ecclayout->eccbytes */ 2071 - ecclayout->oobfree->offset = 2072 - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1890 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 1891 + oobbytes_per_step = nand_chip->ecc.bytes; 2073 1892 2074 1893 err = elm_config(info->elm_dev, BCH4_ECC, 2075 1894 mtd->writesize / nand_chip->ecc.size, ··· 2079 1914 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2080 1915 nand_chip->ecc.correct = nand_bch_correct_data; 2081 1916 nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2082 - /* define ECC layout */ 2083 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2084 - (mtd->writesize / 2085 - nand_chip->ecc.size); 2086 - oob_index = BADBLOCK_MARKER_LENGTH; 2087 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { 2088 - ecclayout->eccpos[i] = oob_index; 2089 - if (((i + 1) % nand_chip->ecc.bytes) == 0) 2090 - oob_index++; 2091 - } 2092 - /* include reserved-marker in ecclayout->oobfree calculation */ 2093 - ecclayout->oobfree->offset = 1 + 2094 - ecclayout->eccpos[ecclayout->eccbytes - 1] 
+ 1; 1917 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); 1918 + /* Reserve one byte for the OMAP marker */ 1919 + oobbytes_per_step = nand_chip->ecc.bytes + 1; 2095 1920 /* software bch library is used for locating errors */ 2096 1921 nand_chip->ecc.priv = nand_bch_init(mtd); 2097 1922 if (!nand_chip->ecc.priv) { ··· 2103 1948 nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2104 1949 nand_chip->ecc.read_page = omap_read_page_bch; 2105 1950 nand_chip->ecc.write_page = omap_write_page_bch; 1951 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 1952 + oobbytes_per_step = nand_chip->ecc.bytes; 2106 1953 2107 1954 err = elm_config(info->elm_dev, BCH8_ECC, 2108 1955 mtd->writesize / nand_chip->ecc.size, ··· 2112 1955 if (err < 0) 2113 1956 goto return_error; 2114 1957 2115 - /* define ECC layout */ 2116 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2117 - (mtd->writesize / 2118 - nand_chip->ecc.size); 2119 - oob_index = BADBLOCK_MARKER_LENGTH; 2120 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 2121 - ecclayout->eccpos[i] = oob_index; 2122 - /* reserved marker already included in ecclayout->eccbytes */ 2123 - ecclayout->oobfree->offset = 2124 - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 2125 1958 break; 2126 1959 2127 1960 case OMAP_ECC_BCH16_CODE_HW: ··· 2125 1978 nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2126 1979 nand_chip->ecc.read_page = omap_read_page_bch; 2127 1980 nand_chip->ecc.write_page = omap_write_page_bch; 1981 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 1982 + oobbytes_per_step = nand_chip->ecc.bytes; 2128 1983 2129 1984 err = elm_config(info->elm_dev, BCH16_ECC, 2130 1985 mtd->writesize / nand_chip->ecc.size, ··· 2134 1985 if (err < 0) 2135 1986 goto return_error; 2136 1987 2137 - /* define ECC layout */ 2138 - ecclayout->eccbytes = nand_chip->ecc.bytes * 2139 - (mtd->writesize / 2140 - nand_chip->ecc.size); 2141 - oob_index = BADBLOCK_MARKER_LENGTH; 2142 - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 2143 - 
ecclayout->eccpos[i] = oob_index; 2144 - /* reserved marker already included in ecclayout->eccbytes */ 2145 - ecclayout->oobfree->offset = 2146 - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 2147 1988 break; 2148 1989 default: 2149 1990 dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n"); ··· 2141 2002 goto return_error; 2142 2003 } 2143 2004 2144 - /* all OOB bytes from oobfree->offset till end off OOB are free */ 2145 - ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; 2146 2005 /* check if NAND device's OOB is enough to store ECC signatures */ 2147 - if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) { 2006 + min_oobbytes += (oobbytes_per_step * 2007 + (mtd->writesize / nand_chip->ecc.size)); 2008 + if (mtd->oobsize < min_oobbytes) { 2148 2009 dev_err(&info->pdev->dev, 2149 2010 "not enough OOB bytes required = %d, available=%d\n", 2150 - ecclayout->eccbytes, mtd->oobsize); 2011 + min_oobbytes, mtd->oobsize); 2151 2012 err = -EINVAL; 2152 2013 goto return_error; 2153 2014 } ··· 2159 2020 goto return_error; 2160 2021 } 2161 2022 2162 - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 2023 + if (dev->of_node) 2024 + mtd_device_register(mtd, NULL, 0); 2025 + else 2026 + mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 2163 2027 2164 2028 platform_set_drvdata(pdev, mtd); 2165 2029 ··· 2193 2051 return 0; 2194 2052 } 2195 2053 2054 + static const struct of_device_id omap_nand_ids[] = { 2055 + { .compatible = "ti,omap2-nand", }, 2056 + {}, 2057 + }; 2058 + 2196 2059 static struct platform_driver omap_nand_driver = { 2197 2060 .probe = omap_nand_probe, 2198 2061 .remove = omap_nand_remove, 2199 2062 .driver = { 2200 2063 .name = DRIVER_NAME, 2064 + .of_match_table = of_match_ptr(omap_nand_ids), 2201 2065 }, 2202 2066 }; 2203 2067
+1
drivers/mtd/nand/orion_nand.c
··· 130 130 nc->cmd_ctrl = orion_nand_cmd_ctrl; 131 131 nc->read_buf = orion_nand_read_buf; 132 132 nc->ecc.mode = NAND_ECC_SOFT; 133 + nc->ecc.algo = NAND_ECC_HAMMING; 133 134 134 135 if (board->chip_delay) 135 136 nc->chip_delay = board->chip_delay;
+8 -8
drivers/mtd/nand/pasemi_nand.c
··· 92 92 93 93 static int pasemi_nand_probe(struct platform_device *ofdev) 94 94 { 95 + struct device *dev = &ofdev->dev; 95 96 struct pci_dev *pdev; 96 - struct device_node *np = ofdev->dev.of_node; 97 + struct device_node *np = dev->of_node; 97 98 struct resource res; 98 99 struct nand_chip *chip; 99 100 int err = 0; ··· 108 107 if (pasemi_nand_mtd) 109 108 return -ENODEV; 110 109 111 - pr_debug("pasemi_nand at %pR\n", &res); 110 + dev_dbg(dev, "pasemi_nand at %pR\n", &res); 112 111 113 112 /* Allocate memory for MTD device structure and private data */ 114 113 chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); 115 114 if (!chip) { 116 - printk(KERN_WARNING 117 - "Unable to allocate PASEMI NAND MTD device structure\n"); 118 115 err = -ENOMEM; 119 116 goto out; 120 117 } ··· 120 121 pasemi_nand_mtd = nand_to_mtd(chip); 121 122 122 123 /* Link the private data with the MTD structure */ 123 - pasemi_nand_mtd->dev.parent = &ofdev->dev; 124 + pasemi_nand_mtd->dev.parent = dev; 124 125 125 126 chip->IO_ADDR_R = of_iomap(np, 0); 126 127 chip->IO_ADDR_W = chip->IO_ADDR_R; ··· 150 151 chip->write_buf = pasemi_write_buf; 151 152 chip->chip_delay = 0; 152 153 chip->ecc.mode = NAND_ECC_SOFT; 154 + chip->ecc.algo = NAND_ECC_HAMMING; 153 155 154 156 /* Enable the following for a flash based bad block table */ 155 157 chip->bbt_options = NAND_BBT_USE_FLASH; ··· 162 162 } 163 163 164 164 if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { 165 - printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n"); 165 + dev_err(dev, "Unable to register MTD device\n"); 166 166 err = -ENODEV; 167 167 goto out_lpc; 168 168 } 169 169 170 - printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n", 171 - res.start, lpcctl); 170 + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, 171 + lpcctl); 172 172 173 173 return 0; 174 174
+1
drivers/mtd/nand/plat_nand.c
··· 74 74 75 75 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 76 76 data->chip.ecc.mode = NAND_ECC_SOFT; 77 + data->chip.ecc.algo = NAND_ECC_HAMMING; 77 78 78 79 platform_set_drvdata(pdev, data); 79 80
+75 -57
drivers/mtd/nand/pxa3xx_nand.c
··· 29 29 #include <linux/slab.h> 30 30 #include <linux/of.h> 31 31 #include <linux/of_device.h> 32 - #include <linux/of_mtd.h> 33 32 #include <linux/platform_data/mtd-nand-pxa3xx.h> 34 33 35 34 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) ··· 323 324 { 0xba20, 16, 16, &timing[3] }, 324 325 }; 325 326 327 + static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section, 328 + struct mtd_oob_region *oobregion) 329 + { 330 + struct nand_chip *chip = mtd_to_nand(mtd); 331 + struct pxa3xx_nand_host *host = nand_get_controller_data(chip); 332 + struct pxa3xx_nand_info *info = host->info_data; 333 + int nchunks = mtd->writesize / info->chunk_size; 334 + 335 + if (section >= nchunks) 336 + return -ERANGE; 337 + 338 + oobregion->offset = ((info->ecc_size + info->spare_size) * section) + 339 + info->spare_size; 340 + oobregion->length = info->ecc_size; 341 + 342 + return 0; 343 + } 344 + 345 + static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section, 346 + struct mtd_oob_region *oobregion) 347 + { 348 + struct nand_chip *chip = mtd_to_nand(mtd); 349 + struct pxa3xx_nand_host *host = nand_get_controller_data(chip); 350 + struct pxa3xx_nand_info *info = host->info_data; 351 + int nchunks = mtd->writesize / info->chunk_size; 352 + 353 + if (section >= nchunks) 354 + return -ERANGE; 355 + 356 + if (!info->spare_size) 357 + return 0; 358 + 359 + oobregion->offset = section * (info->ecc_size + info->spare_size); 360 + oobregion->length = info->spare_size; 361 + if (!section) { 362 + /* 363 + * Bootrom looks in bytes 0 & 5 for bad blocks for the 364 + * 4KB page / 4bit BCH combination. 
365 + */ 366 + if (mtd->writesize == 4096 && info->chunk_size == 2048) { 367 + oobregion->offset += 6; 368 + oobregion->length -= 6; 369 + } else { 370 + oobregion->offset += 2; 371 + oobregion->length -= 2; 372 + } 373 + } 374 + 375 + return 0; 376 + } 377 + 378 + static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = { 379 + .ecc = pxa3xx_ooblayout_ecc, 380 + .free = pxa3xx_ooblayout_free, 381 + }; 382 + 326 383 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' }; 327 384 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' }; 328 385 ··· 400 345 .veroffs = 14, 401 346 .maxblocks = 8, /* Last 8 blocks in each chip */ 402 347 .pattern = bbt_mirror_pattern 403 - }; 404 - 405 - static struct nand_ecclayout ecc_layout_2KB_bch4bit = { 406 - .eccbytes = 32, 407 - .eccpos = { 408 - 32, 33, 34, 35, 36, 37, 38, 39, 409 - 40, 41, 42, 43, 44, 45, 46, 47, 410 - 48, 49, 50, 51, 52, 53, 54, 55, 411 - 56, 57, 58, 59, 60, 61, 62, 63}, 412 - .oobfree = { {2, 30} } 413 - }; 414 - 415 - static struct nand_ecclayout ecc_layout_4KB_bch4bit = { 416 - .eccbytes = 64, 417 - .eccpos = { 418 - 32, 33, 34, 35, 36, 37, 38, 39, 419 - 40, 41, 42, 43, 44, 45, 46, 47, 420 - 48, 49, 50, 51, 52, 53, 54, 55, 421 - 56, 57, 58, 59, 60, 61, 62, 63, 422 - 96, 97, 98, 99, 100, 101, 102, 103, 423 - 104, 105, 106, 107, 108, 109, 110, 111, 424 - 112, 113, 114, 115, 116, 117, 118, 119, 425 - 120, 121, 122, 123, 124, 125, 126, 127}, 426 - /* Bootrom looks in bytes 0 & 5 for bad blocks */ 427 - .oobfree = { {6, 26}, { 64, 32} } 428 - }; 429 - 430 - static struct nand_ecclayout ecc_layout_4KB_bch8bit = { 431 - .eccbytes = 128, 432 - .eccpos = { 433 - 32, 33, 34, 35, 36, 37, 38, 39, 434 - 40, 41, 42, 43, 44, 45, 46, 47, 435 - 48, 49, 50, 51, 52, 53, 54, 55, 436 - 56, 57, 58, 59, 60, 61, 62, 63}, 437 - .oobfree = { } 438 348 }; 439 349 440 350 #define NDTR0_tCH(c) (min((c), 7) << 19) ··· 1566 1546 } 1567 1547 1568 1548 static int pxa_ecc_init(struct pxa3xx_nand_info *info, 1569 - struct 
nand_ecc_ctrl *ecc, 1549 + struct mtd_info *mtd, 1570 1550 int strength, int ecc_stepsize, int page_size) 1571 1551 { 1552 + struct nand_chip *chip = mtd_to_nand(mtd); 1553 + struct nand_ecc_ctrl *ecc = &chip->ecc; 1554 + 1572 1555 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { 1573 1556 info->nfullchunks = 1; 1574 1557 info->ntotalchunks = 1; ··· 1605 1582 info->ecc_size = 32; 1606 1583 ecc->mode = NAND_ECC_HW; 1607 1584 ecc->size = info->chunk_size; 1608 - ecc->layout = &ecc_layout_2KB_bch4bit; 1585 + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); 1609 1586 ecc->strength = 16; 1610 1587 1611 1588 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { ··· 1617 1594 info->ecc_size = 32; 1618 1595 ecc->mode = NAND_ECC_HW; 1619 1596 ecc->size = info->chunk_size; 1620 - ecc->layout = &ecc_layout_4KB_bch4bit; 1597 + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); 1621 1598 ecc->strength = 16; 1622 1599 1623 1600 /* ··· 1635 1612 info->ecc_size = 32; 1636 1613 ecc->mode = NAND_ECC_HW; 1637 1614 ecc->size = info->chunk_size; 1638 - ecc->layout = &ecc_layout_4KB_bch8bit; 1615 + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); 1639 1616 ecc->strength = 16; 1640 1617 } else { 1641 1618 dev_err(&info->pdev->dev, ··· 1674 1651 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) 1675 1652 nand_writel(info, NDECCCTRL, 0x0); 1676 1653 1654 + if (pdata->flash_bbt) 1655 + chip->bbt_options |= NAND_BBT_USE_FLASH; 1656 + 1657 + chip->ecc.strength = pdata->ecc_strength; 1658 + chip->ecc.size = pdata->ecc_step_size; 1659 + 1677 1660 if (nand_scan_ident(mtd, 1, NULL)) 1678 1661 return -ENODEV; 1679 1662 ··· 1692 1663 } 1693 1664 } 1694 1665 1695 - if (pdata->flash_bbt) { 1666 + if (chip->bbt_options & NAND_BBT_USE_FLASH) { 1696 1667 /* 1697 1668 * We'll use a bad block table stored in-flash and don't 1698 1669 * allow writing the bad block marker to the flash. 
1699 1670 */ 1700 - chip->bbt_options |= NAND_BBT_USE_FLASH | 1701 - NAND_BBT_NO_OOB_BBM; 1671 + chip->bbt_options |= NAND_BBT_NO_OOB_BBM; 1702 1672 chip->bbt_td = &bbt_main_descr; 1703 1673 chip->bbt_md = &bbt_mirror_descr; 1704 1674 } ··· 1717 1689 } 1718 1690 } 1719 1691 1720 - if (pdata->ecc_strength && pdata->ecc_step_size) { 1721 - ecc_strength = pdata->ecc_strength; 1722 - ecc_step = pdata->ecc_step_size; 1723 - } else { 1692 + ecc_strength = chip->ecc.strength; 1693 + ecc_step = chip->ecc.size; 1694 + if (!ecc_strength || !ecc_step) { 1724 1695 ecc_strength = chip->ecc_strength_ds; 1725 1696 ecc_step = chip->ecc_step_ds; 1726 1697 } ··· 1730 1703 ecc_step = 512; 1731 1704 } 1732 1705 1733 - ret = pxa_ecc_init(info, &chip->ecc, ecc_strength, 1706 + ret = pxa_ecc_init(info, mtd, ecc_strength, 1734 1707 ecc_step, mtd->writesize); 1735 1708 if (ret) 1736 1709 return ret; ··· 1930 1903 if (of_get_property(np, "marvell,nand-keep-config", NULL)) 1931 1904 pdata->keep_config = 1; 1932 1905 of_property_read_u32(np, "num-cs", &pdata->num_cs); 1933 - pdata->flash_bbt = of_get_nand_on_flash_bbt(np); 1934 - 1935 - pdata->ecc_strength = of_get_nand_ecc_strength(np); 1936 - if (pdata->ecc_strength < 0) 1937 - pdata->ecc_strength = 0; 1938 - 1939 - pdata->ecc_step_size = of_get_nand_ecc_step_size(np); 1940 - if (pdata->ecc_step_size < 0) 1941 - pdata->ecc_step_size = 0; 1942 1906 1943 1907 pdev->dev.platform_data = pdata; 1944 1908
+41 -55
drivers/mtd/nand/qcom_nandc.c
··· 21 21 #include <linux/mtd/partitions.h> 22 22 #include <linux/of.h> 23 23 #include <linux/of_device.h> 24 - #include <linux/of_mtd.h> 25 24 #include <linux/delay.h> 26 25 27 26 /* NANDc reg offsets */ ··· 1436 1437 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1437 1438 struct nand_ecc_ctrl *ecc = &chip->ecc; 1438 1439 u8 *oob = chip->oob_poi; 1439 - int free_boff; 1440 1440 int data_size, oob_size; 1441 1441 int ret, status = 0; 1442 1442 ··· 1449 1451 1450 1452 /* calculate the data and oob size for the last codeword/step */ 1451 1453 data_size = ecc->size - ((ecc->steps - 1) << 2); 1452 - oob_size = ecc->steps << 2; 1453 - 1454 - free_boff = ecc->layout->oobfree[0].offset; 1454 + oob_size = mtd->oobavail; 1455 1455 1456 1456 /* override new oob content to last codeword */ 1457 - memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size); 1457 + mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob, 1458 + 0, mtd->oobavail); 1458 1459 1459 1460 set_address(host, host->cw_size * (ecc->steps - 1), page); 1460 1461 update_rw_regs(host, 1, false); ··· 1707 1710 * This layout is read as is when ECC is disabled. When ECC is enabled, the 1708 1711 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob, 1709 1712 * and assumed as 0xffs when we read a page/oob. The ECC, unused and 1710 - * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e, 1711 - * ecc->bytes is the sum of the three). 1713 + * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is 1714 + * the sum of the three). 
1712 1715 */ 1713 - 1714 - static struct nand_ecclayout * 1715 - qcom_nand_create_layout(struct qcom_nand_host *host) 1716 + static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section, 1717 + struct mtd_oob_region *oobregion) 1716 1718 { 1717 - struct nand_chip *chip = &host->chip; 1718 - struct mtd_info *mtd = nand_to_mtd(chip); 1719 - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1719 + struct nand_chip *chip = mtd_to_nand(mtd); 1720 + struct qcom_nand_host *host = to_qcom_nand_host(chip); 1720 1721 struct nand_ecc_ctrl *ecc = &chip->ecc; 1721 - struct nand_ecclayout *layout; 1722 - int i, j, steps, pos = 0, shift = 0; 1723 1722 1724 - layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL); 1725 - if (!layout) 1726 - return NULL; 1723 + if (section > 1) 1724 + return -ERANGE; 1727 1725 1728 - steps = mtd->writesize / ecc->size; 1729 - layout->eccbytes = steps * ecc->bytes; 1730 - 1731 - layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size; 1732 - layout->oobfree[0].length = steps << 2; 1733 - 1734 - /* 1735 - * the oob bytes in the first n - 1 codewords are all grouped together 1736 - * in the format: 1737 - * DUMMY_BBM + UNUSED + ECC 1738 - */ 1739 - for (i = 0; i < steps - 1; i++) { 1740 - for (j = 0; j < ecc->bytes; j++) 1741 - layout->eccpos[pos++] = i * ecc->bytes + j; 1726 + if (!section) { 1727 + oobregion->length = (ecc->bytes * (ecc->steps - 1)) + 1728 + host->bbm_size; 1729 + oobregion->offset = 0; 1730 + } else { 1731 + oobregion->length = host->ecc_bytes_hw + host->spare_bytes; 1732 + oobregion->offset = mtd->oobsize - oobregion->length; 1742 1733 } 1743 1734 1744 - /* 1745 - * the oob bytes in the last codeword are grouped in the format: 1746 - * BBM + FREE OOB + UNUSED + ECC 1747 - */ 1748 - 1749 - /* fill up the bbm positions */ 1750 - for (j = 0; j < host->bbm_size; j++) 1751 - layout->eccpos[pos++] = i * ecc->bytes + j; 1752 - 1753 - /* 1754 - * fill up the ecc and reserved positions, 
their indices are offseted 1755 - * by the free oob region 1756 - */ 1757 - shift = layout->oobfree[0].length + host->bbm_size; 1758 - 1759 - for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++) 1760 - layout->eccpos[pos++] = i * ecc->bytes + shift + j; 1761 - 1762 - return layout; 1735 + return 0; 1763 1736 } 1737 + 1738 + static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section, 1739 + struct mtd_oob_region *oobregion) 1740 + { 1741 + struct nand_chip *chip = mtd_to_nand(mtd); 1742 + struct qcom_nand_host *host = to_qcom_nand_host(chip); 1743 + struct nand_ecc_ctrl *ecc = &chip->ecc; 1744 + 1745 + if (section) 1746 + return -ERANGE; 1747 + 1748 + oobregion->length = ecc->steps * 4; 1749 + oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size; 1750 + 1751 + return 0; 1752 + } 1753 + 1754 + static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = { 1755 + .ecc = qcom_nand_ooblayout_ecc, 1756 + .free = qcom_nand_ooblayout_free, 1757 + }; 1764 1758 1765 1759 static int qcom_nand_host_setup(struct qcom_nand_host *host) 1766 1760 { ··· 1839 1851 1840 1852 ecc->mode = NAND_ECC_HW; 1841 1853 1842 - ecc->layout = qcom_nand_create_layout(host); 1843 - if (!ecc->layout) 1844 - return -ENOMEM; 1854 + mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); 1845 1855 1846 1856 cwperpage = mtd->writesize / ecc->size; 1847 1857
+30 -6
drivers/mtd/nand/s3c2410.c
··· 84 84 85 85 /* new oob placement block for use with hardware ecc generation 86 86 */ 87 + static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section, 88 + struct mtd_oob_region *oobregion) 89 + { 90 + if (section) 91 + return -ERANGE; 87 92 88 - static struct nand_ecclayout nand_hw_eccoob = { 89 - .eccbytes = 3, 90 - .eccpos = {0, 1, 2}, 91 - .oobfree = {{8, 8}} 93 + oobregion->offset = 0; 94 + oobregion->length = 3; 95 + 96 + return 0; 97 + } 98 + 99 + static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section, 100 + struct mtd_oob_region *oobregion) 101 + { 102 + if (section) 103 + return -ERANGE; 104 + 105 + oobregion->offset = 8; 106 + oobregion->length = 8; 107 + 108 + return 0; 109 + } 110 + 111 + static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = { 112 + .ecc = s3c2410_ooblayout_ecc, 113 + .free = s3c2410_ooblayout_free, 92 114 }; 93 115 94 116 /* controller and mtd information */ ··· 564 542 diff0 |= (diff1 << 8); 565 543 diff0 |= (diff2 << 16); 566 544 567 - if ((diff0 & ~(1<<fls(diff0))) == 0) 545 + /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */ 546 + if ((diff0 & (diff0 - 1)) == 0) 568 547 return 1; 569 548 570 549 return -1; ··· 882 859 } 883 860 #else 884 861 chip->ecc.mode = NAND_ECC_SOFT; 862 + chip->ecc.algo = NAND_ECC_HAMMING; 885 863 #endif 886 864 887 865 if (set->disable_ecc) ··· 943 919 } else { 944 920 chip->ecc.size = 512; 945 921 chip->ecc.bytes = 3; 946 - chip->ecc.layout = &nand_hw_eccoob; 922 + mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops); 947 923 } 948 924 } 949 925
+80 -35
drivers/mtd/nand/sh_flctl.c
··· 31 31 #include <linux/io.h> 32 32 #include <linux/of.h> 33 33 #include <linux/of_device.h> 34 - #include <linux/of_mtd.h> 35 34 #include <linux/platform_device.h> 36 35 #include <linux/pm_runtime.h> 37 36 #include <linux/sh_dma.h> ··· 42 43 #include <linux/mtd/partitions.h> 43 44 #include <linux/mtd/sh_flctl.h> 44 45 45 - static struct nand_ecclayout flctl_4secc_oob_16 = { 46 - .eccbytes = 10, 47 - .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 48 - .oobfree = { 49 - {.offset = 12, 50 - . length = 4} }, 46 + static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section, 47 + struct mtd_oob_region *oobregion) 48 + { 49 + struct nand_chip *chip = mtd_to_nand(mtd); 50 + 51 + if (section) 52 + return -ERANGE; 53 + 54 + oobregion->offset = 0; 55 + oobregion->length = chip->ecc.bytes; 56 + 57 + return 0; 58 + } 59 + 60 + static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section, 61 + struct mtd_oob_region *oobregion) 62 + { 63 + if (section) 64 + return -ERANGE; 65 + 66 + oobregion->offset = 12; 67 + oobregion->length = 4; 68 + 69 + return 0; 70 + } 71 + 72 + static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = { 73 + .ecc = flctl_4secc_ooblayout_sp_ecc, 74 + .free = flctl_4secc_ooblayout_sp_free, 51 75 }; 52 76 53 - static struct nand_ecclayout flctl_4secc_oob_64 = { 54 - .eccbytes = 4 * 10, 55 - .eccpos = { 56 - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 57 - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 58 - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 59 - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, 60 - .oobfree = { 61 - {.offset = 2, .length = 4}, 62 - {.offset = 16, .length = 6}, 63 - {.offset = 32, .length = 6}, 64 - {.offset = 48, .length = 6} }, 77 + static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section, 78 + struct mtd_oob_region *oobregion) 79 + { 80 + struct nand_chip *chip = mtd_to_nand(mtd); 81 + 82 + if (section >= chip->ecc.steps) 83 + return -ERANGE; 84 + 85 + oobregion->offset = (section * 16) + 6; 86 + 
oobregion->length = chip->ecc.bytes; 87 + 88 + return 0; 89 + } 90 + 91 + static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section, 92 + struct mtd_oob_region *oobregion) 93 + { 94 + struct nand_chip *chip = mtd_to_nand(mtd); 95 + 96 + if (section >= chip->ecc.steps) 97 + return -ERANGE; 98 + 99 + oobregion->offset = section * 16; 100 + oobregion->length = 6; 101 + 102 + if (!section) { 103 + oobregion->offset += 2; 104 + oobregion->length -= 2; 105 + } 106 + 107 + return 0; 108 + } 109 + 110 + static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = { 111 + .ecc = flctl_4secc_ooblayout_lp_ecc, 112 + .free = flctl_4secc_ooblayout_lp_free, 65 113 }; 66 114 67 115 static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; ··· 1033 987 1034 988 if (flctl->hwecc) { 1035 989 if (mtd->writesize == 512) { 1036 - chip->ecc.layout = &flctl_4secc_oob_16; 990 + mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops); 1037 991 chip->badblock_pattern = &flctl_4secc_smallpage; 1038 992 } else { 1039 - chip->ecc.layout = &flctl_4secc_oob_64; 993 + mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops); 1040 994 chip->badblock_pattern = &flctl_4secc_largepage; 1041 995 } 1042 996 ··· 1051 1005 flctl->flcmncr_base |= _4ECCEN; 1052 1006 } else { 1053 1007 chip->ecc.mode = NAND_ECC_SOFT; 1008 + chip->ecc.algo = NAND_ECC_HAMMING; 1054 1009 } 1055 1010 1056 1011 return 0; ··· 1091 1044 const struct of_device_id *match; 1092 1045 struct flctl_soc_config *config; 1093 1046 struct sh_flctl_platform_data *pdata; 1094 - struct device_node *dn = dev->of_node; 1095 - int ret; 1096 1047 1097 1048 match = of_match_device(of_flctl_match, dev); 1098 1049 if (match) ··· 1109 1064 pdata->flcmncr_val = config->flcmncr_val; 1110 1065 pdata->has_hwecc = config->has_hwecc; 1111 1066 pdata->use_holden = config->use_holden; 1112 - 1113 - /* parse user defined options */ 1114 - ret = of_get_nand_bus_width(dn); 1115 - if (ret == 16) 1116 - pdata->flcmncr_val |= SEL_16BIT; 1117 - else 
if (ret != 8) { 1118 - dev_err(dev, "%s: invalid bus width\n", __func__); 1119 - return NULL; 1120 - } 1121 1067 1122 1068 return pdata; 1123 1069 } ··· 1172 1136 nand->chip_delay = 20; 1173 1137 1174 1138 nand->read_byte = flctl_read_byte; 1139 + nand->read_word = flctl_read_word; 1175 1140 nand->write_buf = flctl_write_buf; 1176 1141 nand->read_buf = flctl_read_buf; 1177 1142 nand->select_chip = flctl_select_chip; 1178 1143 nand->cmdfunc = flctl_cmdfunc; 1179 1144 1180 - if (pdata->flcmncr_val & SEL_16BIT) { 1145 + if (pdata->flcmncr_val & SEL_16BIT) 1181 1146 nand->options |= NAND_BUSWIDTH_16; 1182 - nand->read_word = flctl_read_word; 1183 - } 1184 1147 1185 1148 pm_runtime_enable(&pdev->dev); 1186 1149 pm_runtime_resume(&pdev->dev); ··· 1189 1154 ret = nand_scan_ident(flctl_mtd, 1, NULL); 1190 1155 if (ret) 1191 1156 goto err_chip; 1157 + 1158 + if (nand->options & NAND_BUSWIDTH_16) { 1159 + /* 1160 + * NAND_BUSWIDTH_16 may have been set by nand_scan_ident(). 1161 + * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign 1162 + * flctl->flcmncr_base to pdata->flcmncr_val. 1163 + */ 1164 + pdata->flcmncr_val |= SEL_16BIT; 1165 + flctl->flcmncr_base = pdata->flcmncr_val; 1166 + } 1192 1167 1193 1168 ret = flctl_chip_init_tail(flctl_mtd); 1194 1169 if (ret)
+1 -1
drivers/mtd/nand/sharpsl.c
··· 148 148 /* Link the private data with the MTD structure */ 149 149 mtd = nand_to_mtd(this); 150 150 mtd->dev.parent = &pdev->dev; 151 + mtd_set_ooblayout(mtd, data->ecc_layout); 151 152 152 153 platform_set_drvdata(pdev, sharpsl); 153 154 ··· 171 170 this->ecc.bytes = 3; 172 171 this->ecc.strength = 1; 173 172 this->badblock_pattern = data->badblock_pattern; 174 - this->ecc.layout = data->ecc_layout; 175 173 this->ecc.hwctl = sharpsl_nand_enable_hwecc; 176 174 this->ecc.calculate = sharpsl_nand_calculate_ecc; 177 175 this->ecc.correct = nand_correct_data;
+78 -17
drivers/mtd/nand/sm_common.c
··· 12 12 #include <linux/sizes.h> 13 13 #include "sm_common.h" 14 14 15 - static struct nand_ecclayout nand_oob_sm = { 16 - .eccbytes = 6, 17 - .eccpos = {8, 9, 10, 13, 14, 15}, 18 - .oobfree = { 19 - {.offset = 0 , .length = 4}, /* reserved */ 20 - {.offset = 6 , .length = 2}, /* LBA1 */ 21 - {.offset = 11, .length = 2} /* LBA2 */ 15 + static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section, 16 + struct mtd_oob_region *oobregion) 17 + { 18 + if (section > 1) 19 + return -ERANGE; 20 + 21 + oobregion->length = 3; 22 + oobregion->offset = ((section + 1) * 8) - 3; 23 + 24 + return 0; 25 + } 26 + 27 + static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section, 28 + struct mtd_oob_region *oobregion) 29 + { 30 + switch (section) { 31 + case 0: 32 + /* reserved */ 33 + oobregion->offset = 0; 34 + oobregion->length = 4; 35 + break; 36 + case 1: 37 + /* LBA1 */ 38 + oobregion->offset = 6; 39 + oobregion->length = 2; 40 + break; 41 + case 2: 42 + /* LBA2 */ 43 + oobregion->offset = 11; 44 + oobregion->length = 2; 45 + break; 46 + default: 47 + return -ERANGE; 22 48 } 49 + 50 + return 0; 51 + } 52 + 53 + static const struct mtd_ooblayout_ops oob_sm_ops = { 54 + .ecc = oob_sm_ooblayout_ecc, 55 + .free = oob_sm_ooblayout_free, 23 56 }; 24 57 25 58 /* NOTE: This layout is is not compatabable with SmartMedia, */ ··· 61 28 /* If you use smftl, it will bypass this and work correctly */ 62 29 /* If you not, then you break SmartMedia compliance anyway */ 63 30 64 - static struct nand_ecclayout nand_oob_sm_small = { 65 - .eccbytes = 3, 66 - .eccpos = {0, 1, 2}, 67 - .oobfree = { 68 - {.offset = 3 , .length = 2}, /* reserved */ 69 - {.offset = 6 , .length = 2}, /* LBA1 */ 70 - } 71 - }; 31 + static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section, 32 + struct mtd_oob_region *oobregion) 33 + { 34 + if (section) 35 + return -ERANGE; 72 36 37 + oobregion->length = 3; 38 + oobregion->offset = 0; 39 + 40 + return 0; 41 + } 42 + 43 + static int 
oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section, 44 + struct mtd_oob_region *oobregion) 45 + { 46 + switch (section) { 47 + case 0: 48 + /* reserved */ 49 + oobregion->offset = 3; 50 + oobregion->length = 2; 51 + break; 52 + case 1: 53 + /* LBA1 */ 54 + oobregion->offset = 6; 55 + oobregion->length = 2; 56 + break; 57 + default: 58 + return -ERANGE; 59 + } 60 + 61 + return 0; 62 + } 63 + 64 + static const struct mtd_ooblayout_ops oob_sm_small_ops = { 65 + .ecc = oob_sm_small_ooblayout_ecc, 66 + .free = oob_sm_small_ooblayout_free, 67 + }; 73 68 74 69 static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) 75 70 { ··· 182 121 183 122 /* ECC layout */ 184 123 if (mtd->writesize == SM_SECTOR_SIZE) 185 - chip->ecc.layout = &nand_oob_sm; 124 + mtd_set_ooblayout(mtd, &oob_sm_ops); 186 125 else if (mtd->writesize == SM_SMALL_PAGE) 187 - chip->ecc.layout = &nand_oob_sm_small; 126 + mtd_set_ooblayout(mtd, &oob_sm_small_ops); 188 127 else 189 128 return -ENODEV; 190 129
+1
drivers/mtd/nand/socrates_nand.c
··· 180 180 nand_chip->dev_ready = socrates_nand_device_ready; 181 181 182 182 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ 183 + nand_chip->ecc.algo = NAND_ECC_HAMMING; 183 184 184 185 /* TODO: I have no idea what real delay is. */ 185 186 nand_chip->chip_delay = 20; /* 20us command delay time */
+389 -211
drivers/mtd/nand/sunxi_nand.c
··· 30 30 #include <linux/of.h> 31 31 #include <linux/of_device.h> 32 32 #include <linux/of_gpio.h> 33 - #include <linux/of_mtd.h> 34 33 #include <linux/mtd/mtd.h> 35 34 #include <linux/mtd/nand.h> 36 35 #include <linux/mtd/partitions.h> ··· 38 39 #include <linux/dmaengine.h> 39 40 #include <linux/gpio.h> 40 41 #include <linux/interrupt.h> 41 - #include <linux/io.h> 42 + #include <linux/iopoll.h> 42 43 43 44 #define NFC_REG_CTL 0x0000 44 45 #define NFC_REG_ST 0x0004 ··· 154 155 /* define bit use in NFC_ECC_ST */ 155 156 #define NFC_ECC_ERR(x) BIT(x) 156 157 #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) 157 - #define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff) 158 + #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) 158 159 159 160 #define NFC_DEFAULT_TIMEOUT_MS 1000 160 161 ··· 211 212 * sunxi HW ECC infos: stores information related to HW ECC support 212 213 * 213 214 * @mode: the sunxi ECC mode field deduced from ECC requirements 214 - * @layout: the OOB layout depending on the ECC requirements and the 215 - * selected ECC mode 216 215 */ 217 216 struct sunxi_nand_hw_ecc { 218 217 int mode; 219 - struct nand_ecclayout layout; 220 218 }; 221 219 222 220 /* ··· 235 239 u32 timing_cfg; 236 240 u32 timing_ctl; 237 241 int selected; 242 + int addr_cycles; 243 + u32 addr[2]; 244 + int cmd_cycles; 245 + u8 cmd[2]; 238 246 int nsels; 239 247 struct sunxi_nand_chip_sel sels[0]; 240 248 }; ··· 298 298 return IRQ_HANDLED; 299 299 } 300 300 301 - static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags, 302 - unsigned int timeout_ms) 301 + static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events, 302 + bool use_polling, unsigned int timeout_ms) 303 303 { 304 - init_completion(&nfc->complete); 304 + int ret; 305 305 306 - writel(flags, nfc->regs + NFC_REG_INT); 306 + if (events & ~NFC_INT_MASK) 307 + return -EINVAL; 307 308 308 309 if (!timeout_ms) 309 310 timeout_ms = NFC_DEFAULT_TIMEOUT_MS; 310 311 311 - if 
(!wait_for_completion_timeout(&nfc->complete, 312 - msecs_to_jiffies(timeout_ms))) { 313 - dev_err(nfc->dev, "wait interrupt timedout\n"); 314 - return -ETIMEDOUT; 312 + if (!use_polling) { 313 + init_completion(&nfc->complete); 314 + 315 + writel(events, nfc->regs + NFC_REG_INT); 316 + 317 + ret = wait_for_completion_timeout(&nfc->complete, 318 + msecs_to_jiffies(timeout_ms)); 319 + 320 + writel(0, nfc->regs + NFC_REG_INT); 321 + } else { 322 + u32 status; 323 + 324 + ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status, 325 + (status & events) == events, 1, 326 + timeout_ms * 1000); 315 327 } 316 328 317 - return 0; 329 + writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST); 330 + 331 + if (ret) 332 + dev_err(nfc->dev, "wait interrupt timedout\n"); 333 + 334 + return ret; 318 335 } 319 336 320 337 static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc) 321 338 { 322 - unsigned long timeout = jiffies + 323 - msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS); 339 + u32 status; 340 + int ret; 324 341 325 - do { 326 - if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS)) 327 - return 0; 328 - } while (time_before(jiffies, timeout)); 342 + ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status, 343 + !(status & NFC_CMD_FIFO_STATUS), 1, 344 + NFC_DEFAULT_TIMEOUT_MS * 1000); 345 + if (ret) 346 + dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n"); 329 347 330 - dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n"); 331 - return -ETIMEDOUT; 348 + return ret; 332 349 } 333 350 334 351 static int sunxi_nfc_rst(struct sunxi_nfc *nfc) 335 352 { 336 - unsigned long timeout = jiffies + 337 - msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS); 353 + u32 ctl; 354 + int ret; 338 355 339 356 writel(0, nfc->regs + NFC_REG_ECC_CTL); 340 357 writel(NFC_RESET, nfc->regs + NFC_REG_CTL); 341 358 342 - do { 343 - if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET)) 344 - return 0; 345 - } while (time_before(jiffies, timeout)); 359 + ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, 
ctl, 360 + !(ctl & NFC_RESET), 1, 361 + NFC_DEFAULT_TIMEOUT_MS * 1000); 362 + if (ret) 363 + dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); 346 364 347 - dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); 348 - return -ETIMEDOUT; 365 + return ret; 349 366 } 350 367 351 368 static int sunxi_nfc_dev_ready(struct mtd_info *mtd) ··· 371 354 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); 372 355 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); 373 356 struct sunxi_nand_rb *rb; 374 - unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20); 375 357 int ret; 376 358 377 359 if (sunxi_nand->selected < 0) ··· 380 364 381 365 switch (rb->type) { 382 366 case RB_NATIVE: 383 - ret = !!(readl(nfc->regs + NFC_REG_ST) & 384 - NFC_RB_STATE(rb->info.nativeid)); 385 - if (ret) 386 - break; 387 - 388 - sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo); 389 367 ret = !!(readl(nfc->regs + NFC_REG_ST) & 390 368 NFC_RB_STATE(rb->info.nativeid)); 391 369 break; ··· 417 407 sel = &sunxi_nand->sels[chip]; 418 408 419 409 ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | 420 - NFC_PAGE_SHIFT(nand->page_shift - 10); 410 + NFC_PAGE_SHIFT(nand->page_shift); 421 411 if (sel->rb.type == RB_NONE) { 422 412 nand->dev_ready = NULL; 423 413 } else { ··· 462 452 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD; 463 453 writel(tmp, nfc->regs + NFC_REG_CMD); 464 454 465 - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 455 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 466 456 if (ret) 467 457 break; 468 458 ··· 497 487 NFC_ACCESS_DIR; 498 488 writel(tmp, nfc->regs + NFC_REG_CMD); 499 489 500 - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 490 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 501 491 if (ret) 502 492 break; 503 493 ··· 521 511 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); 522 512 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); 523 513 int ret; 524 - u32 tmp; 525 514 
526 515 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); 527 516 if (ret) 528 517 return; 529 518 530 - if (ctrl & NAND_CTRL_CHANGE) { 531 - tmp = readl(nfc->regs + NFC_REG_CTL); 532 - if (ctrl & NAND_NCE) 533 - tmp |= NFC_CE_CTL; 534 - else 535 - tmp &= ~NFC_CE_CTL; 536 - writel(tmp, nfc->regs + NFC_REG_CTL); 537 - } 519 + if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) && 520 + !(ctrl & (NAND_CLE | NAND_ALE))) { 521 + u32 cmd = 0; 538 522 539 - if (dat == NAND_CMD_NONE) 540 - return; 523 + if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles) 524 + return; 525 + 526 + if (sunxi_nand->cmd_cycles--) 527 + cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0]; 528 + 529 + if (sunxi_nand->cmd_cycles--) { 530 + cmd |= NFC_SEND_CMD2; 531 + writel(sunxi_nand->cmd[1], 532 + nfc->regs + NFC_REG_RCMD_SET); 533 + } 534 + 535 + sunxi_nand->cmd_cycles = 0; 536 + 537 + if (sunxi_nand->addr_cycles) { 538 + cmd |= NFC_SEND_ADR | 539 + NFC_ADR_NUM(sunxi_nand->addr_cycles); 540 + writel(sunxi_nand->addr[0], 541 + nfc->regs + NFC_REG_ADDR_LOW); 542 + } 543 + 544 + if (sunxi_nand->addr_cycles > 4) 545 + writel(sunxi_nand->addr[1], 546 + nfc->regs + NFC_REG_ADDR_HIGH); 547 + 548 + writel(cmd, nfc->regs + NFC_REG_CMD); 549 + sunxi_nand->addr[0] = 0; 550 + sunxi_nand->addr[1] = 0; 551 + sunxi_nand->addr_cycles = 0; 552 + sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 553 + } 541 554 542 555 if (ctrl & NAND_CLE) { 543 - writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD); 544 - } else { 545 - writel(dat, nfc->regs + NFC_REG_ADDR_LOW); 546 - writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD); 556 + sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat; 557 + } else if (ctrl & NAND_ALE) { 558 + sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |= 559 + dat << ((sunxi_nand->addr_cycles % 4) * 8); 560 + sunxi_nand->addr_cycles++; 547 561 } 548 - 549 - sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 550 562 } 551 563 552 564 /* These seed values have been extracted from Allwinner's BSP */ ··· 749 717 ecc_ctl = 
readl(nfc->regs + NFC_REG_ECC_CTL); 750 718 ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | 751 719 NFC_ECC_BLOCK_SIZE_MSK); 752 - ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION; 720 + ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION | 721 + NFC_ECC_PIPELINE; 753 722 754 723 writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); 755 724 } ··· 772 739 buf[3] = user_data >> 24; 773 740 } 774 741 742 + static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) 743 + { 744 + return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); 745 + } 746 + 747 + static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, 748 + int step, bool bbm, int page) 749 + { 750 + struct nand_chip *nand = mtd_to_nand(mtd); 751 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 752 + 753 + sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)), 754 + oob); 755 + 756 + /* De-randomize the Bad Block Marker. */ 757 + if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) 758 + sunxi_nfc_randomize_bbm(mtd, page, oob); 759 + } 760 + 761 + static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, 762 + const u8 *oob, int step, 763 + bool bbm, int page) 764 + { 765 + struct nand_chip *nand = mtd_to_nand(mtd); 766 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 767 + u8 user_data[4]; 768 + 769 + /* Randomize the Bad Block Marker. 
*/ 770 + if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) { 771 + memcpy(user_data, oob, sizeof(user_data)); 772 + sunxi_nfc_randomize_bbm(mtd, page, user_data); 773 + oob = user_data; 774 + } 775 + 776 + writel(sunxi_nfc_buf_to_user_data(oob), 777 + nfc->regs + NFC_REG_USER_DATA(step)); 778 + } 779 + 780 + static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, 781 + unsigned int *max_bitflips, int ret) 782 + { 783 + if (ret < 0) { 784 + mtd->ecc_stats.failed++; 785 + } else { 786 + mtd->ecc_stats.corrected += ret; 787 + *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 788 + } 789 + } 790 + 791 + static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, 792 + int step, bool *erased) 793 + { 794 + struct nand_chip *nand = mtd_to_nand(mtd); 795 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 796 + struct nand_ecc_ctrl *ecc = &nand->ecc; 797 + u32 status, tmp; 798 + 799 + *erased = false; 800 + 801 + status = readl(nfc->regs + NFC_REG_ECC_ST); 802 + 803 + if (status & NFC_ECC_ERR(step)) 804 + return -EBADMSG; 805 + 806 + if (status & NFC_ECC_PAT_FOUND(step)) { 807 + u8 pattern; 808 + 809 + if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) { 810 + pattern = 0x0; 811 + } else { 812 + pattern = 0xff; 813 + *erased = true; 814 + } 815 + 816 + if (data) 817 + memset(data, pattern, ecc->size); 818 + 819 + if (oob) 820 + memset(oob, pattern, ecc->bytes + 4); 821 + 822 + return 0; 823 + } 824 + 825 + tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step)); 826 + 827 + return NFC_ECC_ERR_CNT(step, tmp); 828 + } 829 + 775 830 static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, 776 831 u8 *data, int data_off, 777 832 u8 *oob, int oob_off, 778 833 int *cur_off, 779 834 unsigned int *max_bitflips, 780 - bool bbm, int page) 835 + bool bbm, bool oob_required, int page) 781 836 { 782 837 struct nand_chip *nand = mtd_to_nand(mtd); 783 838 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 784 839 struct nand_ecc_ctrl 
*ecc = &nand->ecc; 785 840 int raw_mode = 0; 786 - u32 status; 841 + bool erased; 787 842 int ret; 788 843 789 844 if (*cur_off != data_off) ··· 890 769 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, 891 770 nfc->regs + NFC_REG_CMD); 892 771 893 - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 772 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 894 773 sunxi_nfc_randomizer_disable(mtd); 895 774 if (ret) 896 775 return ret; 897 776 898 777 *cur_off = oob_off + ecc->bytes + 4; 899 778 900 - status = readl(nfc->regs + NFC_REG_ECC_ST); 901 - if (status & NFC_ECC_PAT_FOUND(0)) { 902 - u8 pattern = 0xff; 903 - 904 - if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) 905 - pattern = 0x0; 906 - 907 - memset(data, pattern, ecc->size); 908 - memset(oob, pattern, ecc->bytes + 4); 909 - 779 + ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, 780 + &erased); 781 + if (erased) 910 782 return 1; 911 - } 912 783 913 - ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0))); 914 - 915 - memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); 916 - 917 - nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 918 - sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page); 919 - 920 - if (status & NFC_ECC_ERR(0)) { 784 + if (ret < 0) { 921 785 /* 922 786 * Re-read the data with the randomizer disabled to identify 923 787 * bitflips in erased pages. 
··· 910 804 if (nand->options & NAND_NEED_SCRAMBLING) { 911 805 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); 912 806 nand->read_buf(mtd, data, ecc->size); 913 - nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 914 - nand->read_buf(mtd, oob, ecc->bytes + 4); 807 + } else { 808 + memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, 809 + ecc->size); 915 810 } 811 + 812 + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 813 + nand->read_buf(mtd, oob, ecc->bytes + 4); 916 814 917 815 ret = nand_check_erased_ecc_chunk(data, ecc->size, 918 816 oob, ecc->bytes + 4, ··· 924 814 if (ret >= 0) 925 815 raw_mode = 1; 926 816 } else { 927 - /* 928 - * The engine protects 4 bytes of OOB data per chunk. 929 - * Retrieve the corrected OOB bytes. 930 - */ 931 - sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)), 932 - oob); 817 + memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); 933 818 934 - /* De-randomize the Bad Block Marker. */ 935 - if (bbm && nand->options & NAND_NEED_SCRAMBLING) 936 - sunxi_nfc_randomize_bbm(mtd, page, oob); 819 + if (oob_required) { 820 + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); 821 + sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, 822 + true, page); 823 + 824 + sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0, 825 + bbm, page); 826 + } 937 827 } 938 828 939 - if (ret < 0) { 940 - mtd->ecc_stats.failed++; 941 - } else { 942 - mtd->ecc_stats.corrected += ret; 943 - *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 944 - } 829 + sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret); 945 830 946 831 return raw_mode; 947 832 } ··· 953 848 if (len <= 0) 954 849 return; 955 850 956 - if (*cur_off != offset) 851 + if (!cur_off || *cur_off != offset) 957 852 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 958 853 offset + mtd->writesize, -1); 959 854 ··· 963 858 sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, 964 859 false, page); 965 860 966 - *cur_off = mtd->oobsize + mtd->writesize; 967 - } 968 - 969 - static inline 
u32 sunxi_nfc_buf_to_user_data(const u8 *buf) 970 - { 971 - return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); 861 + if (cur_off) 862 + *cur_off = mtd->oobsize + mtd->writesize; 972 863 } 973 864 974 865 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, ··· 983 882 984 883 sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); 985 884 986 - /* Fill OOB data in */ 987 - if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) { 988 - u8 user_data[4]; 989 - 990 - memcpy(user_data, oob, 4); 991 - sunxi_nfc_randomize_bbm(mtd, page, user_data); 992 - writel(sunxi_nfc_buf_to_user_data(user_data), 993 - nfc->regs + NFC_REG_USER_DATA(0)); 994 - } else { 995 - writel(sunxi_nfc_buf_to_user_data(oob), 996 - nfc->regs + NFC_REG_USER_DATA(0)); 997 - } 998 - 999 885 if (data_off + ecc->size != oob_off) 1000 886 nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); 1001 887 ··· 991 903 return ret; 992 904 993 905 sunxi_nfc_randomizer_enable(mtd); 906 + sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page); 907 + 994 908 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | 995 909 NFC_ACCESS_DIR | NFC_ECC_OP, 996 910 nfc->regs + NFC_REG_CMD); 997 911 998 - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); 912 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 999 913 sunxi_nfc_randomizer_disable(mtd); 1000 914 if (ret) 1001 915 return ret; ··· 1019 929 if (len <= 0) 1020 930 return; 1021 931 1022 - if (*cur_off != offset) 932 + if (!cur_off || *cur_off != offset) 1023 933 nand->cmdfunc(mtd, NAND_CMD_RNDIN, 1024 934 offset + mtd->writesize, -1); 1025 935 1026 936 sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); 1027 937 1028 - *cur_off = mtd->oobsize + mtd->writesize; 938 + if (cur_off) 939 + *cur_off = mtd->oobsize + mtd->writesize; 1029 940 } 1030 941 1031 942 static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, ··· 1049 958 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, 1050 959 oob_off + 
mtd->writesize, 1051 960 &cur_off, &max_bitflips, 1052 - !i, page); 961 + !i, oob_required, page); 1053 962 if (ret < 0) 1054 963 return ret; 1055 964 else if (ret) ··· 1059 968 if (oob_required) 1060 969 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, 1061 970 !raw_mode, page); 971 + 972 + sunxi_nfc_hw_ecc_disable(mtd); 973 + 974 + return max_bitflips; 975 + } 976 + 977 + static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, 978 + struct nand_chip *chip, 979 + u32 data_offs, u32 readlen, 980 + u8 *bufpoi, int page) 981 + { 982 + struct nand_ecc_ctrl *ecc = &chip->ecc; 983 + int ret, i, cur_off = 0; 984 + unsigned int max_bitflips = 0; 985 + 986 + sunxi_nfc_hw_ecc_enable(mtd); 987 + 988 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 989 + for (i = data_offs / ecc->size; 990 + i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { 991 + int data_off = i * ecc->size; 992 + int oob_off = i * (ecc->bytes + 4); 993 + u8 *data = bufpoi + data_off; 994 + u8 *oob = chip->oob_poi + oob_off; 995 + 996 + ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, 997 + oob, 998 + oob_off + mtd->writesize, 999 + &cur_off, &max_bitflips, !i, 1000 + false, page); 1001 + if (ret < 0) 1002 + return ret; 1003 + } 1062 1004 1063 1005 sunxi_nfc_hw_ecc_disable(mtd); 1064 1006 ··· 1150 1026 1151 1027 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, 1152 1028 oob_off, &cur_off, 1153 - &max_bitflips, !i, page); 1029 + &max_bitflips, !i, 1030 + oob_required, 1031 + page); 1154 1032 if (ret < 0) 1155 1033 return ret; 1156 1034 else if (ret) ··· 1200 1074 return 0; 1201 1075 } 1202 1076 1077 + static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd, 1078 + struct nand_chip *chip, 1079 + int page) 1080 + { 1081 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 1082 + 1083 + chip->pagebuf = -1; 1084 + 1085 + return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page); 1086 + } 1087 + 1088 + static int sunxi_nfc_hw_common_ecc_write_oob(struct 
mtd_info *mtd, 1089 + struct nand_chip *chip, 1090 + int page) 1091 + { 1092 + int ret, status; 1093 + 1094 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page); 1095 + 1096 + chip->pagebuf = -1; 1097 + 1098 + memset(chip->buffers->databuf, 0xff, mtd->writesize); 1099 + ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page); 1100 + if (ret) 1101 + return ret; 1102 + 1103 + /* Send command to program the OOB data */ 1104 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1105 + 1106 + status = chip->waitfunc(mtd, chip); 1107 + 1108 + return status & NAND_STATUS_FAIL ? -EIO : 0; 1109 + } 1110 + 1203 1111 static const s32 tWB_lut[] = {6, 12, 16, 20}; 1204 1112 static const s32 tRHW_lut[] = {4, 8, 12, 20}; 1205 1113 ··· 1261 1101 struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller); 1262 1102 u32 min_clk_period = 0; 1263 1103 s32 tWB, tADL, tWHR, tRHW, tCAD; 1104 + long real_clk_rate; 1264 1105 1265 1106 /* T1 <=> tCLS */ 1266 1107 if (timings->tCLS_min > min_clk_period) ··· 1324 1163 min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2); 1325 1164 1326 1165 /* T16 - T19 + tCAD */ 1166 + if (timings->tWB_max > (min_clk_period * 20)) 1167 + min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20); 1168 + 1169 + if (timings->tADL_min > (min_clk_period * 32)) 1170 + min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32); 1171 + 1172 + if (timings->tWHR_min > (min_clk_period * 32)) 1173 + min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32); 1174 + 1175 + if (timings->tRHW_min > (min_clk_period * 20)) 1176 + min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20); 1177 + 1327 1178 tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max, 1328 1179 min_clk_period); 1329 1180 if (tWB < 0) { ··· 1371 1198 /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */ 1372 1199 chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); 1373 1200 1201 + /* Convert min_clk_period from picoseconds to nanoseconds */ 1202 + min_clk_period = DIV_ROUND_UP(min_clk_period, 
1000); 1203 + 1204 + /* 1205 + * Unlike what is stated in Allwinner datasheet, the clk_rate should 1206 + * be set to (1 / min_clk_period), and not (2 / min_clk_period). 1207 + * This new formula was verified with a scope and validated by 1208 + * Allwinner engineers. 1209 + */ 1210 + chip->clk_rate = NSEC_PER_SEC / min_clk_period; 1211 + real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); 1212 + 1374 1213 /* 1375 1214 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data 1376 1215 * output cycle timings shall be used if the host drives tRC less than 1377 1216 * 30 ns. 1378 1217 */ 1379 - chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0; 1380 - 1381 - /* Convert min_clk_period from picoseconds to nanoseconds */ 1382 - min_clk_period = DIV_ROUND_UP(min_clk_period, 1000); 1383 - 1384 - /* 1385 - * Convert min_clk_period into a clk frequency, then get the 1386 - * appropriate rate for the NAND controller IP given this formula 1387 - * (specified in the datasheet): 1388 - * nand clk_rate = 2 * min_clk_rate 1389 - */ 1390 - chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period; 1218 + min_clk_period = NSEC_PER_SEC / real_clk_rate; 1219 + chip->timing_ctl = ((min_clk_period * 2) < 30) ? 
1220 + NFC_TIMING_CTL_EDO : 0; 1391 1221 1392 1222 return 0; 1393 1223 } ··· 1433 1257 return sunxi_nand_chip_set_timings(chip, timings); 1434 1258 } 1435 1259 1260 + static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section, 1261 + struct mtd_oob_region *oobregion) 1262 + { 1263 + struct nand_chip *nand = mtd_to_nand(mtd); 1264 + struct nand_ecc_ctrl *ecc = &nand->ecc; 1265 + 1266 + if (section >= ecc->steps) 1267 + return -ERANGE; 1268 + 1269 + oobregion->offset = section * (ecc->bytes + 4) + 4; 1270 + oobregion->length = ecc->bytes; 1271 + 1272 + return 0; 1273 + } 1274 + 1275 + static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section, 1276 + struct mtd_oob_region *oobregion) 1277 + { 1278 + struct nand_chip *nand = mtd_to_nand(mtd); 1279 + struct nand_ecc_ctrl *ecc = &nand->ecc; 1280 + 1281 + if (section > ecc->steps) 1282 + return -ERANGE; 1283 + 1284 + /* 1285 + * The first 2 bytes are used for BB markers, hence we 1286 + * only have 2 bytes available in the first user data 1287 + * section. 
1288 + */ 1289 + if (!section && ecc->mode == NAND_ECC_HW) { 1290 + oobregion->offset = 2; 1291 + oobregion->length = 2; 1292 + 1293 + return 0; 1294 + } 1295 + 1296 + oobregion->offset = section * (ecc->bytes + 4); 1297 + 1298 + if (section < ecc->steps) 1299 + oobregion->length = 4; 1300 + else 1301 + oobregion->offset = mtd->oobsize - oobregion->offset; 1302 + 1303 + return 0; 1304 + } 1305 + 1306 + static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = { 1307 + .ecc = sunxi_nand_ooblayout_ecc, 1308 + .free = sunxi_nand_ooblayout_free, 1309 + }; 1310 + 1436 1311 static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, 1437 1312 struct nand_ecc_ctrl *ecc, 1438 1313 struct device_node *np) ··· 1493 1266 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); 1494 1267 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); 1495 1268 struct sunxi_nand_hw_ecc *data; 1496 - struct nand_ecclayout *layout; 1497 1269 int nsectors; 1498 1270 int ret; 1499 1271 int i; ··· 1521 1295 /* HW ECC always work with even numbers of ECC bytes */ 1522 1296 ecc->bytes = ALIGN(ecc->bytes, 2); 1523 1297 1524 - layout = &data->layout; 1525 1298 nsectors = mtd->writesize / ecc->size; 1526 1299 1527 1300 if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) { ··· 1528 1303 goto err; 1529 1304 } 1530 1305 1531 - layout->eccbytes = (ecc->bytes * nsectors); 1532 - 1533 - ecc->layout = layout; 1306 + ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob; 1307 + ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob; 1308 + mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops); 1534 1309 ecc->priv = data; 1535 1310 1536 1311 return 0; ··· 1550 1325 struct nand_ecc_ctrl *ecc, 1551 1326 struct device_node *np) 1552 1327 { 1553 - struct nand_ecclayout *layout; 1554 - int nsectors; 1555 - int i, j; 1556 1328 int ret; 1557 1329 1558 1330 ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); ··· 1558 1336 1559 1337 ecc->read_page = sunxi_nfc_hw_ecc_read_page; 1560 1338 
ecc->write_page = sunxi_nfc_hw_ecc_write_page; 1561 - layout = ecc->layout; 1562 - nsectors = mtd->writesize / ecc->size; 1563 - 1564 - for (i = 0; i < nsectors; i++) { 1565 - if (i) { 1566 - layout->oobfree[i].offset = 1567 - layout->oobfree[i - 1].offset + 1568 - layout->oobfree[i - 1].length + 1569 - ecc->bytes; 1570 - layout->oobfree[i].length = 4; 1571 - } else { 1572 - /* 1573 - * The first 2 bytes are used for BB markers, hence we 1574 - * only have 2 bytes available in the first user data 1575 - * section. 1576 - */ 1577 - layout->oobfree[i].length = 2; 1578 - layout->oobfree[i].offset = 2; 1579 - } 1580 - 1581 - for (j = 0; j < ecc->bytes; j++) 1582 - layout->eccpos[(ecc->bytes * i) + j] = 1583 - layout->oobfree[i].offset + 1584 - layout->oobfree[i].length + j; 1585 - } 1586 - 1587 - if (mtd->oobsize > (ecc->bytes + 4) * nsectors) { 1588 - layout->oobfree[nsectors].offset = 1589 - layout->oobfree[nsectors - 1].offset + 1590 - layout->oobfree[nsectors - 1].length + 1591 - ecc->bytes; 1592 - layout->oobfree[nsectors].length = mtd->oobsize - 1593 - ((ecc->bytes + 4) * nsectors); 1594 - } 1339 + ecc->read_oob_raw = nand_read_oob_std; 1340 + ecc->write_oob_raw = nand_write_oob_std; 1341 + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; 1595 1342 1596 1343 return 0; 1597 1344 } ··· 1569 1378 struct nand_ecc_ctrl *ecc, 1570 1379 struct device_node *np) 1571 1380 { 1572 - struct nand_ecclayout *layout; 1573 - int nsectors; 1574 - int i; 1575 1381 int ret; 1576 1382 1577 1383 ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); ··· 1578 1390 ecc->prepad = 4; 1579 1391 ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page; 1580 1392 ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page; 1581 - 1582 - layout = ecc->layout; 1583 - nsectors = mtd->writesize / ecc->size; 1584 - 1585 - for (i = 0; i < (ecc->bytes * nsectors); i++) 1586 - layout->eccpos[i] = i; 1587 - 1588 - layout->oobfree[0].length = mtd->oobsize - i; 1589 - layout->oobfree[0].offset = i; 1393 + 
ecc->read_oob_raw = nand_read_oob_syndrome; 1394 + ecc->write_oob_raw = nand_write_oob_syndrome; 1590 1395 1591 1396 return 0; 1592 1397 } ··· 1592 1411 sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc); 1593 1412 break; 1594 1413 case NAND_ECC_NONE: 1595 - kfree(ecc->layout); 1596 1414 default: 1597 1415 break; 1598 1416 } ··· 1612 1432 return -EINVAL; 1613 1433 1614 1434 switch (ecc->mode) { 1615 - case NAND_ECC_SOFT_BCH: 1616 - break; 1617 1435 case NAND_ECC_HW: 1618 1436 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); 1619 1437 if (ret) ··· 1623 1445 return ret; 1624 1446 break; 1625 1447 case NAND_ECC_NONE: 1626 - ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL); 1627 - if (!ecc->layout) 1628 - return -ENOMEM; 1629 - ecc->layout->oobfree[0].length = mtd->oobsize; 1630 1448 case NAND_ECC_SOFT: 1631 1449 break; 1632 1450 default: ··· 1710 1536 } 1711 1537 } 1712 1538 1713 - timings = onfi_async_timing_mode_to_sdr_timings(0); 1714 - if (IS_ERR(timings)) { 1715 - ret = PTR_ERR(timings); 1716 - dev_err(dev, 1717 - "could not retrieve timings for ONFI mode 0: %d\n", 1718 - ret); 1719 - return ret; 1720 - } 1721 - 1722 - ret = sunxi_nand_chip_set_timings(chip, timings); 1723 - if (ret) { 1724 - dev_err(dev, "could not configure chip timings: %d\n", ret); 1725 - return ret; 1726 - } 1727 - 1728 1539 nand = &chip->nand; 1729 1540 /* Default tR value specified in the ONFI spec (chapter 4.15.1) */ 1730 1541 nand->chip_delay = 200; ··· 1729 1570 mtd = nand_to_mtd(nand); 1730 1571 mtd->dev.parent = dev; 1731 1572 1573 + timings = onfi_async_timing_mode_to_sdr_timings(0); 1574 + if (IS_ERR(timings)) { 1575 + ret = PTR_ERR(timings); 1576 + dev_err(dev, 1577 + "could not retrieve timings for ONFI mode 0: %d\n", 1578 + ret); 1579 + return ret; 1580 + } 1581 + 1582 + ret = sunxi_nand_chip_set_timings(chip, timings); 1583 + if (ret) { 1584 + dev_err(dev, "could not configure chip timings: %d\n", ret); 1585 + return ret; 1586 + } 1587 + 1732 1588 ret = nand_scan_ident(mtd, 
nsels, NULL); 1733 1589 if (ret) 1734 1590 return ret; ··· 1753 1579 1754 1580 if (nand->options & NAND_NEED_SCRAMBLING) 1755 1581 nand->options |= NAND_NO_SUBPAGE_WRITE; 1582 + 1583 + nand->options |= NAND_SUBPAGE_READ; 1756 1584 1757 1585 ret = sunxi_nand_chip_init_timings(chip, np); 1758 1586 if (ret) { ··· 1904 1728 struct sunxi_nfc *nfc = platform_get_drvdata(pdev); 1905 1729 1906 1730 sunxi_nand_chips_cleanup(nfc); 1731 + clk_disable_unprepare(nfc->mod_clk); 1732 + clk_disable_unprepare(nfc->ahb_clk); 1907 1733 1908 1734 return 0; 1909 1735 }
+4 -31
drivers/mtd/nand/vf610_nfc.c
··· 33 33 #include <linux/mtd/mtd.h> 34 34 #include <linux/mtd/nand.h> 35 35 #include <linux/mtd/partitions.h> 36 - #include <linux/of_mtd.h> 37 36 #include <linux/of_device.h> 38 37 #include <linux/pinctrl/consumer.h> 39 38 #include <linux/platform_device.h> ··· 173 174 { 174 175 return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip); 175 176 } 176 - 177 - static struct nand_ecclayout vf610_nfc_ecc45 = { 178 - .eccbytes = 45, 179 - .eccpos = {19, 20, 21, 22, 23, 180 - 24, 25, 26, 27, 28, 29, 30, 31, 181 - 32, 33, 34, 35, 36, 37, 38, 39, 182 - 40, 41, 42, 43, 44, 45, 46, 47, 183 - 48, 49, 50, 51, 52, 53, 54, 55, 184 - 56, 57, 58, 59, 60, 61, 62, 63}, 185 - .oobfree = { 186 - {.offset = 2, 187 - .length = 17} } 188 - }; 189 - 190 - static struct nand_ecclayout vf610_nfc_ecc60 = { 191 - .eccbytes = 60, 192 - .eccpos = { 4, 5, 6, 7, 8, 9, 10, 11, 193 - 12, 13, 14, 15, 16, 17, 18, 19, 194 - 20, 21, 22, 23, 24, 25, 26, 27, 195 - 28, 29, 30, 31, 32, 33, 34, 35, 196 - 36, 37, 38, 39, 40, 41, 42, 43, 197 - 44, 45, 46, 47, 48, 49, 50, 51, 198 - 52, 53, 54, 55, 56, 57, 58, 59, 199 - 60, 61, 62, 63 }, 200 - .oobfree = { 201 - {.offset = 2, 202 - .length = 2} } 203 - }; 204 177 205 178 static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg) 206 179 { ··· 752 781 if (mtd->oobsize > 64) 753 782 mtd->oobsize = 64; 754 783 784 + /* 785 + * mtd->ecclayout is not specified here because we're using the 786 + * default large page ECC layout defined in NAND core. 787 + */ 755 788 if (chip->ecc.strength == 32) { 756 789 nfc->ecc_mode = ECC_60_BYTE; 757 790 chip->ecc.bytes = 60; 758 - chip->ecc.layout = &vf610_nfc_ecc60; 759 791 } else if (chip->ecc.strength == 24) { 760 792 nfc->ecc_mode = ECC_45_BYTE; 761 793 chip->ecc.bytes = 45; 762 - chip->ecc.layout = &vf610_nfc_ecc45; 763 794 } else { 764 795 dev_err(nfc->dev, "Unsupported ECC strength\n"); 765 796 err = -ENXIO;
+111 -124
drivers/mtd/onenand/onenand_base.c
··· 68 68 * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page 69 69 * For now, we expose only 64 out of 80 ecc bytes 70 70 */ 71 - static struct nand_ecclayout flexonenand_oob_128 = { 72 - .eccbytes = 64, 73 - .eccpos = { 74 - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 75 - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 76 - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 77 - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 78 - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 79 - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 80 - 102, 103, 104, 105 81 - }, 82 - .oobfree = { 83 - {2, 4}, {18, 4}, {34, 4}, {50, 4}, 84 - {66, 4}, {82, 4}, {98, 4}, {114, 4} 85 - } 71 + static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section, 72 + struct mtd_oob_region *oobregion) 73 + { 74 + if (section > 7) 75 + return -ERANGE; 76 + 77 + oobregion->offset = (section * 16) + 6; 78 + oobregion->length = 10; 79 + 80 + return 0; 81 + } 82 + 83 + static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section, 84 + struct mtd_oob_region *oobregion) 85 + { 86 + if (section > 7) 87 + return -ERANGE; 88 + 89 + oobregion->offset = (section * 16) + 2; 90 + oobregion->length = 4; 91 + 92 + return 0; 93 + } 94 + 95 + static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = { 96 + .ecc = flexonenand_ooblayout_ecc, 97 + .free = flexonenand_ooblayout_free, 86 98 }; 87 99 88 100 /* ··· 103 91 * Based on specification: 104 92 * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 
2010 105 93 * 106 - * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout) 107 - * 108 - * oobfree uses the spare area fields marked as 109 - * "Managed by internal ECC logic for Logical Sector Number area" 110 94 */ 111 - static struct nand_ecclayout onenand_oob_128 = { 112 - .eccbytes = 64, 113 - .eccpos = { 114 - 7, 8, 9, 10, 11, 12, 13, 14, 15, 115 - 23, 24, 25, 26, 27, 28, 29, 30, 31, 116 - 39, 40, 41, 42, 43, 44, 45, 46, 47, 117 - 55, 56, 57, 58, 59, 60, 61, 62, 63, 118 - 71, 72, 73, 74, 75, 76, 77, 78, 79, 119 - 87, 88, 89, 90, 91, 92, 93, 94, 95, 120 - 103, 104, 105, 106, 107, 108, 109, 110, 111, 121 - 119 122 - }, 123 - .oobfree = { 124 - {2, 3}, {18, 3}, {34, 3}, {50, 3}, 125 - {66, 3}, {82, 3}, {98, 3}, {114, 3} 126 - } 95 + static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section, 96 + struct mtd_oob_region *oobregion) 97 + { 98 + if (section > 7) 99 + return -ERANGE; 100 + 101 + oobregion->offset = (section * 16) + 7; 102 + oobregion->length = 9; 103 + 104 + return 0; 105 + } 106 + 107 + static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section, 108 + struct mtd_oob_region *oobregion) 109 + { 110 + if (section >= 8) 111 + return -ERANGE; 112 + 113 + /* 114 + * free bytes are using the spare area fields marked as 115 + * "Managed by internal ECC logic for Logical Sector Number area" 116 + */ 117 + oobregion->offset = (section * 16) + 2; 118 + oobregion->length = 3; 119 + 120 + return 0; 121 + } 122 + 123 + static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = { 124 + .ecc = onenand_ooblayout_128_ecc, 125 + .free = onenand_ooblayout_128_free, 127 126 }; 128 127 129 128 /** 130 - * onenand_oob_64 - oob info for large (2KB) page 129 + * onenand_oob_32_64 - oob info for large (2KB) page 131 130 */ 132 - static struct nand_ecclayout onenand_oob_64 = { 133 - .eccbytes = 20, 134 - .eccpos = { 135 - 8, 9, 10, 11, 12, 136 - 24, 25, 26, 27, 28, 137 - 40, 41, 42, 43, 44, 138 - 56, 57, 58, 59, 60, 139 
- }, 140 - .oobfree = { 141 - {2, 3}, {14, 2}, {18, 3}, {30, 2}, 142 - {34, 3}, {46, 2}, {50, 3}, {62, 2} 143 - } 144 - }; 131 + static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section, 132 + struct mtd_oob_region *oobregion) 133 + { 134 + if (section > 3) 135 + return -ERANGE; 145 136 146 - /** 147 - * onenand_oob_32 - oob info for middle (1KB) page 148 - */ 149 - static struct nand_ecclayout onenand_oob_32 = { 150 - .eccbytes = 10, 151 - .eccpos = { 152 - 8, 9, 10, 11, 12, 153 - 24, 25, 26, 27, 28, 154 - }, 155 - .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} } 137 + oobregion->offset = (section * 16) + 8; 138 + oobregion->length = 5; 139 + 140 + return 0; 141 + } 142 + 143 + static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section, 144 + struct mtd_oob_region *oobregion) 145 + { 146 + int sections = (mtd->oobsize / 32) * 2; 147 + 148 + if (section >= sections) 149 + return -ERANGE; 150 + 151 + if (section & 1) { 152 + oobregion->offset = ((section - 1) * 16) + 14; 153 + oobregion->length = 2; 154 + } else { 155 + oobregion->offset = (section * 16) + 2; 156 + oobregion->length = 3; 157 + } 158 + 159 + return 0; 160 + } 161 + 162 + static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = { 163 + .ecc = onenand_ooblayout_32_64_ecc, 164 + .free = onenand_ooblayout_32_64_free, 156 165 }; 157 166 158 167 static const unsigned char ffchars[] = { ··· 1057 1024 int thislen) 1058 1025 { 1059 1026 struct onenand_chip *this = mtd->priv; 1060 - struct nand_oobfree *free; 1061 - int readcol = column; 1062 - int readend = column + thislen; 1063 - int lastgap = 0; 1064 - unsigned int i; 1065 - uint8_t *oob_buf = this->oob_buf; 1027 + int ret; 1066 1028 1067 - free = this->ecclayout->oobfree; 1068 - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { 1069 - if (readcol >= lastgap) 1070 - readcol += free->offset - lastgap; 1071 - if (readend >= lastgap) 1072 - readend += free->offset - lastgap; 1073 - lastgap = 
free->offset + free->length; 1074 - } 1075 - this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); 1076 - free = this->ecclayout->oobfree; 1077 - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { 1078 - int free_end = free->offset + free->length; 1079 - if (free->offset < readend && free_end > readcol) { 1080 - int st = max_t(int,free->offset,readcol); 1081 - int ed = min_t(int,free_end,readend); 1082 - int n = ed - st; 1083 - memcpy(buf, oob_buf + st, n); 1084 - buf += n; 1085 - } else if (column == 0) 1086 - break; 1087 - } 1029 + this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0, 1030 + mtd->oobsize); 1031 + ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf, 1032 + column, thislen); 1033 + if (ret) 1034 + return ret; 1035 + 1088 1036 return 0; 1089 1037 } 1090 1038 ··· 1822 1808 static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, 1823 1809 const u_char *buf, int column, int thislen) 1824 1810 { 1825 - struct onenand_chip *this = mtd->priv; 1826 - struct nand_oobfree *free; 1827 - int writecol = column; 1828 - int writeend = column + thislen; 1829 - int lastgap = 0; 1830 - unsigned int i; 1831 - 1832 - free = this->ecclayout->oobfree; 1833 - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { 1834 - if (writecol >= lastgap) 1835 - writecol += free->offset - lastgap; 1836 - if (writeend >= lastgap) 1837 - writeend += free->offset - lastgap; 1838 - lastgap = free->offset + free->length; 1839 - } 1840 - free = this->ecclayout->oobfree; 1841 - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { 1842 - int free_end = free->offset + free->length; 1843 - if (free->offset < writeend && free_end > writecol) { 1844 - int st = max_t(int,free->offset,writecol); 1845 - int ed = min_t(int,free_end,writeend); 1846 - int n = ed - st; 1847 - memcpy(oob_buf + st, buf, n); 1848 - buf += n; 1849 - } else if (column == 0) 1850 - break; 1851 - } 1852 - return 0; 1811 + 
return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen); 1853 1812 } 1854 1813 1855 1814 /** ··· 3990 4003 switch (mtd->oobsize) { 3991 4004 case 128: 3992 4005 if (FLEXONENAND(this)) { 3993 - this->ecclayout = &flexonenand_oob_128; 4006 + mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops); 3994 4007 mtd->subpage_sft = 0; 3995 4008 } else { 3996 - this->ecclayout = &onenand_oob_128; 4009 + mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops); 3997 4010 mtd->subpage_sft = 2; 3998 4011 } 3999 4012 if (ONENAND_IS_NOP_1(this)) 4000 4013 mtd->subpage_sft = 0; 4001 4014 break; 4002 4015 case 64: 4003 - this->ecclayout = &onenand_oob_64; 4016 + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); 4004 4017 mtd->subpage_sft = 2; 4005 4018 break; 4006 4019 4007 4020 case 32: 4008 - this->ecclayout = &onenand_oob_32; 4021 + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); 4009 4022 mtd->subpage_sft = 1; 4010 4023 break; 4011 4024 ··· 4014 4027 __func__, mtd->oobsize); 4015 4028 mtd->subpage_sft = 0; 4016 4029 /* To prevent kernel oops */ 4017 - this->ecclayout = &onenand_oob_32; 4030 + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); 4018 4031 break; 4019 4032 } 4020 4033 ··· 4024 4037 * The number of bytes available for a client to place data into 4025 4038 * the out of band area 4026 4039 */ 4027 - mtd->oobavail = 0; 4028 - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && 4029 - this->ecclayout->oobfree[i].length; i++) 4030 - mtd->oobavail += this->ecclayout->oobfree[i].length; 4040 + ret = mtd_ooblayout_count_freebytes(mtd); 4041 + if (ret < 0) 4042 + ret = 0; 4031 4043 4032 - mtd->ecclayout = this->ecclayout; 4044 + mtd->oobavail = ret; 4045 + 4033 4046 mtd->ecc_strength = 1; 4034 4047 4035 4048 /* Fill in remaining MTD driver data */
+1
drivers/mtd/spi-nor/spi-nor.c
··· 832 832 /* GigaDevice */ 833 833 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, 834 834 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, 835 + { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 835 836 { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, 836 837 837 838 /* Intel/Numonyx -- xxxs33b */
-1
drivers/of/Makefile
··· 10 10 obj-$(CONFIG_OF_MDIO) += of_mdio.o 11 11 obj-$(CONFIG_OF_PCI) += of_pci.o 12 12 obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o 13 - obj-$(CONFIG_OF_MTD) += of_mtd.o 14 13 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o 15 14 obj-$(CONFIG_OF_RESOLVE) += resolver.o 16 15 obj-$(CONFIG_OF_OVERLAY) += overlay.o
-119
drivers/of/of_mtd.c
··· 1 - /* 2 - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> 3 - * 4 - * OF helpers for mtd. 5 - * 6 - * This file is released under the GPLv2 7 - * 8 - */ 9 - #include <linux/kernel.h> 10 - #include <linux/of_mtd.h> 11 - #include <linux/mtd/nand.h> 12 - #include <linux/export.h> 13 - 14 - /** 15 - * It maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h 16 - * into the device tree binding of 'nand-ecc', so that MTD 17 - * device driver can get nand ecc from device tree. 18 - */ 19 - static const char *nand_ecc_modes[] = { 20 - [NAND_ECC_NONE] = "none", 21 - [NAND_ECC_SOFT] = "soft", 22 - [NAND_ECC_HW] = "hw", 23 - [NAND_ECC_HW_SYNDROME] = "hw_syndrome", 24 - [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", 25 - [NAND_ECC_SOFT_BCH] = "soft_bch", 26 - }; 27 - 28 - /** 29 - * of_get_nand_ecc_mode - Get nand ecc mode for given device_node 30 - * @np: Pointer to the given device_node 31 - * 32 - * The function gets ecc mode string from property 'nand-ecc-mode', 33 - * and return its index in nand_ecc_modes table, or errno in error case. 34 - */ 35 - int of_get_nand_ecc_mode(struct device_node *np) 36 - { 37 - const char *pm; 38 - int err, i; 39 - 40 - err = of_property_read_string(np, "nand-ecc-mode", &pm); 41 - if (err < 0) 42 - return err; 43 - 44 - for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) 45 - if (!strcasecmp(pm, nand_ecc_modes[i])) 46 - return i; 47 - 48 - return -ENODEV; 49 - } 50 - EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode); 51 - 52 - /** 53 - * of_get_nand_ecc_step_size - Get ECC step size associated to 54 - * the required ECC strength (see below). 55 - * @np: Pointer to the given device_node 56 - * 57 - * return the ECC step size, or errno in error case. 58 - */ 59 - int of_get_nand_ecc_step_size(struct device_node *np) 60 - { 61 - int ret; 62 - u32 val; 63 - 64 - ret = of_property_read_u32(np, "nand-ecc-step-size", &val); 65 - return ret ? 
ret : val; 66 - } 67 - EXPORT_SYMBOL_GPL(of_get_nand_ecc_step_size); 68 - 69 - /** 70 - * of_get_nand_ecc_strength - Get required ECC strength over the 71 - * correspnding step size as defined by 'nand-ecc-size' 72 - * @np: Pointer to the given device_node 73 - * 74 - * return the ECC strength, or errno in error case. 75 - */ 76 - int of_get_nand_ecc_strength(struct device_node *np) 77 - { 78 - int ret; 79 - u32 val; 80 - 81 - ret = of_property_read_u32(np, "nand-ecc-strength", &val); 82 - return ret ? ret : val; 83 - } 84 - EXPORT_SYMBOL_GPL(of_get_nand_ecc_strength); 85 - 86 - /** 87 - * of_get_nand_bus_width - Get nand bus witdh for given device_node 88 - * @np: Pointer to the given device_node 89 - * 90 - * return bus width option, or errno in error case. 91 - */ 92 - int of_get_nand_bus_width(struct device_node *np) 93 - { 94 - u32 val; 95 - 96 - if (of_property_read_u32(np, "nand-bus-width", &val)) 97 - return 8; 98 - 99 - switch(val) { 100 - case 8: 101 - case 16: 102 - return val; 103 - default: 104 - return -EIO; 105 - } 106 - } 107 - EXPORT_SYMBOL_GPL(of_get_nand_bus_width); 108 - 109 - /** 110 - * of_get_nand_on_flash_bbt - Get nand on flash bbt for given device_node 111 - * @np: Pointer to the given device_node 112 - * 113 - * return true if present false other wise 114 - */ 115 - bool of_get_nand_on_flash_bbt(struct device_node *np) 116 - { 117 - return of_property_read_bool(np, "nand-on-flash-bbt"); 118 - } 119 - EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt);
+31 -18
drivers/staging/mt29f_spinand/mt29f_spinand.c
··· 42 42 static int enable_hw_ecc; 43 43 static int enable_read_hw_ecc; 44 44 45 - static struct nand_ecclayout spinand_oob_64 = { 46 - .eccbytes = 24, 47 - .eccpos = { 48 - 1, 2, 3, 4, 5, 6, 49 - 17, 18, 19, 20, 21, 22, 50 - 33, 34, 35, 36, 37, 38, 51 - 49, 50, 51, 52, 53, 54, }, 52 - .oobfree = { 53 - {.offset = 8, 54 - .length = 8}, 55 - {.offset = 24, 56 - .length = 8}, 57 - {.offset = 40, 58 - .length = 8}, 59 - {.offset = 56, 60 - .length = 8}, 61 - } 45 + static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section, 46 + struct mtd_oob_region *oobregion) 47 + { 48 + if (section > 3) 49 + return -ERANGE; 50 + 51 + oobregion->offset = (section * 16) + 1; 52 + oobregion->length = 6; 53 + 54 + return 0; 55 + } 56 + 57 + static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section, 58 + struct mtd_oob_region *oobregion) 59 + { 60 + if (section > 3) 61 + return -ERANGE; 62 + 63 + oobregion->offset = (section * 16) + 8; 64 + oobregion->length = 8; 65 + 66 + return 0; 67 + } 68 + 69 + static const struct mtd_ooblayout_ops spinand_oob_64_ops = { 70 + .ecc = spinand_ooblayout_64_ecc, 71 + .free = spinand_ooblayout_64_free, 62 72 }; 63 73 #endif 64 74 ··· 896 886 897 887 chip->ecc.strength = 1; 898 888 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; 899 - chip->ecc.layout = &spinand_oob_64; 900 889 chip->ecc.read_page = spinand_read_page_hwecc; 901 890 chip->ecc.write_page = spinand_write_page_hwecc; 902 891 #else 903 892 chip->ecc.mode = NAND_ECC_SOFT; 893 + chip->ecc.algo = NAND_ECC_HAMMING; 904 894 if (spinand_disable_ecc(spi_nand) < 0) 905 895 dev_info(&spi_nand->dev, "%s: disable ecc failed!\n", 906 896 __func__); ··· 922 912 923 913 mtd->dev.parent = &spi_nand->dev; 924 914 mtd->oobsize = 64; 915 + #ifdef CONFIG_MTD_SPINAND_ONDIEECC 916 + mtd_set_ooblayout(mtd, &spinand_oob_64_ops); 917 + #endif 925 918 926 919 if (nand_scan(mtd, 1)) 927 920 return -ENXIO;
-1
include/linux/bcma/bcma_driver_chipcommon.h
··· 587 587 588 588 struct bcma_sflash { 589 589 bool present; 590 - u32 window; 591 590 u32 blocksize; 592 591 u16 numblocks; 593 592 u32 size;
+30 -15
include/linux/fsl_ifc.h
··· 39 39 #define FSL_IFC_VERSION_MASK 0x0F0F0000 40 40 #define FSL_IFC_VERSION_1_0_0 0x01000000 41 41 #define FSL_IFC_VERSION_1_1_0 0x01010000 42 + #define FSL_IFC_VERSION_2_0_0 0x02000000 43 + 44 + #define PGOFFSET_64K (64*1024) 45 + #define PGOFFSET_4K (4*1024) 42 46 43 47 /* 44 48 * CSPR - Chip Select Property Register ··· 727 723 __be32 nand_evter_en; 728 724 u32 res17[0x2]; 729 725 __be32 nand_evter_intr_en; 730 - u32 res18[0x2]; 726 + __be32 nand_vol_addr_stat; 727 + u32 res18; 731 728 __be32 nand_erattr0; 732 729 __be32 nand_erattr1; 733 730 u32 res19[0x10]; 734 731 __be32 nand_fsr; 735 - u32 res20; 736 - __be32 nand_eccstat[4]; 737 - u32 res21[0x20]; 732 + u32 res20[0x3]; 733 + __be32 nand_eccstat[6]; 734 + u32 res21[0x1c]; 738 735 __be32 nanndcr; 739 736 u32 res22[0x2]; 740 737 __be32 nand_autoboot_trgr; 741 738 u32 res23; 742 739 __be32 nand_mdr; 743 - u32 res24[0x5C]; 740 + u32 res24[0x1C]; 741 + __be32 nand_dll_lowcfg0; 742 + __be32 nand_dll_lowcfg1; 743 + u32 res25; 744 + __be32 nand_dll_lowstat; 745 + u32 res26[0x3c]; 744 746 }; 745 747 746 748 /* ··· 781 771 __be32 gpcm_erattr1; 782 772 __be32 gpcm_erattr2; 783 773 __be32 gpcm_stat; 784 - u32 res4[0x1F3]; 785 774 }; 786 775 787 776 /* 788 777 * IFC Controller Registers 789 778 */ 790 - struct fsl_ifc_regs { 779 + struct fsl_ifc_global { 791 780 __be32 ifc_rev; 792 781 u32 res1[0x2]; 793 782 struct { ··· 812 803 } ftim_cs[FSL_IFC_BANK_COUNT]; 813 804 u32 res9[0x30]; 814 805 __be32 rb_stat; 815 - u32 res10[0x2]; 806 + __be32 rb_map; 807 + __be32 wb_map; 816 808 __be32 ifc_gcr; 817 - u32 res11[0x2]; 809 + u32 res10[0x2]; 818 810 __be32 cm_evter_stat; 819 - u32 res12[0x2]; 811 + u32 res11[0x2]; 820 812 __be32 cm_evter_en; 821 - u32 res13[0x2]; 813 + u32 res12[0x2]; 822 814 __be32 cm_evter_intr_en; 823 - u32 res14[0x2]; 815 + u32 res13[0x2]; 824 816 __be32 cm_erattr0; 825 817 __be32 cm_erattr1; 826 - u32 res15[0x2]; 818 + u32 res14[0x2]; 827 819 __be32 ifc_ccr; 828 820 __be32 ifc_csr; 829 - u32 
res16[0x2EB]; 821 + __be32 ddr_ccr_low; 822 + }; 823 + 824 + 825 + struct fsl_ifc_runtime { 830 826 struct fsl_ifc_nand ifc_nand; 831 827 struct fsl_ifc_nor ifc_nor; 832 828 struct fsl_ifc_gpcm ifc_gpcm; ··· 845 831 struct fsl_ifc_ctrl { 846 832 /* device info */ 847 833 struct device *dev; 848 - struct fsl_ifc_regs __iomem *regs; 834 + struct fsl_ifc_global __iomem *gregs; 835 + struct fsl_ifc_runtime __iomem *rregs; 849 836 int irq; 850 837 int nand_irq; 851 838 spinlock_t lock;
-18
include/linux/mtd/fsmc.h
··· 103 103 104 104 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) 105 105 106 - /* 107 - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 108 - * and it has to be read consecutively and immediately after the 512 109 - * byte data block for hardware to generate the error bit offsets 110 - * Managing the ecc bytes in the following way is easier. This way is 111 - * similar to oobfree structure maintained already in u-boot nand driver 112 - */ 113 - #define MAX_ECCPLACE_ENTRIES 32 114 - 115 - struct fsmc_nand_eccplace { 116 - uint8_t offset; 117 - uint8_t length; 118 - }; 119 - 120 - struct fsmc_eccplace { 121 - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; 122 - }; 123 - 124 106 struct fsmc_nand_timings { 125 107 uint8_t tclr; 126 108 uint8_t tar;
+7 -12
include/linux/mtd/map.h
··· 122 122 #endif 123 123 124 124 #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 125 - # ifdef map_bankwidth 126 - # undef map_bankwidth 127 - # define map_bankwidth(map) ((map)->bankwidth) 128 - # undef map_bankwidth_is_large 129 - # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) 130 - # undef map_words 131 - # define map_words(map) map_calc_words(map) 132 - # else 133 - # define map_bankwidth(map) 32 134 - # define map_bankwidth_is_large(map) (1) 135 - # define map_words(map) map_calc_words(map) 136 - # endif 125 + /* always use indirect access for 256-bit to preserve kernel stack */ 126 + # undef map_bankwidth 127 + # define map_bankwidth(map) ((map)->bankwidth) 128 + # undef map_bankwidth_is_large 129 + # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) 130 + # undef map_words 131 + # define map_words(map) map_calc_words(map) 137 132 #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) 138 133 #undef MAX_MAP_BANKWIDTH 139 134 #define MAX_MAP_BANKWIDTH 32
+54 -11
include/linux/mtd/mtd.h
··· 96 96 97 97 #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 98 98 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 99 - /* 100 - * Internal ECC layout control structure. For historical reasons, there is a 101 - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained 102 - * for export to user-space via the ECCGETLAYOUT ioctl. 103 - * nand_ecclayout should be expandable in the future simply by the above macros. 99 + /** 100 + * struct mtd_oob_region - oob region definition 101 + * @offset: region offset 102 + * @length: region length 103 + * 104 + * This structure describes a region of the OOB area, and is used 105 + * to retrieve ECC or free bytes sections. 106 + * Each section is defined by an offset within the OOB area and a 107 + * length. 104 108 */ 105 - struct nand_ecclayout { 106 - __u32 eccbytes; 107 - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; 108 - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; 109 + struct mtd_oob_region { 110 + u32 offset; 111 + u32 length; 112 + }; 113 + 114 + /* 115 + * struct mtd_ooblayout_ops - NAND OOB layout operations 116 + * @ecc: function returning an ECC region in the OOB area. 117 + * Should return -ERANGE if %section exceeds the total number of 118 + * ECC sections. 119 + * @free: function returning a free region in the OOB area. 120 + * Should return -ERANGE if %section exceeds the total number of 121 + * free sections. 122 + */ 123 + struct mtd_ooblayout_ops { 124 + int (*ecc)(struct mtd_info *mtd, int section, 125 + struct mtd_oob_region *oobecc); 126 + int (*free)(struct mtd_info *mtd, int section, 127 + struct mtd_oob_region *oobfree); 109 128 }; 110 129 111 130 struct module; /* only needed for owner field in mtd_info */ ··· 185 166 const char *name; 186 167 int index; 187 168 188 - /* ECC layout structure pointer - read only! */ 189 - struct nand_ecclayout *ecclayout; 169 + /* OOB layout description */ 170 + const struct mtd_ooblayout_ops *ooblayout; 190 171 191 172 /* the ecc step size. 
*/ 192 173 unsigned int ecc_step_size; ··· 271 252 struct device dev; 272 253 int usecount; 273 254 }; 255 + 256 + int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, 257 + struct mtd_oob_region *oobecc); 258 + int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, 259 + int *section, 260 + struct mtd_oob_region *oobregion); 261 + int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, 262 + const u8 *oobbuf, int start, int nbytes); 263 + int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, 264 + u8 *oobbuf, int start, int nbytes); 265 + int mtd_ooblayout_free(struct mtd_info *mtd, int section, 266 + struct mtd_oob_region *oobfree); 267 + int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, 268 + const u8 *oobbuf, int start, int nbytes); 269 + int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, 270 + u8 *oobbuf, int start, int nbytes); 271 + int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); 272 + int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); 273 + 274 + static inline void mtd_set_ooblayout(struct mtd_info *mtd, 275 + const struct mtd_ooblayout_ops *ooblayout) 276 + { 277 + mtd->ooblayout = ooblayout; 278 + } 274 279 275 280 static inline void mtd_set_of_node(struct mtd_info *mtd, 276 281 struct device_node *np)
+25 -3
include/linux/mtd/nand.h
··· 116 116 NAND_ECC_HW, 117 117 NAND_ECC_HW_SYNDROME, 118 118 NAND_ECC_HW_OOB_FIRST, 119 - NAND_ECC_SOFT_BCH, 120 119 } nand_ecc_modes_t; 120 + 121 + enum nand_ecc_algo { 122 + NAND_ECC_UNKNOWN, 123 + NAND_ECC_HAMMING, 124 + NAND_ECC_BCH, 125 + }; 121 126 122 127 /* 123 128 * Constants for Hardware ECC ··· 463 458 /** 464 459 * struct nand_ecc_ctrl - Control structure for ECC 465 460 * @mode: ECC mode 461 + * @algo: ECC algorithm 466 462 * @steps: number of ECC steps per page 467 463 * @size: data bytes per ECC step 468 464 * @bytes: ECC bytes per step ··· 472 466 * @prepad: padding information for syndrome based ECC generators 473 467 * @postpad: padding information for syndrome based ECC generators 474 468 * @options: ECC specific options (see NAND_ECC_XXX flags defined above) 475 - * @layout: ECC layout control struct pointer 476 469 * @priv: pointer to private ECC control data 477 470 * @hwctl: function to control hardware ECC generator. Must only 478 471 * be provided if an hardware ECC is available ··· 513 508 */ 514 509 struct nand_ecc_ctrl { 515 510 nand_ecc_modes_t mode; 511 + enum nand_ecc_algo algo; 516 512 int steps; 517 513 int size; 518 514 int bytes; ··· 522 516 int prepad; 523 517 int postpad; 524 518 unsigned int options; 525 - struct nand_ecclayout *layout; 526 519 void *priv; 527 520 void (*hwctl)(struct mtd_info *mtd, int mode); 528 521 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, ··· 744 739 745 740 void *priv; 746 741 }; 742 + 743 + extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; 744 + extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; 747 745 748 746 static inline void nand_set_flash_node(struct nand_chip *chip, 749 747 struct device_node *np) ··· 1078 1070 void *ecc, int ecclen, 1079 1071 void *extraoob, int extraooblen, 1080 1072 int threshold); 1073 + 1074 + /* Default write_oob implementation */ 1075 + int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); 1076 + 1077 + /* 
Default write_oob syndrome implementation */ 1078 + int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1079 + int page); 1080 + 1081 + /* Default read_oob implementation */ 1082 + int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); 1083 + 1084 + /* Default read_oob syndrome implementation */ 1085 + int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1086 + int page); 1081 1087 #endif /* __LINUX_MTD_NAND_H */
-2
include/linux/mtd/onenand.h
··· 80 80 * @page_buf: [INTERN] page main data buffer 81 81 * @oob_buf: [INTERN] page oob data buffer 82 82 * @subpagesize: [INTERN] holds the subpagesize 83 - * @ecclayout: [REPLACEABLE] the default ecc placement scheme 84 83 * @bbm: [REPLACEABLE] pointer to Bad Block Management 85 84 * @priv: [OPTIONAL] pointer to private chip date 86 85 */ ··· 133 134 #endif 134 135 135 136 int subpagesize; 136 - struct nand_ecclayout *ecclayout; 137 137 138 138 void *bbm; 139 139
+1 -1
include/linux/mtd/sharpsl.h
··· 14 14 15 15 struct sharpsl_nand_platform_data { 16 16 struct nand_bbt_descr *badblock_pattern; 17 - struct nand_ecclayout *ecc_layout; 17 + const struct mtd_ooblayout_ops *ecc_layout; 18 18 struct mtd_partition *partitions; 19 19 unsigned int nr_partitions; 20 20 };
+1
include/linux/mtd/spi-nor.h
··· 21 21 * Sometimes these are the same as CFI IDs, but sometimes they aren't. 22 22 */ 23 23 #define SNOR_MFR_ATMEL CFI_MFR_ATMEL 24 + #define SNOR_MFR_GIGADEVICE 0xc8 24 25 #define SNOR_MFR_INTEL CFI_MFR_INTEL 25 26 #define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ 26 27 #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
-50
include/linux/of_mtd.h
··· 1 - /* 2 - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> 3 - * 4 - * OF helpers for mtd. 5 - * 6 - * This file is released under the GPLv2 7 - */ 8 - 9 - #ifndef __LINUX_OF_MTD_H 10 - #define __LINUX_OF_MTD_H 11 - 12 - #ifdef CONFIG_OF_MTD 13 - 14 - #include <linux/of.h> 15 - int of_get_nand_ecc_mode(struct device_node *np); 16 - int of_get_nand_ecc_step_size(struct device_node *np); 17 - int of_get_nand_ecc_strength(struct device_node *np); 18 - int of_get_nand_bus_width(struct device_node *np); 19 - bool of_get_nand_on_flash_bbt(struct device_node *np); 20 - 21 - #else /* CONFIG_OF_MTD */ 22 - 23 - static inline int of_get_nand_ecc_mode(struct device_node *np) 24 - { 25 - return -ENOSYS; 26 - } 27 - 28 - static inline int of_get_nand_ecc_step_size(struct device_node *np) 29 - { 30 - return -ENOSYS; 31 - } 32 - 33 - static inline int of_get_nand_ecc_strength(struct device_node *np) 34 - { 35 - return -ENOSYS; 36 - } 37 - 38 - static inline int of_get_nand_bus_width(struct device_node *np) 39 - { 40 - return -ENOSYS; 41 - } 42 - 43 - static inline bool of_get_nand_on_flash_bbt(struct device_node *np) 44 - { 45 - return false; 46 - } 47 - 48 - #endif /* CONFIG_OF_MTD */ 49 - 50 - #endif /* __LINUX_OF_MTD_H */
+30 -138
include/linux/omap-gpmc.h
··· 7 7 * option) any later version. 8 8 */ 9 9 10 - /* Maximum Number of Chip Selects */ 11 - #define GPMC_CS_NUM 8 10 + #include <linux/platform_data/gpmc-omap.h> 12 11 13 12 #define GPMC_CONFIG_WP 0x00000005 14 13 15 - #define GPMC_IRQ_FIFOEVENTENABLE 0x01 16 - #define GPMC_IRQ_COUNT_EVENT 0x02 14 + /* IRQ numbers in GPMC IRQ domain for legacy boot use */ 15 + #define GPMC_IRQ_FIFOEVENTENABLE 0 16 + #define GPMC_IRQ_COUNT_EVENT 1 17 17 18 - #define GPMC_BURST_4 4 /* 4 word burst */ 19 - #define GPMC_BURST_8 8 /* 8 word burst */ 20 - #define GPMC_BURST_16 16 /* 16 word burst */ 21 - #define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ 22 - #define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ 23 - #define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ 24 - #define GPMC_MUX_AD 2 /* Addr-Data multiplex */ 25 - 26 - /* bool type time settings */ 27 - struct gpmc_bool_timings { 28 - bool cycle2cyclediffcsen; 29 - bool cycle2cyclesamecsen; 30 - bool we_extra_delay; 31 - bool oe_extra_delay; 32 - bool adv_extra_delay; 33 - bool cs_extra_delay; 34 - bool time_para_granularity; 35 - }; 36 - 37 - /* 38 - * Note that all values in this struct are in nanoseconds except sync_clk 39 - * (which is in picoseconds), while the register values are in gpmc_fck cycles. 18 + /** 19 + * gpmc_nand_ops - Interface between NAND and GPMC 20 + * @nand_write_buffer_empty: get the NAND write buffer empty status. 
40 21 */ 41 - struct gpmc_timings { 42 - /* Minimum clock period for synchronous mode (in picoseconds) */ 43 - u32 sync_clk; 44 - 45 - /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ 46 - u32 cs_on; /* Assertion time */ 47 - u32 cs_rd_off; /* Read deassertion time */ 48 - u32 cs_wr_off; /* Write deassertion time */ 49 - 50 - /* ADV signal timings corresponding to GPMC_CONFIG3 */ 51 - u32 adv_on; /* Assertion time */ 52 - u32 adv_rd_off; /* Read deassertion time */ 53 - u32 adv_wr_off; /* Write deassertion time */ 54 - u32 adv_aad_mux_on; /* ADV assertion time for AAD */ 55 - u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ 56 - u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ 57 - 58 - /* WE signals timings corresponding to GPMC_CONFIG4 */ 59 - u32 we_on; /* WE assertion time */ 60 - u32 we_off; /* WE deassertion time */ 61 - 62 - /* OE signals timings corresponding to GPMC_CONFIG4 */ 63 - u32 oe_on; /* OE assertion time */ 64 - u32 oe_off; /* OE deassertion time */ 65 - u32 oe_aad_mux_on; /* OE assertion time for AAD */ 66 - u32 oe_aad_mux_off; /* OE deassertion time for AAD */ 67 - 68 - /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ 69 - u32 page_burst_access; /* Multiple access word delay */ 70 - u32 access; /* Start-cycle to first data valid delay */ 71 - u32 rd_cycle; /* Total read cycle time */ 72 - u32 wr_cycle; /* Total write cycle time */ 73 - 74 - u32 bus_turnaround; 75 - u32 cycle2cycle_delay; 76 - 77 - u32 wait_monitoring; 78 - u32 clk_activation; 79 - 80 - /* The following are only on OMAP3430 */ 81 - u32 wr_access; /* WRACCESSTIME */ 82 - u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ 83 - 84 - struct gpmc_bool_timings bool_timings; 22 + struct gpmc_nand_ops { 23 + bool (*nand_writebuffer_empty)(void); 85 24 }; 86 25 87 - /* Device timings in picoseconds */ 88 - struct gpmc_device_timings { 89 - u32 t_ceasu; /* address setup to CS valid */ 90 - u32 t_avdasu; /* address setup to ADV 
valid */ 91 - /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is 92 - * of tusb using these timings even for sync whilst 93 - * ideally for adv_rd/(wr)_off it should have considered 94 - * t_avdh instead. This indirectly necessitates r/w 95 - * variations of t_avdp as it is possible to have one 96 - * sync & other async 97 - */ 98 - u32 t_avdp_r; /* ADV low time (what about t_cer ?) */ 99 - u32 t_avdp_w; 100 - u32 t_aavdh; /* address hold time */ 101 - u32 t_oeasu; /* address setup to OE valid */ 102 - u32 t_aa; /* access time from ADV assertion */ 103 - u32 t_iaa; /* initial access time */ 104 - u32 t_oe; /* access time from OE assertion */ 105 - u32 t_ce; /* access time from CS asertion */ 106 - u32 t_rd_cycle; /* read cycle time */ 107 - u32 t_cez_r; /* read CS deassertion to high Z */ 108 - u32 t_cez_w; /* write CS deassertion to high Z */ 109 - u32 t_oez; /* OE deassertion to high Z */ 110 - u32 t_weasu; /* address setup to WE valid */ 111 - u32 t_wpl; /* write assertion time */ 112 - u32 t_wph; /* write deassertion time */ 113 - u32 t_wr_cycle; /* write cycle time */ 26 + struct gpmc_nand_regs; 114 27 115 - u32 clk; 116 - u32 t_bacc; /* burst access valid clock to output delay */ 117 - u32 t_ces; /* CS setup time to clk */ 118 - u32 t_avds; /* ADV setup time to clk */ 119 - u32 t_avdh; /* ADV hold time from clk */ 120 - u32 t_ach; /* address hold time from clk */ 121 - u32 t_rdyo; /* clk to ready valid */ 28 + #if IS_ENABLED(CONFIG_OMAP_GPMC) 29 + struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 30 + int cs); 31 + #else 32 + static inline gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 33 + int cs) 34 + { 35 + return NULL; 36 + } 37 + #endif /* CONFIG_OMAP_GPMC */ 122 38 123 - u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ 124 - u32 t_ce_avd; /* CS on to ADV on delay */ 39 + /*--------------------------------*/ 125 40 126 - /* XXX: check the possibility of combining 127 - * cyc_aavhd_oe & 
cyc_aavdh_we 128 - */ 129 - u8 cyc_aavdh_oe;/* read address hold time in cycles */ 130 - u8 cyc_aavdh_we;/* write address hold time in cycles */ 131 - u8 cyc_oe; /* access time from OE assertion in cycles */ 132 - u8 cyc_wpl; /* write deassertion time in cycles */ 133 - u32 cyc_iaa; /* initial access time in cycles */ 134 - 135 - /* extra delays */ 136 - bool ce_xdelay; 137 - bool avd_xdelay; 138 - bool oe_xdelay; 139 - bool we_xdelay; 140 - }; 141 - 142 - struct gpmc_settings { 143 - bool burst_wrap; /* enables wrap bursting */ 144 - bool burst_read; /* enables read page/burst mode */ 145 - bool burst_write; /* enables write page/burst mode */ 146 - bool device_nand; /* device is NAND */ 147 - bool sync_read; /* enables synchronous reads */ 148 - bool sync_write; /* enables synchronous writes */ 149 - bool wait_on_read; /* monitor wait on reads */ 150 - bool wait_on_write; /* monitor wait on writes */ 151 - u32 burst_len; /* page/burst length */ 152 - u32 device_width; /* device bus width (8 or 16 bit) */ 153 - u32 mux_add_data; /* multiplex address & data */ 154 - u32 wait_pin; /* wait-pin to be used */ 155 - }; 41 + /* deprecated APIs */ 42 + #if IS_ENABLED(CONFIG_OMAP_GPMC) 43 + void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); 44 + #else 45 + static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) 46 + { 47 + } 48 + #endif /* CONFIG_OMAP_GPMC */ 49 + /*--------------------------------*/ 156 50 157 51 extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, 158 52 struct gpmc_settings *gpmc_s, 159 53 struct gpmc_device_timings *dev_t); 160 54 161 - struct gpmc_nand_regs; 162 55 struct device_node; 163 56 164 - extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); 165 57 extern int gpmc_get_client_irq(unsigned irq_config); 166 58 167 59 extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
+172
include/linux/platform_data/gpmc-omap.h
··· 1 + /* 2 + * OMAP GPMC Platform data 3 + * 4 + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com 5 + * Roger Quadros <rogerq@ti.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + */ 11 + 12 + #ifndef _GPMC_OMAP_H_ 13 + #define _GPMC_OMAP_H_ 14 + 15 + /* Maximum Number of Chip Selects */ 16 + #define GPMC_CS_NUM 8 17 + 18 + /* bool type time settings */ 19 + struct gpmc_bool_timings { 20 + bool cycle2cyclediffcsen; 21 + bool cycle2cyclesamecsen; 22 + bool we_extra_delay; 23 + bool oe_extra_delay; 24 + bool adv_extra_delay; 25 + bool cs_extra_delay; 26 + bool time_para_granularity; 27 + }; 28 + 29 + /* 30 + * Note that all values in this struct are in nanoseconds except sync_clk 31 + * (which is in picoseconds), while the register values are in gpmc_fck cycles. 32 + */ 33 + struct gpmc_timings { 34 + /* Minimum clock period for synchronous mode (in picoseconds) */ 35 + u32 sync_clk; 36 + 37 + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ 38 + u32 cs_on; /* Assertion time */ 39 + u32 cs_rd_off; /* Read deassertion time */ 40 + u32 cs_wr_off; /* Write deassertion time */ 41 + 42 + /* ADV signal timings corresponding to GPMC_CONFIG3 */ 43 + u32 adv_on; /* Assertion time */ 44 + u32 adv_rd_off; /* Read deassertion time */ 45 + u32 adv_wr_off; /* Write deassertion time */ 46 + u32 adv_aad_mux_on; /* ADV assertion time for AAD */ 47 + u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ 48 + u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ 49 + 50 + /* WE signals timings corresponding to GPMC_CONFIG4 */ 51 + u32 we_on; /* WE assertion time */ 52 + u32 we_off; /* WE deassertion time */ 53 + 54 + /* OE signals timings corresponding to GPMC_CONFIG4 */ 55 + u32 oe_on; /* OE assertion time */ 56 + u32 oe_off; /* OE deassertion time */ 57 
+ u32 oe_aad_mux_on; /* OE assertion time for AAD */ 58 + u32 oe_aad_mux_off; /* OE deassertion time for AAD */ 59 + 60 + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ 61 + u32 page_burst_access; /* Multiple access word delay */ 62 + u32 access; /* Start-cycle to first data valid delay */ 63 + u32 rd_cycle; /* Total read cycle time */ 64 + u32 wr_cycle; /* Total write cycle time */ 65 + 66 + u32 bus_turnaround; 67 + u32 cycle2cycle_delay; 68 + 69 + u32 wait_monitoring; 70 + u32 clk_activation; 71 + 72 + /* The following are only on OMAP3430 */ 73 + u32 wr_access; /* WRACCESSTIME */ 74 + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ 75 + 76 + struct gpmc_bool_timings bool_timings; 77 + }; 78 + 79 + /* Device timings in picoseconds */ 80 + struct gpmc_device_timings { 81 + u32 t_ceasu; /* address setup to CS valid */ 82 + u32 t_avdasu; /* address setup to ADV valid */ 83 + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is 84 + * of tusb using these timings even for sync whilst 85 + * ideally for adv_rd/(wr)_off it should have considered 86 + * t_avdh instead. This indirectly necessitates r/w 87 + * variations of t_avdp as it is possible to have one 88 + * sync & other async 89 + */ 90 + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ 91 + u32 t_avdp_w; 92 + u32 t_aavdh; /* address hold time */ 93 + u32 t_oeasu; /* address setup to OE valid */ 94 + u32 t_aa; /* access time from ADV assertion */ 95 + u32 t_iaa; /* initial access time */ 96 + u32 t_oe; /* access time from OE assertion */ 97 + u32 t_ce; /* access time from CS asertion */ 98 + u32 t_rd_cycle; /* read cycle time */ 99 + u32 t_cez_r; /* read CS deassertion to high Z */ 100 + u32 t_cez_w; /* write CS deassertion to high Z */ 101 + u32 t_oez; /* OE deassertion to high Z */ 102 + u32 t_weasu; /* address setup to WE valid */ 103 + u32 t_wpl; /* write assertion time */ 104 + u32 t_wph; /* write deassertion time */ 105 + u32 t_wr_cycle; /* write cycle time */ 106 + 107 + u32 clk; 108 + u32 t_bacc; /* burst access valid clock to output delay */ 109 + u32 t_ces; /* CS setup time to clk */ 110 + u32 t_avds; /* ADV setup time to clk */ 111 + u32 t_avdh; /* ADV hold time from clk */ 112 + u32 t_ach; /* address hold time from clk */ 113 + u32 t_rdyo; /* clk to ready valid */ 114 + 115 + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ 116 + u32 t_ce_avd; /* CS on to ADV on delay */ 117 + 118 + /* XXX: check the possibility of combining 119 + * cyc_aavhd_oe & cyc_aavdh_we 120 + */ 121 + u8 cyc_aavdh_oe;/* read address hold time in cycles */ 122 + u8 cyc_aavdh_we;/* write address hold time in cycles */ 123 + u8 cyc_oe; /* access time from OE assertion in cycles */ 124 + u8 cyc_wpl; /* write deassertion time in cycles */ 125 + u32 cyc_iaa; /* initial access time in cycles */ 126 + 127 + /* extra delays */ 128 + bool ce_xdelay; 129 + bool avd_xdelay; 130 + bool oe_xdelay; 131 + bool we_xdelay; 132 + }; 133 + 134 + #define GPMC_BURST_4 4 /* 4 word burst */ 135 + #define GPMC_BURST_8 8 /* 8 word burst */ 136 + #define GPMC_BURST_16 16 /* 16 word burst */ 137 + #define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ 138 + #define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ 139 + #define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ 140 
+ #define GPMC_MUX_AD 2 /* Addr-Data multiplex */ 141 + 142 + struct gpmc_settings { 143 + bool burst_wrap; /* enables wrap bursting */ 144 + bool burst_read; /* enables read page/burst mode */ 145 + bool burst_write; /* enables write page/burst mode */ 146 + bool device_nand; /* device is NAND */ 147 + bool sync_read; /* enables synchronous reads */ 148 + bool sync_write; /* enables synchronous writes */ 149 + bool wait_on_read; /* monitor wait on reads */ 150 + bool wait_on_write; /* monitor wait on writes */ 151 + u32 burst_len; /* page/burst length */ 152 + u32 device_width; /* device bus width (8 or 16 bit) */ 153 + u32 mux_add_data; /* multiplex address & data */ 154 + u32 wait_pin; /* wait-pin to be used */ 155 + }; 156 + 157 + /* Data for each chip select */ 158 + struct gpmc_omap_cs_data { 159 + bool valid; /* data is valid */ 160 + bool is_nand; /* device within this CS is NAND */ 161 + struct gpmc_settings *settings; 162 + struct gpmc_device_timings *device_timings; 163 + struct gpmc_timings *gpmc_timings; 164 + struct platform_device *pdev; /* device within this CS region */ 165 + unsigned int pdata_size; 166 + }; 167 + 168 + struct gpmc_omap_platform_data { 169 + struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; 170 + }; 171 + 172 + #endif /* _GPMC_OMAP_H */
+7 -5
include/linux/platform_data/mtd-nand-omap2.h
··· 45 45 }; 46 46 47 47 struct gpmc_nand_regs { 48 - void __iomem *gpmc_status; 49 48 void __iomem *gpmc_nand_command; 50 49 void __iomem *gpmc_nand_address; 51 50 void __iomem *gpmc_nand_data; ··· 63 64 void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; 64 65 void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; 65 66 void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; 67 + /* Deprecated. Do not use */ 68 + void __iomem *gpmc_status; 66 69 }; 67 70 68 71 struct omap_nand_platform_data { 69 72 int cs; 70 73 struct mtd_partition *parts; 71 74 int nr_parts; 72 - bool dev_ready; 73 75 bool flash_bbt; 74 76 enum nand_io xfer_type; 75 77 int devsize; 76 78 enum omap_ecc ecc_opt; 77 - struct gpmc_nand_regs reg; 78 79 79 - /* for passing the partitions */ 80 - struct device_node *of_node; 81 80 struct device_node *elm_of_node; 81 + 82 + /* deprecated */ 83 + struct gpmc_nand_regs reg; 84 + struct device_node *of_node; 85 + bool dev_ready; 82 86 }; 83 87 #endif
+1 -1
include/uapi/mtd/mtd-abi.h
··· 228 228 * complete set of ECC information. The ioctl truncates the larger internal 229 229 * structure to retain binary compatibility with the static declaration of the 230 230 * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of 231 - * the user struct, not the MAX size of the internal struct nand_ecclayout. 231 + * the user struct, not the MAX size of the internal OOB layout representation. 232 232 */ 233 233 struct nand_ecclayout_user { 234 234 __u32 eccbytes;