Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd

Pull MTD updates from Richard Weinberger:
"General changes:
- Unconfuse get_unmapped_area and point/unpoint driver methods
- New partition parser: sharpslpart
- Kill GENERIC_IO
- Various fixes

NAND changes:
- Add a flag to mark NANDs that require 3 address cycles to encode a
page address
- Set a default ECC/free layout when NAND_ECC_NONE is requested
- Fix a bug in panic_nand_write()
- Another batch of cleanups for the denali driver
- Fix PM support in the atmel driver
- Remove support for platform data in the omap driver
- Fix subpage write in the omap driver
- Fix irq handling in the mtk driver
- Change link order of mtk_ecc and mtk_nand drivers to speed up boot
time
- Change log level of ECC error messages in the mxc driver
- Patch the pxa3xx driver to support Armada 8k platforms
- Add BAM DMA support to the qcom driver
- Convert gpio-nand to the GPIO desc API
- Fix ECC handling in the mt29f driver

SPI-NOR changes:
- Introduce system power management support
- New mechanism to select the proper .quad_enable() hook by JEDEC
ID, when needed, instead of only by manufacturer ID
- Add support to new memory parts from Gigadevice, Winbond, Macronix
and Everspin
- Maintenance for Cadence, Intel, Mediatek and STM32 drivers"

* tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd: (85 commits)
mtd: Avoid probe failures when mtd->dbg.dfs_dir is invalid
mtd: sharpslpart: Add sharpslpart partition parser
mtd: Add sanity checks in mtd_write/read_oob()
mtd: remove the get_unmapped_area method
mtd: implement mtd_get_unmapped_area() using the point method
mtd: chips/map_rom.c: implement point and unpoint methods
mtd: chips/map_ram.c: implement point and unpoint methods
mtd: mtdram: properly handle the phys argument in the point method
mtd: mtdswap: fix spelling mistake: 'TRESHOLD' -> 'THRESHOLD'
mtd: slram: use memremap() instead of ioremap()
kconfig: kill off GENERIC_IO option
mtd: Fix C++ comment in include/linux/mtd/mtd.h
mtd: constify mtd_partition
mtd: plat-ram: Replace manual resource management by devm
mtd: nand: Fix writing mtdoops to nand flash.
mtd: intel-spi: Add Intel Lewisburg PCH SPI super SKU PCI ID
mtd: nand: mtk: fix infinite ECC decode IRQ issue
mtd: spi-nor: Add support for mr25h128
mtd: nand: mtk: change the compile sequence of mtk_nand.o and mtk_ecc.o
mtd: spi-nor: enable 4B opcodes for mx66l51235l
...

+1650 -756
+6 -1
Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
··· 1 1 * Cadence Quad SPI controller 2 2 3 3 Required properties: 4 - - compatible : Should be "cdns,qspi-nor". 4 + - compatible : should be one of the following: 5 + Generic default - "cdns,qspi-nor". 6 + For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor". 5 7 - reg : Contains two entries, each of which is a tuple consisting of a 6 8 physical address and length. The first entry is the address and 7 9 length of the controller register set. The second entry is the ··· 16 14 17 15 Optional properties: 18 16 - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not. 17 + - cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch 18 + the read data rather than the QSPI clock. Make sure that QSPI return 19 + clock is populated on the board before using this property. 19 20 20 21 Optional subnodes: 21 22 Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
+1 -1
Documentation/devicetree/bindings/mtd/denali-nand.txt
··· 29 29 #address-cells = <1>; 30 30 #size-cells = <1>; 31 31 compatible = "altr,socfpga-denali-nand"; 32 - reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; 32 + reg = <0xff900000 0x20>, <0xffb80000 0x1000>; 33 33 reg-names = "nand_data", "denali_reg"; 34 34 interrupts = <0 144 4>; 35 35 };
+1
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
··· 14 14 at25df641 15 15 at26df081a 16 16 en25s64 17 + mr25h128 17 18 mr25h256 18 19 mr25h10 19 20 mr25h40
+9 -6
Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
··· 1 1 * Serial NOR flash controller for MTK MT81xx (and similar) 2 2 3 3 Required properties: 4 - - compatible: The possible values are: 5 - "mediatek,mt2701-nor" 6 - "mediatek,mt7623-nor" 4 + - compatible: For mt8173, compatible should be "mediatek,mt8173-nor", 5 + and it's the fallback compatible for other Soc. 6 + For every other SoC, should contain both the SoC-specific compatible 7 + string and "mediatek,mt8173-nor". 8 + The possible values are: 9 + "mediatek,mt2701-nor", "mediatek,mt8173-nor" 10 + "mediatek,mt2712-nor", "mediatek,mt8173-nor" 11 + "mediatek,mt7622-nor", "mediatek,mt8173-nor" 12 + "mediatek,mt7623-nor", "mediatek,mt8173-nor" 7 13 "mediatek,mt8173-nor" 8 - For mt8173, compatible should be "mediatek,mt8173-nor". 9 - For every other SoC, should contain both the SoC-specific compatible string 10 - and "mediatek,mt8173-nor". 11 14 - reg: physical base address and length of the controller's register 12 15 - clocks: the phandle of the clocks needed by the nor controller 13 16 - clock-names: the names of the clocks
+4
Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
··· 5 5 - compatible: Should be set to one of the following: 6 6 marvell,pxa3xx-nand 7 7 marvell,armada370-nand 8 + marvell,armada-8k-nand 8 9 - reg: The register base for the controller 9 10 - interrupts: The interrupt to map 10 11 - #address-cells: Set to <1> if the node includes partitions 12 + - marvell,system-controller: Set to retrieve the syscon node that handles 13 + NAND controller related registers (only required 14 + with marvell,armada-8k-nand compatible). 11 15 12 16 Optional properties: 13 17
+13 -6
arch/arm/mach-pxa/cm-x255.c
··· 14 14 #include <linux/mtd/partitions.h> 15 15 #include <linux/mtd/physmap.h> 16 16 #include <linux/mtd/nand-gpio.h> 17 - 17 + #include <linux/gpio/machine.h> 18 18 #include <linux/spi/spi.h> 19 19 #include <linux/spi/pxa2xx_spi.h> 20 20 ··· 176 176 #endif 177 177 178 178 #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) 179 + 180 + static struct gpiod_lookup_table cmx255_nand_gpiod_table = { 181 + .dev_id = "gpio-nand", 182 + .table = { 183 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH), 184 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH), 185 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH), 186 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH), 187 + }, 188 + }; 189 + 179 190 static struct resource cmx255_nand_resource[] = { 180 191 [0] = { 181 192 .start = PXA_CS1_PHYS, ··· 209 198 }; 210 199 211 200 static struct gpio_nand_platdata cmx255_nand_platdata = { 212 - .gpio_nce = GPIO_NAND_CS, 213 - .gpio_cle = GPIO_NAND_CLE, 214 - .gpio_ale = GPIO_NAND_ALE, 215 - .gpio_rdy = GPIO_NAND_RB, 216 - .gpio_nwp = -1, 217 201 .parts = cmx255_nand_parts, 218 202 .num_parts = ARRAY_SIZE(cmx255_nand_parts), 219 203 .chip_delay = 25, ··· 226 220 227 221 static void __init cmx255_init_nand(void) 228 222 { 223 + gpiod_add_lookup_table(&cmx255_nand_gpiod_table); 229 224 platform_device_register(&cmx255_nand); 230 225 } 231 226 #else
-1
arch/um/Kconfig.common
··· 10 10 select HAVE_DEBUG_KMEMLEAK 11 11 select GENERIC_IRQ_SHOW 12 12 select GENERIC_CPU_DEVICES 13 - select GENERIC_IO 14 13 select GENERIC_CLOCKEVENTS 15 14 select HAVE_GCC_PLUGINS 16 15 select TTY # Needed for line.c
-1
drivers/mtd/Kconfig
··· 1 1 menuconfig MTD 2 2 tristate "Memory Technology Device (MTD) support" 3 - depends on GENERIC_IO 4 3 help 5 4 Memory Technology Devices are flash, RAM and similar chips, often 6 5 used for solid state file systems on embedded devices. This option
+20 -14
drivers/mtd/chips/map_ram.c
··· 20 20 static int mapram_erase (struct mtd_info *, struct erase_info *); 21 21 static void mapram_nop (struct mtd_info *); 22 22 static struct mtd_info *map_ram_probe(struct map_info *map); 23 - static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long, 24 - unsigned long, unsigned long); 23 + static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len, 24 + size_t *retlen, void **virt, resource_size_t *phys); 25 + static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len); 25 26 26 27 27 28 static struct mtd_chip_driver mapram_chipdrv = { ··· 66 65 mtd->type = MTD_RAM; 67 66 mtd->size = map->size; 68 67 mtd->_erase = mapram_erase; 69 - mtd->_get_unmapped_area = mapram_unmapped_area; 70 68 mtd->_read = mapram_read; 71 69 mtd->_write = mapram_write; 72 70 mtd->_panic_write = mapram_write; 71 + mtd->_point = mapram_point; 73 72 mtd->_sync = mapram_nop; 73 + mtd->_unpoint = mapram_unpoint; 74 74 mtd->flags = MTD_CAP_RAM; 75 75 mtd->writesize = 1; 76 76 ··· 83 81 return mtd; 84 82 } 85 83 86 - 87 - /* 88 - * Allow NOMMU mmap() to directly map the device (if not NULL) 89 - * - return the address to which the offset maps 90 - * - return -ENOSYS to indicate refusal to do the mapping 91 - */ 92 - static unsigned long mapram_unmapped_area(struct mtd_info *mtd, 93 - unsigned long len, 94 - unsigned long offset, 95 - unsigned long flags) 84 + static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len, 85 + size_t *retlen, void **virt, resource_size_t *phys) 96 86 { 97 87 struct map_info *map = mtd->priv; 98 - return (unsigned long) map->virt + offset; 88 + 89 + if (!map->virt) 90 + return -EINVAL; 91 + *virt = map->virt + from; 92 + if (phys) 93 + *phys = map->phys + from; 94 + *retlen = len; 95 + return 0; 96 + } 97 + 98 + static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 99 + { 100 + return 0; 99 101 } 100 102 101 103 static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t 
*retlen, u_char *buf)
+21 -13
drivers/mtd/chips/map_rom.c
··· 20 20 static void maprom_nop (struct mtd_info *); 21 21 static struct mtd_info *map_rom_probe(struct map_info *map); 22 22 static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); 23 - static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long, 24 - unsigned long, unsigned long); 23 + static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len, 24 + size_t *retlen, void **virt, resource_size_t *phys); 25 + static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len); 26 + 25 27 26 28 static struct mtd_chip_driver maprom_chipdrv = { 27 29 .probe = map_rom_probe, ··· 53 51 mtd->name = map->name; 54 52 mtd->type = MTD_ROM; 55 53 mtd->size = map->size; 56 - mtd->_get_unmapped_area = maprom_unmapped_area; 54 + mtd->_point = maprom_point; 55 + mtd->_unpoint = maprom_unpoint; 57 56 mtd->_read = maprom_read; 58 57 mtd->_write = maprom_write; 59 58 mtd->_sync = maprom_nop; ··· 69 66 } 70 67 71 68 72 - /* 73 - * Allow NOMMU mmap() to directly map the device (if not NULL) 74 - * - return the address to which the offset maps 75 - * - return -ENOSYS to indicate refusal to do the mapping 76 - */ 77 - static unsigned long maprom_unmapped_area(struct mtd_info *mtd, 78 - unsigned long len, 79 - unsigned long offset, 80 - unsigned long flags) 69 + static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len, 70 + size_t *retlen, void **virt, resource_size_t *phys) 81 71 { 82 72 struct map_info *map = mtd->priv; 83 - return (unsigned long) map->virt + offset; 73 + 74 + if (!map->virt) 75 + return -EINVAL; 76 + *virt = map->virt + from; 77 + if (phys) 78 + *phys = map->phys + from; 79 + *retlen = len; 80 + return 0; 81 + } 82 + 83 + static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 84 + { 85 + return 0; 84 86 } 85 87 86 88 static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+6 -1
drivers/mtd/devices/docg3.c
··· 1814 1814 struct dentry *root = floor->dbg.dfs_dir; 1815 1815 struct docg3 *docg3 = floor->priv; 1816 1816 1817 - if (IS_ERR_OR_NULL(root)) 1817 + if (IS_ERR_OR_NULL(root)) { 1818 + if (IS_ENABLED(CONFIG_DEBUG_FS) && 1819 + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) 1820 + dev_warn(floor->dev.parent, 1821 + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); 1818 1822 return; 1823 + } 1819 1824 1820 1825 debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, 1821 1826 &flashcontrol_fops);
+1 -1
drivers/mtd/devices/lart.c
··· 583 583 } 584 584 }; 585 585 586 - static struct mtd_partition lart_partitions[] = { 586 + static const struct mtd_partition lart_partitions[] = { 587 587 /* blob */ 588 588 { 589 589 .name = "blob",
+1
drivers/mtd/devices/m25p80.c
··· 359 359 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, 360 360 361 361 /* Everspin MRAMs (non-JEDEC) */ 362 + { "mr25h128" }, /* 128 Kib, 40 MHz */ 362 363 { "mr25h256" }, /* 256 Kib, 40 MHz */ 363 364 { "mr25h10" }, /* 1 Mib, 40 MHz */ 364 365 { "mr25h40" }, /* 4 Mib, 40 MHz */
+22 -14
drivers/mtd/devices/mtdram.c
··· 13 13 #include <linux/slab.h> 14 14 #include <linux/ioport.h> 15 15 #include <linux/vmalloc.h> 16 + #include <linux/mm.h> 16 17 #include <linux/init.h> 17 18 #include <linux/mtd/mtd.h> 18 19 #include <linux/mtd/mtdram.h> ··· 70 69 { 71 70 *virt = mtd->priv + from; 72 71 *retlen = len; 72 + 73 + if (phys) { 74 + /* limit retlen to the number of contiguous physical pages */ 75 + unsigned long page_ofs = offset_in_page(*virt); 76 + void *addr = *virt - page_ofs; 77 + unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr); 78 + 79 + *phys = __pfn_to_phys(pfn0) + page_ofs; 80 + len += page_ofs; 81 + while (len > PAGE_SIZE) { 82 + len -= PAGE_SIZE; 83 + addr += PAGE_SIZE; 84 + pfn0++; 85 + pfn1 = vmalloc_to_pfn(addr); 86 + if (pfn1 != pfn0) { 87 + *retlen = addr - *virt; 88 + break; 89 + } 90 + } 91 + } 92 + 73 93 return 0; 74 94 } 75 95 76 96 static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 77 97 { 78 98 return 0; 79 - } 80 - 81 - /* 82 - * Allow NOMMU mmap() to directly map the device (if not NULL) 83 - * - return the address to which the offset maps 84 - * - return -ENOSYS to indicate refusal to do the mapping 85 - */ 86 - static unsigned long ram_get_unmapped_area(struct mtd_info *mtd, 87 - unsigned long len, 88 - unsigned long offset, 89 - unsigned long flags) 90 - { 91 - return (unsigned long) mtd->priv + offset; 92 99 } 93 100 94 101 static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, ··· 143 134 mtd->_erase = ram_erase; 144 135 mtd->_point = ram_point; 145 136 mtd->_unpoint = ram_unpoint; 146 - mtd->_get_unmapped_area = ram_get_unmapped_area; 147 137 mtd->_read = ram_read; 148 138 mtd->_write = ram_write; 149 139
+5 -4
drivers/mtd/devices/slram.c
··· 163 163 } 164 164 165 165 if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start = 166 - ioremap(start, length))) { 167 - E("slram: ioremap failed\n"); 166 + memremap(start, length, 167 + MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) { 168 + E("slram: memremap failed\n"); 168 169 return -EIO; 169 170 } 170 171 ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end = ··· 187 186 188 187 if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) { 189 188 E("slram: Failed to register new device\n"); 190 - iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); 189 + memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); 191 190 kfree((*curmtd)->mtdinfo->priv); 192 191 kfree((*curmtd)->mtdinfo); 193 192 return(-EAGAIN); ··· 207 206 while (slram_mtdlist) { 208 207 nextitem = slram_mtdlist->next; 209 208 mtd_device_unregister(slram_mtdlist->mtdinfo); 210 - iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); 209 + memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); 211 210 kfree(slram_mtdlist->mtdinfo->priv); 212 211 kfree(slram_mtdlist->mtdinfo); 213 212 kfree(slram_mtdlist);
+1 -1
drivers/mtd/maps/cfi_flagadm.c
··· 61 61 .bankwidth = 2, 62 62 }; 63 63 64 - static struct mtd_partition flagadm_parts[] = { 64 + static const struct mtd_partition flagadm_parts[] = { 65 65 { 66 66 .name = "Bootloader", 67 67 .offset = FLASH_PARTITION0_ADDR,
+1 -1
drivers/mtd/maps/impa7.c
··· 47 47 /* 48 48 * MTD partitioning stuff 49 49 */ 50 - static struct mtd_partition partitions[] = 50 + static const struct mtd_partition partitions[] = 51 51 { 52 52 { 53 53 .name = "FileSystem",
+1 -1
drivers/mtd/maps/netsc520.c
··· 52 52 /* partition_info gives details on the logical partitions that the split the 53 53 * single flash device into. If the size if zero we use up to the end of the 54 54 * device. */ 55 - static struct mtd_partition partition_info[]={ 55 + static const struct mtd_partition partition_info[] = { 56 56 { 57 57 .name = "NetSc520 boot kernel", 58 58 .offset = 0,
+1 -1
drivers/mtd/maps/nettel.c
··· 107 107 .bankwidth = AMD_BUSWIDTH, 108 108 }; 109 109 110 - static struct mtd_partition nettel_amd_partitions[] = { 110 + static const struct mtd_partition nettel_amd_partitions[] = { 111 111 { 112 112 .name = "SnapGear BIOS config", 113 113 .offset = 0x000e0000,
+4 -34
drivers/mtd/maps/plat-ram.c
··· 43 43 struct device *dev; 44 44 struct mtd_info *mtd; 45 45 struct map_info map; 46 - struct resource *area; 47 46 struct platdata_mtd_ram *pdata; 48 47 }; 49 48 ··· 96 97 97 98 platram_setrw(info, PLATRAM_RO); 98 99 99 - /* release resources */ 100 - 101 - if (info->area) { 102 - release_resource(info->area); 103 - kfree(info->area); 104 - } 105 - 106 - if (info->map.virt != NULL) 107 - iounmap(info->map.virt); 108 - 109 100 kfree(info); 110 101 111 102 return 0; ··· 136 147 info->pdata = pdata; 137 148 138 149 /* get the resource for the memory mapping */ 139 - 140 150 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 141 - 142 - if (res == NULL) { 143 - dev_err(&pdev->dev, "no memory resource specified\n"); 144 - err = -ENOENT; 151 + info->map.virt = devm_ioremap_resource(&pdev->dev, res); 152 + if (IS_ERR(info->map.virt)) { 153 + err = PTR_ERR(info->map.virt); 154 + dev_err(&pdev->dev, "failed to ioremap() region\n"); 145 155 goto exit_free; 146 156 } 147 157 ··· 155 167 (char *)pdata->mapname : (char *)pdev->name; 156 168 info->map.bankwidth = pdata->bankwidth; 157 169 158 - /* register our usage of the memory area */ 159 - 160 - info->area = request_mem_region(res->start, info->map.size, pdev->name); 161 - if (info->area == NULL) { 162 - dev_err(&pdev->dev, "failed to request memory region\n"); 163 - err = -EIO; 164 - goto exit_free; 165 - } 166 - 167 - /* remap the memory area */ 168 - 169 - info->map.virt = ioremap(res->start, info->map.size); 170 170 dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); 171 - 172 - if (info->map.virt == NULL) { 173 - dev_err(&pdev->dev, "failed to ioremap() region\n"); 174 - err = -EIO; 175 - goto exit_free; 176 - } 177 171 178 172 simple_map_init(&info->map); 179 173
+1 -1
drivers/mtd/maps/sbc_gxx.c
··· 87 87 /* partition_info gives details on the logical partitions that the split the 88 88 * single flash device into. If the size if zero we use up to the end of the 89 89 * device. */ 90 - static struct mtd_partition partition_info[]={ 90 + static const struct mtd_partition partition_info[] = { 91 91 { .name = "SBC-GXx flash boot partition", 92 92 .offset = 0, 93 93 .size = BOOT_PARTITION_SIZE_KiB*1024 },
+1 -1
drivers/mtd/maps/ts5500_flash.c
··· 43 43 .phys = WINDOW_ADDR 44 44 }; 45 45 46 - static struct mtd_partition ts5500_partitions[] = { 46 + static const struct mtd_partition ts5500_partitions[] = { 47 47 { 48 48 .name = "Drive A", 49 49 .offset = 0,
+1 -1
drivers/mtd/maps/uclinux.c
··· 49 49 50 50 /****************************************************************************/ 51 51 52 - static struct mtd_partition uclinux_romfs[] = { 52 + static const struct mtd_partition uclinux_romfs[] = { 53 53 { .name = "ROMfs" } 54 54 }; 55 55
-27
drivers/mtd/mtdconcat.c
··· 644 644 } 645 645 646 646 /* 647 - * try to support NOMMU mmaps on concatenated devices 648 - * - we don't support subdev spanning as we can't guarantee it'll work 649 - */ 650 - static unsigned long concat_get_unmapped_area(struct mtd_info *mtd, 651 - unsigned long len, 652 - unsigned long offset, 653 - unsigned long flags) 654 - { 655 - struct mtd_concat *concat = CONCAT(mtd); 656 - int i; 657 - 658 - for (i = 0; i < concat->num_subdev; i++) { 659 - struct mtd_info *subdev = concat->subdev[i]; 660 - 661 - if (offset >= subdev->size) { 662 - offset -= subdev->size; 663 - continue; 664 - } 665 - 666 - return mtd_get_unmapped_area(subdev, len, offset, flags); 667 - } 668 - 669 - return (unsigned long) -ENOSYS; 670 - } 671 - 672 - /* 673 647 * This function constructs a virtual MTD device by concatenating 674 648 * num_devs MTD devices. A pointer to the new device object is 675 649 * stored to *new_dev upon success. This function does _not_ ··· 764 790 concat->mtd._unlock = concat_unlock; 765 791 concat->mtd._suspend = concat_suspend; 766 792 concat->mtd._resume = concat_resume; 767 - concat->mtd._get_unmapped_area = concat_get_unmapped_area; 768 793 769 794 /* 770 795 * Combine the erase block size info of the subdevices:
+56 -5
drivers/mtd/mtdcore.c
··· 1022 1022 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, 1023 1023 unsigned long offset, unsigned long flags) 1024 1024 { 1025 - if (!mtd->_get_unmapped_area) 1026 - return -EOPNOTSUPP; 1027 - if (offset >= mtd->size || len > mtd->size - offset) 1028 - return -EINVAL; 1029 - return mtd->_get_unmapped_area(mtd, len, offset, flags); 1025 + size_t retlen; 1026 + void *virt; 1027 + int ret; 1028 + 1029 + ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL); 1030 + if (ret) 1031 + return ret; 1032 + if (retlen != len) { 1033 + mtd_unpoint(mtd, offset, retlen); 1034 + return -ENOSYS; 1035 + } 1036 + return (unsigned long)virt; 1030 1037 } 1031 1038 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); 1032 1039 ··· 1100 1093 } 1101 1094 EXPORT_SYMBOL_GPL(mtd_panic_write); 1102 1095 1096 + static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, 1097 + struct mtd_oob_ops *ops) 1098 + { 1099 + /* 1100 + * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving 1101 + * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in 1102 + * this case. 
1103 + */ 1104 + if (!ops->datbuf) 1105 + ops->len = 0; 1106 + 1107 + if (!ops->oobbuf) 1108 + ops->ooblen = 0; 1109 + 1110 + if (offs < 0 || offs + ops->len >= mtd->size) 1111 + return -EINVAL; 1112 + 1113 + if (ops->ooblen) { 1114 + u64 maxooblen; 1115 + 1116 + if (ops->ooboffs >= mtd_oobavail(mtd, ops)) 1117 + return -EINVAL; 1118 + 1119 + maxooblen = ((mtd_div_by_ws(mtd->size, mtd) - 1120 + mtd_div_by_ws(offs, mtd)) * 1121 + mtd_oobavail(mtd, ops)) - ops->ooboffs; 1122 + if (ops->ooblen > maxooblen) 1123 + return -EINVAL; 1124 + } 1125 + 1126 + return 0; 1127 + } 1128 + 1103 1129 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) 1104 1130 { 1105 1131 int ret_code; 1106 1132 ops->retlen = ops->oobretlen = 0; 1107 1133 if (!mtd->_read_oob) 1108 1134 return -EOPNOTSUPP; 1135 + 1136 + ret_code = mtd_check_oob_ops(mtd, from, ops); 1137 + if (ret_code) 1138 + return ret_code; 1109 1139 1110 1140 ledtrig_mtd_activity(); 1111 1141 /* ··· 1163 1119 int mtd_write_oob(struct mtd_info *mtd, loff_t to, 1164 1120 struct mtd_oob_ops *ops) 1165 1121 { 1122 + int ret; 1123 + 1166 1124 ops->retlen = ops->oobretlen = 0; 1167 1125 if (!mtd->_write_oob) 1168 1126 return -EOPNOTSUPP; 1169 1127 if (!(mtd->flags & MTD_WRITEABLE)) 1170 1128 return -EROFS; 1129 + 1130 + ret = mtd_check_oob_ops(mtd, to, ops); 1131 + if (ret) 1132 + return ret; 1133 + 1171 1134 ledtrig_mtd_activity(); 1172 1135 return mtd->_write_oob(mtd, to, ops); 1173 1136 }
-14
drivers/mtd/mtdpart.c
··· 101 101 return part->parent->_unpoint(part->parent, from + part->offset, len); 102 102 } 103 103 104 - static unsigned long part_get_unmapped_area(struct mtd_info *mtd, 105 - unsigned long len, 106 - unsigned long offset, 107 - unsigned long flags) 108 - { 109 - struct mtd_part *part = mtd_to_part(mtd); 110 - 111 - offset += part->offset; 112 - return part->parent->_get_unmapped_area(part->parent, len, offset, 113 - flags); 114 - } 115 - 116 104 static int part_read_oob(struct mtd_info *mtd, loff_t from, 117 105 struct mtd_oob_ops *ops) 118 106 { ··· 446 458 slave->mtd._unpoint = part_unpoint; 447 459 } 448 460 449 - if (parent->_get_unmapped_area) 450 - slave->mtd._get_unmapped_area = part_get_unmapped_area; 451 461 if (parent->_read_oob) 452 462 slave->mtd._read_oob = part_read_oob; 453 463 if (parent->_write_oob)
+2 -2
drivers/mtd/mtdswap.c
··· 50 50 * Number of free eraseblocks below which GC can also collect low frag 51 51 * blocks. 52 52 */ 53 - #define LOW_FRAG_GC_TRESHOLD 5 53 + #define LOW_FRAG_GC_THRESHOLD 5 54 54 55 55 /* 56 56 * Wear level cost amortization. We want to do wear leveling on the background ··· 805 805 { 806 806 int idx, stopat; 807 807 808 - if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) 808 + if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD) 809 809 stopat = MTDSWAP_LOWFRAG; 810 810 else 811 811 stopat = MTDSWAP_HIFRAG;
+4 -1
drivers/mtd/nand/Kconfig
··· 317 317 tristate "NAND support on PXA3xx and Armada 370/XP" 318 318 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU 319 319 help 320 + 320 321 This enables the driver for the NAND flash device found on 321 - PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). 322 + PXA3xx processors (NFCv1) and also on 32-bit Armada 323 + platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada 324 + platforms (7K, 8K) (NFCv2). 322 325 323 326 config MTD_NAND_SLC_LPC32XX 324 327 tristate "NXP LPC32xx SLC Controller"
+1 -1
drivers/mtd/nand/Makefile
··· 59 59 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o 60 60 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ 61 61 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o 62 - obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o 62 + obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o 63 63 64 64 nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o 65 65 nand-objs += nand_amd.o
+1 -1
drivers/mtd/nand/ams-delta.c
··· 41 41 * Define partitions for flash devices 42 42 */ 43 43 44 - static struct mtd_partition partition_info[] = { 44 + static const struct mtd_partition partition_info[] = { 45 45 { .name = "Kernel", 46 46 .offset = 0, 47 47 .size = 3 * SZ_1M + SZ_512K },
+5 -2
drivers/mtd/nand/atmel/nand-controller.c
··· 718 718 nc->op.addrs[nc->op.naddrs++] = page; 719 719 nc->op.addrs[nc->op.naddrs++] = page >> 8; 720 720 721 - if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || 722 - (mtd->writesize <= 512 && chip->chipsize > SZ_32M)) 721 + if (chip->options & NAND_ROW_ADDR_3) 723 722 nc->op.addrs[nc->op.naddrs++] = page >> 16; 724 723 } 725 724 } ··· 2529 2530 struct atmel_nand_controller *nc = dev_get_drvdata(dev); 2530 2531 struct atmel_nand *nand; 2531 2532 2533 + if (nc->pmecc) 2534 + atmel_pmecc_reset(nc->pmecc); 2535 + 2532 2536 list_for_each_entry(nand, &nc->chips, node) { 2533 2537 int i; 2534 2538 ··· 2549 2547 .driver = { 2550 2548 .name = "atmel-nand-controller", 2551 2549 .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), 2550 + .pm = &atmel_nand_controller_pm_ops, 2552 2551 }, 2553 2552 .probe = atmel_nand_controller_probe, 2554 2553 .remove = atmel_nand_controller_remove,
+9 -8
drivers/mtd/nand/atmel/pmecc.c
··· 765 765 } 766 766 EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); 767 767 768 + void atmel_pmecc_reset(struct atmel_pmecc *pmecc) 769 + { 770 + writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 771 + writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 772 + } 773 + EXPORT_SYMBOL_GPL(atmel_pmecc_reset); 774 + 768 775 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) 769 776 { 770 777 struct atmel_pmecc *pmecc = user->pmecc; ··· 804 797 805 798 void atmel_pmecc_disable(struct atmel_pmecc_user *user) 806 799 { 807 - struct atmel_pmecc *pmecc = user->pmecc; 808 - 809 - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 810 - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 800 + atmel_pmecc_reset(user->pmecc); 811 801 mutex_unlock(&user->pmecc->lock); 812 802 } 813 803 EXPORT_SYMBOL_GPL(atmel_pmecc_disable); ··· 859 855 860 856 /* Disable all interrupts before registering the PMECC handler. */ 861 857 writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); 862 - 863 - /* Reset the ECC engine */ 864 - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 865 - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 858 + atmel_pmecc_reset(pmecc); 866 859 867 860 return pmecc; 868 861 }
+1
drivers/mtd/nand/atmel/pmecc.h
··· 61 61 struct atmel_pmecc_user_req *req); 62 62 void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); 63 63 64 + void atmel_pmecc_reset(struct atmel_pmecc *pmecc); 64 65 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); 65 66 void atmel_pmecc_disable(struct atmel_pmecc_user *user); 66 67 int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
+1 -2
drivers/mtd/nand/au1550nd.c
··· 331 331 332 332 ctx->write_byte(mtd, (u8)(page_addr >> 8)); 333 333 334 - /* One more address cycle for devices > 32MiB */ 335 - if (this->chipsize > (32 << 20)) 334 + if (this->options & NAND_ROW_ADDR_3) 336 335 ctx->write_byte(mtd, 337 336 ((page_addr >> 16) & 0x0f)); 338 337 }
+1 -1
drivers/mtd/nand/cmx270_nand.c
··· 42 42 /* 43 43 * Define static partitions for flash device 44 44 */ 45 - static struct mtd_partition partition_info[] = { 45 + static const struct mtd_partition partition_info[] = { 46 46 [0] = { 47 47 .name = "cmx270-0", 48 48 .offset = 0,
+126 -165
drivers/mtd/nand/denali.c
··· 10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program; if not, write to the Free Software Foundation, Inc., 16 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 - * 18 13 */ 19 - #include <linux/interrupt.h> 20 - #include <linux/delay.h> 14 + 15 + #include <linux/bitfield.h> 16 + #include <linux/completion.h> 21 17 #include <linux/dma-mapping.h> 22 - #include <linux/wait.h> 23 - #include <linux/mutex.h> 24 - #include <linux/mtd/mtd.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/io.h> 25 20 #include <linux/module.h> 21 + #include <linux/mtd/mtd.h> 22 + #include <linux/mtd/rawnand.h> 26 23 #include <linux/slab.h> 24 + #include <linux/spinlock.h> 27 25 28 26 #include "denali.h" 29 27 ··· 29 31 30 32 #define DENALI_NAND_NAME "denali-nand" 31 33 32 - /* Host Data/Command Interface */ 33 - #define DENALI_HOST_ADDR 0x00 34 - #define DENALI_HOST_DATA 0x10 34 + /* for Indexed Addressing */ 35 + #define DENALI_INDEXED_CTRL 0x00 36 + #define DENALI_INDEXED_DATA 0x10 35 37 36 38 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 37 39 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ ··· 59 61 */ 60 62 #define DENALI_CLK_X_MULT 6 61 63 62 - /* 63 - * this macro allows us to convert from an MTD structure to our own 64 - * device context (denali) structure. 65 - */ 66 64 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 67 65 { 68 66 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 69 67 } 70 68 71 - static void denali_host_write(struct denali_nand_info *denali, 72 - uint32_t addr, uint32_t data) 69 + /* 70 + * Direct Addressing - the slave address forms the control information (command 71 + * type, bank, block, and page address). 
The slave data is the actual data to 72 + * be transferred. This mode requires 28 bits of address region allocated. 73 + */ 74 + static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 73 75 { 74 - iowrite32(addr, denali->host + DENALI_HOST_ADDR); 75 - iowrite32(data, denali->host + DENALI_HOST_DATA); 76 + return ioread32(denali->host + addr); 77 + } 78 + 79 + static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 80 + u32 data) 81 + { 82 + iowrite32(data, denali->host + addr); 83 + } 84 + 85 + /* 86 + * Indexed Addressing - address translation module intervenes in passing the 87 + * control information. This mode reduces the required address range. The 88 + * control information and transferred data are latched by the registers in 89 + * the translation module. 90 + */ 91 + static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 92 + { 93 + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 94 + return ioread32(denali->host + DENALI_INDEXED_DATA); 95 + } 96 + 97 + static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 98 + u32 data) 99 + { 100 + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 101 + iowrite32(data, denali->host + DENALI_INDEXED_DATA); 76 102 } 77 103 78 104 /* 79 105 * Use the configuration feature register to determine the maximum number of 80 106 * banks that the hardware supports. 
81 107 */ 82 - static void detect_max_banks(struct denali_nand_info *denali) 108 + static void denali_detect_max_banks(struct denali_nand_info *denali) 83 109 { 84 110 uint32_t features = ioread32(denali->reg + FEATURES); 85 111 86 - denali->max_banks = 1 << (features & FEATURES__N_BANKS); 112 + denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 87 113 88 114 /* the encoding changed from rev 5.0 to 5.1 */ 89 115 if (denali->revision < 0x0501) ··· 211 189 msecs_to_jiffies(1000)); 212 190 if (!time_left) { 213 191 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 214 - denali->irq_mask); 192 + irq_mask); 215 193 return 0; 216 194 } 217 195 ··· 230 208 return irq_status; 231 209 } 232 210 233 - /* 234 - * This helper function setups the registers for ECC and whether or not 235 - * the spare area will be transferred. 236 - */ 237 - static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, 238 - bool transfer_spare) 239 - { 240 - int ecc_en_flag, transfer_spare_flag; 241 - 242 - /* set ECC, transfer spare bits if needed */ 243 - ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; 244 - transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; 245 - 246 - /* Enable spare area/ECC per user's request. 
*/ 247 - iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE); 248 - iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG); 249 - } 250 - 251 211 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 252 212 { 253 213 struct denali_nand_info *denali = mtd_to_denali(mtd); 214 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 254 215 int i; 255 216 256 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 257 - denali->host + DENALI_HOST_ADDR); 258 - 259 217 for (i = 0; i < len; i++) 260 - buf[i] = ioread32(denali->host + DENALI_HOST_DATA); 218 + buf[i] = denali->host_read(denali, addr); 261 219 } 262 220 263 221 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 264 222 { 265 223 struct denali_nand_info *denali = mtd_to_denali(mtd); 224 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 266 225 int i; 267 226 268 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 269 - denali->host + DENALI_HOST_ADDR); 270 - 271 227 for (i = 0; i < len; i++) 272 - iowrite32(buf[i], denali->host + DENALI_HOST_DATA); 228 + denali->host_write(denali, addr, buf[i]); 273 229 } 274 230 275 231 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 276 232 { 277 233 struct denali_nand_info *denali = mtd_to_denali(mtd); 234 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 278 235 uint16_t *buf16 = (uint16_t *)buf; 279 236 int i; 280 237 281 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 282 - denali->host + DENALI_HOST_ADDR); 283 - 284 238 for (i = 0; i < len / 2; i++) 285 - buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); 239 + buf16[i] = denali->host_read(denali, addr); 286 240 } 287 241 288 242 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, 289 243 int len) 290 244 { 291 245 struct denali_nand_info *denali = mtd_to_denali(mtd); 246 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 292 247 const uint16_t *buf16 = (const uint16_t *)buf; 293 248 int i; 294 249 295 
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 296 - denali->host + DENALI_HOST_ADDR); 297 - 298 250 for (i = 0; i < len / 2; i++) 299 - iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); 251 + denali->host_write(denali, addr, buf16[i]); 300 252 } 301 253 302 254 static uint8_t denali_read_byte(struct mtd_info *mtd) ··· 315 319 if (ctrl & NAND_CTRL_CHANGE) 316 320 denali_reset_irq(denali); 317 321 318 - denali_host_write(denali, DENALI_BANK(denali) | type, dat); 322 + denali->host_write(denali, DENALI_BANK(denali) | type, dat); 319 323 } 320 324 321 325 static int denali_dev_ready(struct mtd_info *mtd) ··· 385 389 return 0; 386 390 } 387 391 388 - max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; 392 + max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 389 393 390 394 /* 391 395 * The register holds the maximum of per-sector corrected bitflips. ··· 397 401 398 402 return max_bitflips; 399 403 } 400 - 401 - #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) 402 - #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) 403 - #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) 404 - #define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE) 405 - #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) 406 - #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 407 404 408 405 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 409 406 struct denali_nand_info *denali, ··· 415 426 416 427 do { 417 428 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 418 - err_sector = ECC_SECTOR(err_addr); 419 - err_byte = ECC_BYTE(err_addr); 429 + err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 430 + err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 420 431 421 432 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 422 - err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); 423 - err_device = ECC_ERR_DEVICE(err_cor_info); 433 + err_cor_value = 
FIELD_GET(ERR_CORRECTION_INFO__BYTE, 434 + err_cor_info); 435 + err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 436 + err_cor_info); 424 437 425 438 /* reset the bitflip counter when crossing ECC sector */ 426 439 if (err_sector != prev_sector) 427 440 bitflips = 0; 428 441 429 - if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { 442 + if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 430 443 /* 431 444 * Check later if this is a real ECC error, or 432 445 * an erased sector. ··· 458 467 } 459 468 460 469 prev_sector = err_sector; 461 - } while (!ECC_LAST_ERR(err_cor_info)); 470 + } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 462 471 463 472 /* 464 - * Once handle all ecc errors, controller will trigger a 465 - * ECC_TRANSACTION_DONE interrupt, so here just wait for 466 - * a while for this interrupt 473 + * Once handle all ECC errors, controller will trigger an 474 + * ECC_TRANSACTION_DONE interrupt. 467 475 */ 468 476 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 469 477 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 470 478 return -EIO; 471 479 472 480 return max_bitflips; 473 - } 474 - 475 - /* programs the controller to either enable/disable DMA transfers */ 476 - static void denali_enable_dma(struct denali_nand_info *denali, bool en) 477 - { 478 - iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE); 479 - ioread32(denali->reg + DMA_ENABLE); 480 481 } 481 482 482 483 static void denali_setup_dma64(struct denali_nand_info *denali, ··· 485 502 * 1. setup transfer type, interrupt when complete, 486 503 * burst len = 64 bytes, the number of pages 487 504 */ 488 - denali_host_write(denali, mode, 489 - 0x01002000 | (64 << 16) | (write << 8) | page_count); 505 + denali->host_write(denali, mode, 506 + 0x01002000 | (64 << 16) | (write << 8) | page_count); 490 507 491 508 /* 2. 
set memory low address */ 492 - denali_host_write(denali, mode, dma_addr); 509 + denali->host_write(denali, mode, lower_32_bits(dma_addr)); 493 510 494 511 /* 3. set memory high address */ 495 - denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); 512 + denali->host_write(denali, mode, upper_32_bits(dma_addr)); 496 513 } 497 514 498 515 static void denali_setup_dma32(struct denali_nand_info *denali, ··· 506 523 /* DMA is a four step process */ 507 524 508 525 /* 1. setup transfer type and # of pages */ 509 - denali_host_write(denali, mode | page, 510 - 0x2000 | (write << 8) | page_count); 526 + denali->host_write(denali, mode | page, 527 + 0x2000 | (write << 8) | page_count); 511 528 512 529 /* 2. set memory high address bits 23:8 */ 513 - denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 530 + denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 514 531 515 532 /* 3. set memory low address bits 23:8 */ 516 - denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 533 + denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 517 534 518 535 /* 4. 
interrupt when complete, burst len = 64 bytes */ 519 - denali_host_write(denali, mode | 0x14000, 0x2400); 520 - } 521 - 522 - static void denali_setup_dma(struct denali_nand_info *denali, 523 - dma_addr_t dma_addr, int page, int write) 524 - { 525 - if (denali->caps & DENALI_CAP_DMA_64BIT) 526 - denali_setup_dma64(denali, dma_addr, page, write); 527 - else 528 - denali_setup_dma32(denali, dma_addr, page, write); 536 + denali->host_write(denali, mode | 0x14000, 0x2400); 529 537 } 530 538 531 539 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 532 540 size_t size, int page, int raw) 533 541 { 534 - uint32_t addr = DENALI_BANK(denali) | page; 542 + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 535 543 uint32_t *buf32 = (uint32_t *)buf; 536 544 uint32_t irq_status, ecc_err_mask; 537 545 int i; ··· 534 560 535 561 denali_reset_irq(denali); 536 562 537 - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); 538 563 for (i = 0; i < size / 4; i++) 539 - *buf32++ = ioread32(denali->host + DENALI_HOST_DATA); 564 + *buf32++ = denali->host_read(denali, addr); 540 565 541 566 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 542 567 if (!(irq_status & INTR__PAGE_XFER_INC)) ··· 550 577 static int denali_pio_write(struct denali_nand_info *denali, 551 578 const void *buf, size_t size, int page, int raw) 552 579 { 553 - uint32_t addr = DENALI_BANK(denali) | page; 580 + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 554 581 const uint32_t *buf32 = (uint32_t *)buf; 555 582 uint32_t irq_status; 556 583 int i; 557 584 558 585 denali_reset_irq(denali); 559 586 560 - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); 561 587 for (i = 0; i < size / 4; i++) 562 - iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); 588 + denali->host_write(denali, addr, *buf32++); 563 589 564 590 irq_status = denali_wait_for_irq(denali, 565 591 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); ··· 607 635 ecc_err_mask = INTR__ECC_ERR; 608 
636 } 609 637 610 - denali_enable_dma(denali, true); 638 + iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 611 639 612 640 denali_reset_irq(denali); 613 - denali_setup_dma(denali, dma_addr, page, write); 641 + denali->setup_dma(denali, dma_addr, page, write); 614 642 615 - /* wait for operation to complete */ 616 643 irq_status = denali_wait_for_irq(denali, irq_mask); 617 644 if (!(irq_status & INTR__DMA_CMD_COMP)) 618 645 ret = -EIO; 619 646 else if (irq_status & ecc_err_mask) 620 647 ret = -EBADMSG; 621 648 622 - denali_enable_dma(denali, false); 649 + iowrite32(0, denali->reg + DMA_ENABLE); 650 + 623 651 dma_unmap_single(denali->dev, dma_addr, size, dir); 624 652 625 653 if (irq_status & INTR__ERASED_PAGE) ··· 631 659 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 632 660 size_t size, int page, int raw, int write) 633 661 { 634 - setup_ecc_for_xfer(denali, !raw, raw); 662 + iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 663 + iowrite32(raw ? 
TRANSFER_SPARE_REG__FLAG : 0, 664 + denali->reg + TRANSFER_SPARE_REG); 635 665 636 666 if (denali->dma_avail) 637 667 return denali_dma_xfer(denali, buf, size, page, raw, write); ··· 944 970 945 971 denali_reset_irq(denali); 946 972 947 - denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 948 - DENALI_ERASE); 973 + denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 974 + DENALI_ERASE); 949 975 950 976 /* wait for erase to complete or failure to occur */ 951 977 irq_status = denali_wait_for_irq(denali, ··· 983 1009 984 1010 tmp = ioread32(denali->reg + ACC_CLKS); 985 1011 tmp &= ~ACC_CLKS__VALUE; 986 - tmp |= acc_clks; 1012 + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 987 1013 iowrite32(tmp, denali->reg + ACC_CLKS); 988 1014 989 1015 /* tRWH -> RE_2_WE */ ··· 992 1018 993 1019 tmp = ioread32(denali->reg + RE_2_WE); 994 1020 tmp &= ~RE_2_WE__VALUE; 995 - tmp |= re_2_we; 1021 + tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 996 1022 iowrite32(tmp, denali->reg + RE_2_WE); 997 1023 998 1024 /* tRHZ -> RE_2_RE */ ··· 1001 1027 1002 1028 tmp = ioread32(denali->reg + RE_2_RE); 1003 1029 tmp &= ~RE_2_RE__VALUE; 1004 - tmp |= re_2_re; 1030 + tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 1005 1031 iowrite32(tmp, denali->reg + RE_2_RE); 1006 1032 1007 - /* tWHR -> WE_2_RE */ 1008 - we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); 1033 + /* 1034 + * tCCS, tWHR -> WE_2_RE 1035 + * 1036 + * With WE_2_RE properly set, the Denali controller automatically takes 1037 + * care of the delay; the driver need not set NAND_WAIT_TCCS. 
1038 + */ 1039 + we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), 1040 + t_clk); 1009 1041 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 1010 1042 1011 1043 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 1012 1044 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 1013 - tmp |= we_2_re; 1045 + tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 1014 1046 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 1015 1047 1016 1048 /* tADL -> ADDR_2_DATA */ ··· 1030 1050 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 1031 1051 1032 1052 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 1033 - tmp &= ~addr_2_data_mask; 1034 - tmp |= addr_2_data; 1053 + tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1054 + tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 1035 1055 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 1036 1056 1037 1057 /* tREH, tWH -> RDWR_EN_HI_CNT */ ··· 1041 1061 1042 1062 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 1043 1063 tmp &= ~RDWR_EN_HI_CNT__VALUE; 1044 - tmp |= rdwr_en_hi; 1064 + tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 1045 1065 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 1046 1066 1047 1067 /* tRP, tWP -> RDWR_EN_LO_CNT */ ··· 1055 1075 1056 1076 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1057 1077 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1058 - tmp |= rdwr_en_lo; 1078 + tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1059 1079 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1060 1080 1061 1081 /* tCS, tCEA -> CS_SETUP_CNT */ ··· 1066 1086 1067 1087 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1068 1088 tmp &= ~CS_SETUP_CNT__VALUE; 1069 - tmp |= cs_setup; 1089 + tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1070 1090 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1071 1091 1072 1092 return 0; ··· 1111 1131 * if this value is 0, just let it be. 
1112 1132 */ 1113 1133 denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); 1114 - detect_max_banks(denali); 1134 + denali_detect_max_banks(denali); 1115 1135 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1116 1136 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1117 1137 1118 1138 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1119 - 1120 - /* Should set value for these registers when init */ 1121 - iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES); 1122 - iowrite32(1, denali->reg + ECC_ENABLE); 1123 1139 } 1124 1140 1125 1141 int denali_calc_ecc_bytes(int step_size, int strength) ··· 1187 1211 .free = denali_ooblayout_free, 1188 1212 }; 1189 1213 1190 - /* initialize driver data structures */ 1191 - static void denali_drv_init(struct denali_nand_info *denali) 1192 - { 1193 - /* 1194 - * the completion object will be used to notify 1195 - * the callee that the interrupt is done 1196 - */ 1197 - init_completion(&denali->complete); 1198 - 1199 - /* 1200 - * the spinlock will be used to synchronize the ISR with any 1201 - * element that might be access shared data (interrupt status) 1202 - */ 1203 - spin_lock_init(&denali->irq_lock); 1204 - } 1205 - 1206 1214 static int denali_multidev_fixup(struct denali_nand_info *denali) 1207 1215 { 1208 1216 struct nand_chip *chip = &denali->nand; ··· 1242 1282 { 1243 1283 struct nand_chip *chip = &denali->nand; 1244 1284 struct mtd_info *mtd = nand_to_mtd(chip); 1285 + u32 features = ioread32(denali->reg + FEATURES); 1245 1286 int ret; 1246 1287 1247 1288 mtd->dev.parent = denali->dev; 1248 1289 denali_hw_init(denali); 1249 - denali_drv_init(denali); 1290 + 1291 + init_completion(&denali->complete); 1292 + spin_lock_init(&denali->irq_lock); 1250 1293 1251 1294 denali_clear_irq_all(denali); 1252 1295 1253 - /* Request IRQ after all the hardware initialization is finished */ 1254 1296 ret = devm_request_irq(denali->dev, denali->irq, denali_isr, 1255 1297 IRQF_SHARED, 
DENALI_NAND_NAME, denali); 1256 1298 if (ret) { ··· 1270 1308 if (!mtd->name) 1271 1309 mtd->name = "denali-nand"; 1272 1310 1273 - /* register the driver with the NAND core subsystem */ 1274 1311 chip->select_chip = denali_select_chip; 1275 1312 chip->read_byte = denali_read_byte; 1276 1313 chip->write_byte = denali_write_byte; ··· 1278 1317 chip->dev_ready = denali_dev_ready; 1279 1318 chip->waitfunc = denali_waitfunc; 1280 1319 1320 + if (features & FEATURES__INDEX_ADDR) { 1321 + denali->host_read = denali_indexed_read; 1322 + denali->host_write = denali_indexed_write; 1323 + } else { 1324 + denali->host_read = denali_direct_read; 1325 + denali->host_write = denali_direct_write; 1326 + } 1327 + 1281 1328 /* clk rate info is needed for setup_data_interface */ 1282 1329 if (denali->clk_x_rate) 1283 1330 chip->setup_data_interface = denali_setup_data_interface; 1284 1331 1285 - /* 1286 - * scan for NAND devices attached to the controller 1287 - * this is the first stage in a two step process to register 1288 - * with the nand subsystem 1289 - */ 1290 1332 ret = nand_scan_ident(mtd, denali->max_banks, NULL); 1291 1333 if (ret) 1292 1334 goto disable_irq; ··· 1311 1347 if (denali->dma_avail) { 1312 1348 chip->options |= NAND_USE_BOUNCE_BUFFER; 1313 1349 chip->buf_align = 16; 1350 + if (denali->caps & DENALI_CAP_DMA_64BIT) 1351 + denali->setup_dma = denali_setup_dma64; 1352 + else 1353 + denali->setup_dma = denali_setup_dma32; 1314 1354 } 1315 - 1316 - /* 1317 - * second stage of the NAND scan 1318 - * this stage requires information regarding ECC and 1319 - * bad block management. 
1320 - */ 1321 1355 1322 1356 chip->bbt_options |= NAND_BBT_USE_FLASH; 1323 1357 chip->bbt_options |= NAND_BBT_NO_OOB; 1324 - 1325 1358 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 1326 - 1327 - /* no subpage writes on denali */ 1328 1359 chip->options |= NAND_NO_SUBPAGE_WRITE; 1329 1360 1330 1361 ret = denali_ecc_setup(mtd, chip, denali); ··· 1332 1373 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1333 1374 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1334 1375 1335 - iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), 1376 + iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1377 + FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1336 1378 denali->reg + ECC_CORRECTION); 1337 1379 iowrite32(mtd->erasesize / mtd->writesize, 1338 1380 denali->reg + PAGES_PER_BLOCK); 1339 1381 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1340 1382 denali->reg + DEVICE_WIDTH); 1383 + iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1384 + denali->reg + TWO_ROW_ADDR_CYCLES); 1341 1385 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1342 1386 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1343 1387 ··· 1403 1441 } 1404 1442 EXPORT_SYMBOL(denali_init); 1405 1443 1406 - /* driver exit point */ 1407 1444 void denali_remove(struct denali_nand_info *denali) 1408 1445 { 1409 1446 struct mtd_info *mtd = nand_to_mtd(&denali->nand);
+20 -24
drivers/mtd/nand/denali.h
··· 10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program; if not, write to the Free Software Foundation, Inc., 16 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 - * 18 13 */ 19 14 20 15 #ifndef __DENALI_H__ 21 16 #define __DENALI_H__ 22 17 23 18 #include <linux/bitops.h> 19 + #include <linux/completion.h> 24 20 #include <linux/mtd/rawnand.h> 21 + #include <linux/spinlock_types.h> 22 + #include <linux/types.h> 25 23 26 24 #define DEVICE_RESET 0x0 27 25 #define DEVICE_RESET__BANK(bank) BIT(bank) ··· 109 111 #define ECC_CORRECTION 0x1b0 110 112 #define ECC_CORRECTION__VALUE GENMASK(4, 0) 111 113 #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) 112 - #define MAKE_ECC_CORRECTION(val, thresh) \ 113 - (((val) & (ECC_CORRECTION__VALUE)) | \ 114 - (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD))) 115 114 116 115 #define READ_MODE 0x1c0 117 116 #define READ_MODE__VALUE GENMASK(3, 0) ··· 250 255 251 256 #define ECC_ERROR_ADDRESS 0x630 252 257 #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) 253 - #define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) 258 + #define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12) 254 259 255 260 #define ERR_CORRECTION_INFO 0x640 256 - #define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) 257 - #define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) 258 - #define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) 259 - #define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) 261 + #define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0) 262 + #define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8) 263 + #define ERR_CORRECTION_INFO__UNCOR BIT(14) 264 + #define ERR_CORRECTION_INFO__LAST_ERR BIT(15) 260 265 261 266 #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) 262 267 #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) 
··· 305 310 struct device *dev; 306 311 void __iomem *reg; /* Register Interface */ 307 312 void __iomem *host; /* Host Data/Command Interface */ 308 - 309 - /* elements used by ISR */ 310 313 struct completion complete; 311 - spinlock_t irq_lock; 312 - uint32_t irq_mask; 313 - uint32_t irq_status; 314 + spinlock_t irq_lock; /* protect irq_mask and irq_status */ 315 + u32 irq_mask; /* interrupts we are waiting for */ 316 + u32 irq_status; /* interrupts that have happened */ 314 317 int irq; 315 - 316 - void *buf; 318 + void *buf; /* for syndrome layout conversion */ 317 319 dma_addr_t dma_addr; 318 - int dma_avail; 320 + int dma_avail; /* can support DMA? */ 319 321 int devs_per_cs; /* devices connected in parallel */ 320 - int oob_skip_bytes; 322 + int oob_skip_bytes; /* number of bytes reserved for BBM */ 321 323 int max_banks; 322 - unsigned int revision; 323 - unsigned int caps; 324 + unsigned int revision; /* IP revision */ 325 + unsigned int caps; /* IP capability (or quirk) */ 324 326 const struct nand_ecc_caps *ecc_caps; 327 + u32 (*host_read)(struct denali_nand_info *denali, u32 addr); 328 + void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data); 329 + void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr, 330 + int page, int write); 325 331 }; 326 332 327 333 #define DENALI_CAP_HW_ECC_FIXUP BIT(0)
+2 -2
drivers/mtd/nand/denali_dt.c
··· 12 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 13 * more details. 14 14 */ 15 + 15 16 #include <linux/clk.h> 16 17 #include <linux/err.h> 17 18 #include <linux/io.h> 18 19 #include <linux/ioport.h> 19 20 #include <linux/kernel.h> 20 21 #include <linux/module.h> 21 - #include <linux/platform_device.h> 22 22 #include <linux/of.h> 23 23 #include <linux/of_device.h> 24 + #include <linux/platform_device.h> 24 25 25 26 #include "denali.h" 26 27 ··· 156 155 .of_match_table = denali_nand_dt_ids, 157 156 }, 158 157 }; 159 - 160 158 module_platform_driver(denali_dt_driver); 161 159 162 160 MODULE_LICENSE("GPL");
+3 -2
drivers/mtd/nand/denali_pci.c
··· 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 13 */ 14 + 15 + #include <linux/errno.h> 16 + #include <linux/io.h> 14 17 #include <linux/kernel.h> 15 18 #include <linux/module.h> 16 19 #include <linux/pci.h> ··· 109 106 return ret; 110 107 } 111 108 112 - /* driver exit point */ 113 109 static void denali_pci_remove(struct pci_dev *dev) 114 110 { 115 111 struct denali_nand_info *denali = pci_get_drvdata(dev); ··· 124 122 .probe = denali_pci_probe, 125 123 .remove = denali_pci_remove, 126 124 }; 127 - 128 125 module_pci_driver(denali_pci_driver);
+1 -2
drivers/mtd/nand/diskonchip.c
··· 705 705 if (page_addr != -1) { 706 706 WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); 707 707 WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); 708 - /* One more address cycle for higher density devices */ 709 - if (this->chipsize & 0x0c000000) { 708 + if (this->options & NAND_ROW_ADDR_3) { 710 709 WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); 711 710 printk("high density\n"); 712 711 }
+57 -55
drivers/mtd/nand/gpio.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/module.h> 25 25 #include <linux/platform_device.h> 26 - #include <linux/gpio.h> 26 + #include <linux/gpio/consumer.h> 27 27 #include <linux/io.h> 28 28 #include <linux/mtd/mtd.h> 29 29 #include <linux/mtd/rawnand.h> ··· 31 31 #include <linux/mtd/nand-gpio.h> 32 32 #include <linux/of.h> 33 33 #include <linux/of_address.h> 34 - #include <linux/of_gpio.h> 35 34 36 35 struct gpiomtd { 37 36 void __iomem *io_sync; 38 37 struct nand_chip nand_chip; 39 38 struct gpio_nand_platdata plat; 39 + struct gpio_desc *nce; /* Optional chip enable */ 40 + struct gpio_desc *cle; 41 + struct gpio_desc *ale; 42 + struct gpio_desc *rdy; 43 + struct gpio_desc *nwp; /* Optional write protection */ 40 44 }; 41 45 42 46 static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) ··· 82 78 gpio_nand_dosync(gpiomtd); 83 79 84 80 if (ctrl & NAND_CTRL_CHANGE) { 85 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) 86 - gpio_set_value(gpiomtd->plat.gpio_nce, 87 - !(ctrl & NAND_NCE)); 88 - gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); 89 - gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); 81 + if (gpiomtd->nce) 82 + gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE)); 83 + gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE)); 84 + gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE)); 90 85 gpio_nand_dosync(gpiomtd); 91 86 } 92 87 if (cmd == NAND_CMD_NONE) ··· 99 96 { 100 97 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); 101 98 102 - return gpio_get_value(gpiomtd->plat.gpio_rdy); 99 + return gpiod_get_value(gpiomtd->rdy); 103 100 } 104 101 105 102 #ifdef CONFIG_OF ··· 125 122 return -EINVAL; 126 123 } 127 124 } 128 - 129 - plat->gpio_rdy = of_get_gpio(dev->of_node, 0); 130 - plat->gpio_nce = of_get_gpio(dev->of_node, 1); 131 - plat->gpio_ale = of_get_gpio(dev->of_node, 2); 132 - plat->gpio_cle = of_get_gpio(dev->of_node, 3); 133 - plat->gpio_nwp = of_get_gpio(dev->of_node, 4); 134 125 135 126 if 
(!of_property_read_u32(dev->of_node, "chip-delay", &val)) 136 127 plat->chip_delay = val; ··· 198 201 199 202 nand_release(nand_to_mtd(&gpiomtd->nand_chip)); 200 203 201 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 202 - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 203 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) 204 - gpio_set_value(gpiomtd->plat.gpio_nce, 1); 204 + /* Enable write protection and disable the chip */ 205 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 206 + gpiod_set_value(gpiomtd->nwp, 0); 207 + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) 208 + gpiod_set_value(gpiomtd->nce, 0); 205 209 206 210 return 0; 207 211 } ··· 213 215 struct nand_chip *chip; 214 216 struct mtd_info *mtd; 215 217 struct resource *res; 218 + struct device *dev = &pdev->dev; 216 219 int ret = 0; 217 220 218 - if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev)) 221 + if (!dev->of_node && !dev_get_platdata(dev)) 219 222 return -EINVAL; 220 223 221 - gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); 224 + gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL); 222 225 if (!gpiomtd) 223 226 return -ENOMEM; 224 227 225 228 chip = &gpiomtd->nand_chip; 226 229 227 230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 228 - chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); 231 + chip->IO_ADDR_R = devm_ioremap_resource(dev, res); 229 232 if (IS_ERR(chip->IO_ADDR_R)) 230 233 return PTR_ERR(chip->IO_ADDR_R); 231 234 232 235 res = gpio_nand_get_io_sync(pdev); 233 236 if (res) { 234 - gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res); 237 + gpiomtd->io_sync = devm_ioremap_resource(dev, res); 235 238 if (IS_ERR(gpiomtd->io_sync)) 236 239 return PTR_ERR(gpiomtd->io_sync); 237 240 } 238 241 239 - ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat); 242 + ret = gpio_nand_get_config(dev, &gpiomtd->plat); 240 243 if (ret) 241 244 return ret; 242 245 243 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { 244 - ret = devm_gpio_request(&pdev->dev, 
gpiomtd->plat.gpio_nce, 245 - "NAND NCE"); 246 - if (ret) 247 - return ret; 248 - gpio_direction_output(gpiomtd->plat.gpio_nce, 1); 246 + /* Just enable the chip */ 247 + gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH); 248 + if (IS_ERR(gpiomtd->nce)) 249 + return PTR_ERR(gpiomtd->nce); 250 + 251 + /* We disable write protection once we know probe() will succeed */ 252 + gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW); 253 + if (IS_ERR(gpiomtd->nwp)) { 254 + ret = PTR_ERR(gpiomtd->nwp); 255 + goto out_ce; 249 256 } 250 257 251 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { 252 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, 253 - "NAND NWP"); 254 - if (ret) 255 - return ret; 258 + gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW); 259 + if (IS_ERR(gpiomtd->nwp)) { 260 + ret = PTR_ERR(gpiomtd->nwp); 261 + goto out_ce; 256 262 } 257 263 258 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE"); 259 - if (ret) 260 - return ret; 261 - gpio_direction_output(gpiomtd->plat.gpio_ale, 0); 264 + gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW); 265 + if (IS_ERR(gpiomtd->cle)) { 266 + ret = PTR_ERR(gpiomtd->cle); 267 + goto out_ce; 268 + } 262 269 263 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE"); 264 - if (ret) 265 - return ret; 266 - gpio_direction_output(gpiomtd->plat.gpio_cle, 0); 267 - 268 - if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) { 269 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy, 270 - "NAND RDY"); 271 - if (ret) 272 - return ret; 273 - gpio_direction_input(gpiomtd->plat.gpio_rdy); 270 + gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN); 271 + if (IS_ERR(gpiomtd->rdy)) { 272 + ret = PTR_ERR(gpiomtd->rdy); 273 + goto out_ce; 274 + } 275 + /* Using RDY pin */ 276 + if (gpiomtd->rdy) 274 277 chip->dev_ready = gpio_nand_devready; 275 - } 276 278 277 279 nand_set_flash_node(chip, pdev->dev.of_node); 278 280 chip->IO_ADDR_W = 
chip->IO_ADDR_R; ··· 283 285 chip->cmd_ctrl = gpio_nand_cmd_ctrl; 284 286 285 287 mtd = nand_to_mtd(chip); 286 - mtd->dev.parent = &pdev->dev; 288 + mtd->dev.parent = dev; 287 289 288 290 platform_set_drvdata(pdev, gpiomtd); 289 291 290 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 291 - gpio_direction_output(gpiomtd->plat.gpio_nwp, 1); 292 + /* Disable write protection, if wired up */ 293 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 294 + gpiod_direction_output(gpiomtd->nwp, 1); 292 295 293 296 ret = nand_scan(mtd, 1); 294 297 if (ret) ··· 304 305 return 0; 305 306 306 307 err_wp: 307 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 308 - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 308 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 309 + gpiod_set_value(gpiomtd->nwp, 0); 310 + out_ce: 311 + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) 312 + gpiod_set_value(gpiomtd->nce, 0); 309 313 310 314 return ret; 311 315 }
+1 -2
drivers/mtd/nand/hisi504_nand.c
··· 432 432 host->addr_value[0] |= (page_addr & 0xffff) 433 433 << (host->addr_cycle * 8); 434 434 host->addr_cycle += 2; 435 - /* One more address cycle for devices > 128MiB */ 436 - if (chip->chipsize > (128 << 20)) { 435 + if (chip->options & NAND_ROW_ADDR_3) { 437 436 host->addr_cycle += 1; 438 437 if (host->command == NAND_CMD_ERASE1) 439 438 host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+11 -2
drivers/mtd/nand/mtk_ecc.c
··· 115 115 op = ECC_DECODE; 116 116 dec = readw(ecc->regs + ECC_DECDONE); 117 117 if (dec & ecc->sectors) { 118 + /* 119 + * Clear decode IRQ status once again to ensure that 120 + * there will be no extra IRQ. 121 + */ 122 + readw(ecc->regs + ECC_DECIRQ_STA); 118 123 ecc->sectors = 0; 119 124 complete(&ecc->done); 120 125 } else { ··· 134 129 return IRQ_NONE; 135 130 } 136 131 } 137 - 138 - writel(0, ecc->regs + ECC_IRQ_REG(op)); 139 132 140 133 return IRQ_HANDLED; 141 134 } ··· 310 307 311 308 /* disable it */ 312 309 mtk_ecc_wait_idle(ecc, op); 310 + if (op == ECC_DECODE) 311 + /* 312 + * Clear decode IRQ status in case there is a timeout to wait 313 + * decode IRQ. 314 + */ 315 + readw(ecc->regs + ECC_DECIRQ_STA); 313 316 writew(0, ecc->regs + ECC_IRQ_REG(op)); 314 317 writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); 315 318
+9 -10
drivers/mtd/nand/mxc_nand.c
··· 415 415 * waits for completion. */ 416 416 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 417 417 { 418 - pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); 418 + dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); 419 419 420 420 writew(cmd, NFC_V1_V2_FLASH_CMD); 421 421 writew(NFC_CMD, NFC_V1_V2_CONFIG2); ··· 431 431 udelay(1); 432 432 } 433 433 if (max_retries < 0) 434 - pr_debug("%s: RESET failed\n", __func__); 434 + dev_dbg(host->dev, "%s: RESET failed\n", __func__); 435 435 } else { 436 436 /* Wait for operation to complete */ 437 437 wait_op_done(host, useirq); ··· 454 454 * a NAND command. */ 455 455 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 456 456 { 457 - pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); 457 + dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast); 458 458 459 459 writew(addr, NFC_V1_V2_FLASH_ADDR); 460 460 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); ··· 607 607 uint16_t ecc_status = get_ecc_status_v1(host); 608 608 609 609 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 610 - pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 610 + dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n"); 611 611 return -EBADMSG; 612 612 } 613 613 ··· 634 634 do { 635 635 err = ecc_stat & ecc_bit_mask; 636 636 if (err > err_limit) { 637 - printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); 637 + dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n"); 638 638 return -EBADMSG; 639 639 } else { 640 640 ret += err; ··· 642 642 ecc_stat >>= 4; 643 643 } while (--no_subpages); 644 644 645 - pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); 645 + dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret); 646 646 647 647 return ret; 648 648 } ··· 673 673 host->buf_start++; 674 674 } 675 675 676 - pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); 676 + dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, 
ret, host->buf_start); 677 677 return ret; 678 678 } 679 679 ··· 859 859 host->devtype_data->send_addr(host, 860 860 (page_addr >> 8) & 0xff, true); 861 861 } else { 862 - /* One more address cycle for higher density devices */ 863 - if (mtd->size >= 0x4000000) { 862 + if (nand_chip->options & NAND_ROW_ADDR_3) { 864 863 /* paddr_8 - paddr_15 */ 865 864 host->devtype_data->send_addr(host, 866 865 (page_addr >> 8) & 0xff, ··· 1211 1212 struct nand_chip *nand_chip = mtd_to_nand(mtd); 1212 1213 struct mxc_nand_host *host = nand_get_controller_data(nand_chip); 1213 1214 1214 - pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 1215 + dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 1215 1216 command, column, page_addr); 1216 1217 1217 1218 /* Reset command state information */
+26 -8
drivers/mtd/nand/nand_base.c
··· 115 115 struct nand_chip *chip = mtd_to_nand(mtd); 116 116 struct nand_ecc_ctrl *ecc = &chip->ecc; 117 117 118 - if (section) 118 + if (section || !ecc->total) 119 119 return -ERANGE; 120 120 121 121 oobregion->length = ecc->total; ··· 727 727 chip->cmd_ctrl(mtd, page_addr, ctrl); 728 728 ctrl &= ~NAND_CTRL_CHANGE; 729 729 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); 730 - /* One more address cycle for devices > 32MiB */ 731 - if (chip->chipsize > (32 << 20)) 730 + if (chip->options & NAND_ROW_ADDR_3) 732 731 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); 733 732 } 734 733 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); ··· 853 854 chip->cmd_ctrl(mtd, page_addr, ctrl); 854 855 chip->cmd_ctrl(mtd, page_addr >> 8, 855 856 NAND_NCE | NAND_ALE); 856 - /* One more address cycle for devices > 128MiB */ 857 - if (chip->chipsize > (128 << 20)) 857 + if (chip->options & NAND_ROW_ADDR_3) 858 858 chip->cmd_ctrl(mtd, page_addr >> 16, 859 859 NAND_NCE | NAND_ALE); 860 860 } ··· 1244 1246 1245 1247 return 0; 1246 1248 } 1249 + EXPORT_SYMBOL_GPL(nand_reset); 1247 1250 1248 1251 /** 1249 1252 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data ··· 2798 2799 size_t *retlen, const uint8_t *buf) 2799 2800 { 2800 2801 struct nand_chip *chip = mtd_to_nand(mtd); 2802 + int chipnr = (int)(to >> chip->chip_shift); 2801 2803 struct mtd_oob_ops ops; 2802 2804 int ret; 2803 2805 2804 - /* Wait for the device to get ready */ 2805 - panic_nand_wait(mtd, chip, 400); 2806 - 2807 2806 /* Grab the device */ 2808 2807 panic_nand_get_device(chip, mtd, FL_WRITING); 2808 + 2809 + chip->select_chip(mtd, chipnr); 2810 + 2811 + /* Wait for the device to get ready */ 2812 + panic_nand_wait(mtd, chip, 400); 2809 2813 2810 2814 memset(&ops, 0, sizeof(ops)); 2811 2815 ops.len = len; ··· 4001 3999 chip->chip_shift += 32 - 1; 4002 4000 } 4003 4001 4002 + if (chip->chip_shift - chip->page_shift > 16) 4003 + chip->options |= NAND_ROW_ADDR_3; 4004 + 4004 4005 
chip->badblockbits = 8; 4005 4006 chip->erase = single_erase; 4006 4007 ··· 4705 4700 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); 4706 4701 break; 4707 4702 default: 4703 + /* 4704 + * Expose the whole OOB area to users if ECC_NONE 4705 + * is passed. We could do that for all kind of 4706 + * ->oobsize, but we must keep the old large/small 4707 + * page with ECC layout when ->oobsize <= 128 for 4708 + * compatibility reasons. 4709 + */ 4710 + if (ecc->mode == NAND_ECC_NONE) { 4711 + mtd_set_ooblayout(mtd, 4712 + &nand_ooblayout_lp_ops); 4713 + break; 4714 + } 4715 + 4708 4716 WARN(1, "No oob scheme defined for oobsize %d\n", 4709 4717 mtd->oobsize); 4710 4718 ret = -EINVAL;
+9 -4
drivers/mtd/nand/nandsim.c
··· 520 520 struct dentry *root = nsmtd->dbg.dfs_dir; 521 521 struct dentry *dent; 522 522 523 - if (!IS_ENABLED(CONFIG_DEBUG_FS)) 523 + /* 524 + * Just skip debugfs initialization when the debugfs directory is 525 + * missing. 526 + */ 527 + if (IS_ERR_OR_NULL(root)) { 528 + if (IS_ENABLED(CONFIG_DEBUG_FS) && 529 + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) 530 + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); 524 531 return 0; 525 - 526 - if (IS_ERR_OR_NULL(root)) 527 - return -1; 532 + } 528 533 529 534 dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, 530 535 root, dev, &dfs_fops);
+1 -1
drivers/mtd/nand/nuc900_nand.c
··· 154 154 if (page_addr != -1) { 155 155 write_addr_reg(nand, page_addr); 156 156 157 - if (chip->chipsize > (128 << 20)) { 157 + if (chip->options & NAND_ROW_ADDR_3) { 158 158 write_addr_reg(nand, page_addr >> 8); 159 159 write_addr_reg(nand, page_addr >> 16 | ENDADDR); 160 160 } else {
+232 -145
drivers/mtd/nand/omap2.c
··· 1133 1133 0x97, 0x79, 0xe5, 0x24, 0xb5}; 1134 1134 1135 1135 /** 1136 - * omap_calculate_ecc_bch - Generate bytes of ECC bytes 1136 + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector 1137 1137 * @mtd: MTD device structure 1138 1138 * @dat: The pointer to data on which ecc is computed 1139 1139 * @ecc_code: The ecc_code buffer 1140 + * @i: The sector number (for a multi sector page) 1140 1141 * 1141 - * Support calculating of BCH4/8 ecc vectors for the page 1142 + * Support calculating of BCH4/8/16 ECC vectors for one sector 1143 + * within a page. Sector number is in @i. 1142 1144 */ 1143 - static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, 1144 - const u_char *dat, u_char *ecc_calc) 1145 + static int _omap_calculate_ecc_bch(struct mtd_info *mtd, 1146 + const u_char *dat, u_char *ecc_calc, int i) 1145 1147 { 1146 1148 struct omap_nand_info *info = mtd_to_omap(mtd); 1147 1149 int eccbytes = info->nand.ecc.bytes; 1148 1150 struct gpmc_nand_regs *gpmc_regs = &info->reg; 1149 1151 u8 *ecc_code; 1150 - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; 1152 + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; 1151 1153 u32 val; 1152 - int i, j; 1154 + int j; 1155 + 1156 + ecc_code = ecc_calc; 1157 + switch (info->ecc_opt) { 1158 + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1159 + case OMAP_ECC_BCH8_CODE_HW: 1160 + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1161 + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1162 + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); 1163 + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); 1164 + *ecc_code++ = (bch_val4 & 0xFF); 1165 + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); 1166 + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); 1167 + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); 1168 + *ecc_code++ = (bch_val3 & 0xFF); 1169 + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); 1170 + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); 1171 + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); 1172 + *ecc_code++ = (bch_val2 
& 0xFF); 1173 + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); 1174 + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); 1175 + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); 1176 + *ecc_code++ = (bch_val1 & 0xFF); 1177 + break; 1178 + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1179 + case OMAP_ECC_BCH4_CODE_HW: 1180 + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1181 + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1182 + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); 1183 + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); 1184 + *ecc_code++ = ((bch_val2 & 0xF) << 4) | 1185 + ((bch_val1 >> 28) & 0xF); 1186 + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); 1187 + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); 1188 + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); 1189 + *ecc_code++ = ((bch_val1 & 0xF) << 4); 1190 + break; 1191 + case OMAP_ECC_BCH16_CODE_HW: 1192 + val = readl(gpmc_regs->gpmc_bch_result6[i]); 1193 + ecc_code[0] = ((val >> 8) & 0xFF); 1194 + ecc_code[1] = ((val >> 0) & 0xFF); 1195 + val = readl(gpmc_regs->gpmc_bch_result5[i]); 1196 + ecc_code[2] = ((val >> 24) & 0xFF); 1197 + ecc_code[3] = ((val >> 16) & 0xFF); 1198 + ecc_code[4] = ((val >> 8) & 0xFF); 1199 + ecc_code[5] = ((val >> 0) & 0xFF); 1200 + val = readl(gpmc_regs->gpmc_bch_result4[i]); 1201 + ecc_code[6] = ((val >> 24) & 0xFF); 1202 + ecc_code[7] = ((val >> 16) & 0xFF); 1203 + ecc_code[8] = ((val >> 8) & 0xFF); 1204 + ecc_code[9] = ((val >> 0) & 0xFF); 1205 + val = readl(gpmc_regs->gpmc_bch_result3[i]); 1206 + ecc_code[10] = ((val >> 24) & 0xFF); 1207 + ecc_code[11] = ((val >> 16) & 0xFF); 1208 + ecc_code[12] = ((val >> 8) & 0xFF); 1209 + ecc_code[13] = ((val >> 0) & 0xFF); 1210 + val = readl(gpmc_regs->gpmc_bch_result2[i]); 1211 + ecc_code[14] = ((val >> 24) & 0xFF); 1212 + ecc_code[15] = ((val >> 16) & 0xFF); 1213 + ecc_code[16] = ((val >> 8) & 0xFF); 1214 + ecc_code[17] = ((val >> 0) & 0xFF); 1215 + val = readl(gpmc_regs->gpmc_bch_result1[i]); 1216 + ecc_code[18] = ((val >> 24) & 0xFF); 1217 + ecc_code[19] = ((val >> 16) & 0xFF); 1218 + 
ecc_code[20] = ((val >> 8) & 0xFF); 1219 + ecc_code[21] = ((val >> 0) & 0xFF); 1220 + val = readl(gpmc_regs->gpmc_bch_result0[i]); 1221 + ecc_code[22] = ((val >> 24) & 0xFF); 1222 + ecc_code[23] = ((val >> 16) & 0xFF); 1223 + ecc_code[24] = ((val >> 8) & 0xFF); 1224 + ecc_code[25] = ((val >> 0) & 0xFF); 1225 + break; 1226 + default: 1227 + return -EINVAL; 1228 + } 1229 + 1230 + /* ECC scheme specific syndrome customizations */ 1231 + switch (info->ecc_opt) { 1232 + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1233 + /* Add constant polynomial to remainder, so that 1234 + * ECC of blank pages results in 0x0 on reading back 1235 + */ 1236 + for (j = 0; j < eccbytes; j++) 1237 + ecc_calc[j] ^= bch4_polynomial[j]; 1238 + break; 1239 + case OMAP_ECC_BCH4_CODE_HW: 1240 + /* Set 8th ECC byte as 0x0 for ROM compatibility */ 1241 + ecc_calc[eccbytes - 1] = 0x0; 1242 + break; 1243 + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1244 + /* Add constant polynomial to remainder, so that 1245 + * ECC of blank pages results in 0x0 on reading back 1246 + */ 1247 + for (j = 0; j < eccbytes; j++) 1248 + ecc_calc[j] ^= bch8_polynomial[j]; 1249 + break; 1250 + case OMAP_ECC_BCH8_CODE_HW: 1251 + /* Set 14th ECC byte as 0x0 for ROM compatibility */ 1252 + ecc_calc[eccbytes - 1] = 0x0; 1253 + break; 1254 + case OMAP_ECC_BCH16_CODE_HW: 1255 + break; 1256 + default: 1257 + return -EINVAL; 1258 + } 1259 + 1260 + return 0; 1261 + } 1262 + 1263 + /** 1264 + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction 1265 + * @mtd: MTD device structure 1266 + * @dat: The pointer to data on which ecc is computed 1267 + * @ecc_code: The ecc_code buffer 1268 + * 1269 + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used 1270 + * when SW based correction is required as ECC is required for one sector 1271 + * at a time. 
1272 + */ 1273 + static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, 1274 + const u_char *dat, u_char *ecc_calc) 1275 + { 1276 + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); 1277 + } 1278 + 1279 + /** 1280 + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors 1281 + * @mtd: MTD device structure 1282 + * @dat: The pointer to data on which ecc is computed 1283 + * @ecc_code: The ecc_code buffer 1284 + * 1285 + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. 1286 + */ 1287 + static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, 1288 + const u_char *dat, u_char *ecc_calc) 1289 + { 1290 + struct omap_nand_info *info = mtd_to_omap(mtd); 1291 + int eccbytes = info->nand.ecc.bytes; 1292 + unsigned long nsectors; 1293 + int i, ret; 1153 1294 1154 1295 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; 1155 1296 for (i = 0; i < nsectors; i++) { 1156 - ecc_code = ecc_calc; 1157 - switch (info->ecc_opt) { 1158 - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1159 - case OMAP_ECC_BCH8_CODE_HW: 1160 - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1161 - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1162 - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); 1163 - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); 1164 - *ecc_code++ = (bch_val4 & 0xFF); 1165 - *ecc_code++ = ((bch_val3 >> 24) & 0xFF); 1166 - *ecc_code++ = ((bch_val3 >> 16) & 0xFF); 1167 - *ecc_code++ = ((bch_val3 >> 8) & 0xFF); 1168 - *ecc_code++ = (bch_val3 & 0xFF); 1169 - *ecc_code++ = ((bch_val2 >> 24) & 0xFF); 1170 - *ecc_code++ = ((bch_val2 >> 16) & 0xFF); 1171 - *ecc_code++ = ((bch_val2 >> 8) & 0xFF); 1172 - *ecc_code++ = (bch_val2 & 0xFF); 1173 - *ecc_code++ = ((bch_val1 >> 24) & 0xFF); 1174 - *ecc_code++ = ((bch_val1 >> 16) & 0xFF); 1175 - *ecc_code++ = ((bch_val1 >> 8) & 0xFF); 1176 - *ecc_code++ = (bch_val1 & 0xFF); 1177 - break; 1178 - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1179 - case OMAP_ECC_BCH4_CODE_HW: 1180 - 
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1181 - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1182 - *ecc_code++ = ((bch_val2 >> 12) & 0xFF); 1183 - *ecc_code++ = ((bch_val2 >> 4) & 0xFF); 1184 - *ecc_code++ = ((bch_val2 & 0xF) << 4) | 1185 - ((bch_val1 >> 28) & 0xF); 1186 - *ecc_code++ = ((bch_val1 >> 20) & 0xFF); 1187 - *ecc_code++ = ((bch_val1 >> 12) & 0xFF); 1188 - *ecc_code++ = ((bch_val1 >> 4) & 0xFF); 1189 - *ecc_code++ = ((bch_val1 & 0xF) << 4); 1190 - break; 1191 - case OMAP_ECC_BCH16_CODE_HW: 1192 - val = readl(gpmc_regs->gpmc_bch_result6[i]); 1193 - ecc_code[0] = ((val >> 8) & 0xFF); 1194 - ecc_code[1] = ((val >> 0) & 0xFF); 1195 - val = readl(gpmc_regs->gpmc_bch_result5[i]); 1196 - ecc_code[2] = ((val >> 24) & 0xFF); 1197 - ecc_code[3] = ((val >> 16) & 0xFF); 1198 - ecc_code[4] = ((val >> 8) & 0xFF); 1199 - ecc_code[5] = ((val >> 0) & 0xFF); 1200 - val = readl(gpmc_regs->gpmc_bch_result4[i]); 1201 - ecc_code[6] = ((val >> 24) & 0xFF); 1202 - ecc_code[7] = ((val >> 16) & 0xFF); 1203 - ecc_code[8] = ((val >> 8) & 0xFF); 1204 - ecc_code[9] = ((val >> 0) & 0xFF); 1205 - val = readl(gpmc_regs->gpmc_bch_result3[i]); 1206 - ecc_code[10] = ((val >> 24) & 0xFF); 1207 - ecc_code[11] = ((val >> 16) & 0xFF); 1208 - ecc_code[12] = ((val >> 8) & 0xFF); 1209 - ecc_code[13] = ((val >> 0) & 0xFF); 1210 - val = readl(gpmc_regs->gpmc_bch_result2[i]); 1211 - ecc_code[14] = ((val >> 24) & 0xFF); 1212 - ecc_code[15] = ((val >> 16) & 0xFF); 1213 - ecc_code[16] = ((val >> 8) & 0xFF); 1214 - ecc_code[17] = ((val >> 0) & 0xFF); 1215 - val = readl(gpmc_regs->gpmc_bch_result1[i]); 1216 - ecc_code[18] = ((val >> 24) & 0xFF); 1217 - ecc_code[19] = ((val >> 16) & 0xFF); 1218 - ecc_code[20] = ((val >> 8) & 0xFF); 1219 - ecc_code[21] = ((val >> 0) & 0xFF); 1220 - val = readl(gpmc_regs->gpmc_bch_result0[i]); 1221 - ecc_code[22] = ((val >> 24) & 0xFF); 1222 - ecc_code[23] = ((val >> 16) & 0xFF); 1223 - ecc_code[24] = ((val >> 8) & 0xFF); 1224 - ecc_code[25] = ((val >> 0) & 
0xFF); 1225 - break; 1226 - default: 1227 - return -EINVAL; 1228 - } 1297 + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); 1298 + if (ret) 1299 + return ret; 1229 1300 1230 - /* ECC scheme specific syndrome customizations */ 1231 - switch (info->ecc_opt) { 1232 - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1233 - /* Add constant polynomial to remainder, so that 1234 - * ECC of blank pages results in 0x0 on reading back */ 1235 - for (j = 0; j < eccbytes; j++) 1236 - ecc_calc[j] ^= bch4_polynomial[j]; 1237 - break; 1238 - case OMAP_ECC_BCH4_CODE_HW: 1239 - /* Set 8th ECC byte as 0x0 for ROM compatibility */ 1240 - ecc_calc[eccbytes - 1] = 0x0; 1241 - break; 1242 - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1243 - /* Add constant polynomial to remainder, so that 1244 - * ECC of blank pages results in 0x0 on reading back */ 1245 - for (j = 0; j < eccbytes; j++) 1246 - ecc_calc[j] ^= bch8_polynomial[j]; 1247 - break; 1248 - case OMAP_ECC_BCH8_CODE_HW: 1249 - /* Set 14th ECC byte as 0x0 for ROM compatibility */ 1250 - ecc_calc[eccbytes - 1] = 0x0; 1251 - break; 1252 - case OMAP_ECC_BCH16_CODE_HW: 1253 - break; 1254 - default: 1255 - return -EINVAL; 1256 - } 1257 - 1258 - ecc_calc += eccbytes; 1301 + ecc_calc += eccbytes; 1259 1302 } 1260 1303 1261 1304 return 0; ··· 1539 1496 chip->write_buf(mtd, buf, mtd->writesize); 1540 1497 1541 1498 /* Update ecc vector from GPMC result registers */ 1542 - chip->ecc.calculate(mtd, buf, &ecc_calc[0]); 1499 + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); 1543 1500 1544 1501 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 1545 1502 chip->ecc.total); ··· 1548 1505 1549 1506 /* Write ecc vector to OOB area */ 1550 1507 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1508 + return 0; 1509 + } 1510 + 1511 + /** 1512 + * omap_write_subpage_bch - BCH hardware ECC based subpage write 1513 + * @mtd: mtd info structure 1514 + * @chip: nand chip info structure 1515 + * @offset: column address of subpage within the 
page 1516 + * @data_len: data length 1517 + * @buf: data buffer 1518 + * @oob_required: must write chip->oob_poi to OOB 1519 + * @page: page number to write 1520 + * 1521 + * OMAP optimized subpage write method. 1522 + */ 1523 + static int omap_write_subpage_bch(struct mtd_info *mtd, 1524 + struct nand_chip *chip, u32 offset, 1525 + u32 data_len, const u8 *buf, 1526 + int oob_required, int page) 1527 + { 1528 + u8 *ecc_calc = chip->buffers->ecccalc; 1529 + int ecc_size = chip->ecc.size; 1530 + int ecc_bytes = chip->ecc.bytes; 1531 + int ecc_steps = chip->ecc.steps; 1532 + u32 start_step = offset / ecc_size; 1533 + u32 end_step = (offset + data_len - 1) / ecc_size; 1534 + int step, ret = 0; 1535 + 1536 + /* 1537 + * Write entire page at one go as it would be optimal 1538 + * as ECC is calculated by hardware. 1539 + * ECC is calculated for all subpages but we choose 1540 + * only what we want. 1541 + */ 1542 + 1543 + /* Enable GPMC ECC engine */ 1544 + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 1545 + 1546 + /* Write data */ 1547 + chip->write_buf(mtd, buf, mtd->writesize); 1548 + 1549 + for (step = 0; step < ecc_steps; step++) { 1550 + /* mask ECC of un-touched subpages by padding 0xFF */ 1551 + if (step < start_step || step > end_step) 1552 + memset(ecc_calc, 0xff, ecc_bytes); 1553 + else 1554 + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); 1555 + 1556 + if (ret) 1557 + return ret; 1558 + 1559 + buf += ecc_size; 1560 + ecc_calc += ecc_bytes; 1561 + } 1562 + 1563 + /* copy calculated ECC for whole page to chip->buffer->oob */ 1564 + /* this include masked-value(0xFF) for unwritten subpages */ 1565 + ecc_calc = chip->buffers->ecccalc; 1566 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 1567 + chip->ecc.total); 1568 + if (ret) 1569 + return ret; 1570 + 1571 + /* write OOB buffer to NAND device */ 1572 + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1573 + 1551 1574 return 0; 1552 1575 } 1553 1576 ··· 1653 1544 chip->ecc.total); 1654 
1545 1655 1546 /* Calculate ecc bytes */ 1656 - chip->ecc.calculate(mtd, buf, ecc_calc); 1547 + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); 1657 1548 1658 1549 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 1659 1550 chip->ecc.total); ··· 1697 1588 return true; 1698 1589 } 1699 1590 1700 - static bool omap2_nand_ecc_check(struct omap_nand_info *info, 1701 - struct omap_nand_platform_data *pdata) 1591 + static bool omap2_nand_ecc_check(struct omap_nand_info *info) 1702 1592 { 1703 1593 bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm; 1704 1594 ··· 1912 1804 static int omap_nand_probe(struct platform_device *pdev) 1913 1805 { 1914 1806 struct omap_nand_info *info; 1915 - struct omap_nand_platform_data *pdata = NULL; 1916 1807 struct mtd_info *mtd; 1917 1808 struct nand_chip *nand_chip; 1918 1809 int err; ··· 1928 1821 1929 1822 info->pdev = pdev; 1930 1823 1931 - if (dev->of_node) { 1932 - if (omap_get_dt_info(dev, info)) 1933 - return -EINVAL; 1934 - } else { 1935 - pdata = dev_get_platdata(&pdev->dev); 1936 - if (!pdata) { 1937 - dev_err(&pdev->dev, "platform data missing\n"); 1938 - return -EINVAL; 1939 - } 1824 + err = omap_get_dt_info(dev, info); 1825 + if (err) 1826 + return err; 1940 1827 1941 - info->gpmc_cs = pdata->cs; 1942 - info->reg = pdata->reg; 1943 - info->ecc_opt = pdata->ecc_opt; 1944 - if (pdata->dev_ready) 1945 - dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n"); 1946 - 1947 - info->xfer_type = pdata->xfer_type; 1948 - info->devsize = pdata->devsize; 1949 - info->elm_of_node = pdata->elm_of_node; 1950 - info->flash_bbt = pdata->flash_bbt; 1951 - } 1952 - 1953 - platform_set_drvdata(pdev, info); 1954 1828 info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); 1955 1829 if (!info->ops) { 1956 1830 dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); ··· 2090 2002 goto return_error; 2091 2003 } 2092 2004 2093 - if (!omap2_nand_ecc_check(info, pdata)) { 2005 + if (!omap2_nand_ecc_check(info)) { 
2094 2006 err = -EINVAL; 2095 2007 goto return_error; 2096 2008 } ··· 2132 2044 nand_chip->ecc.strength = 4; 2133 2045 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2134 2046 nand_chip->ecc.correct = nand_bch_correct_data; 2135 - nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2047 + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; 2136 2048 mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); 2137 2049 /* Reserve one byte for the OMAP marker */ 2138 2050 oobbytes_per_step = nand_chip->ecc.bytes + 1; ··· 2154 2066 nand_chip->ecc.strength = 4; 2155 2067 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2156 2068 nand_chip->ecc.correct = omap_elm_correct_data; 2157 - nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2158 2069 nand_chip->ecc.read_page = omap_read_page_bch; 2159 2070 nand_chip->ecc.write_page = omap_write_page_bch; 2071 + nand_chip->ecc.write_subpage = omap_write_subpage_bch; 2160 2072 mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 2161 2073 oobbytes_per_step = nand_chip->ecc.bytes; 2162 2074 ··· 2175 2087 nand_chip->ecc.strength = 8; 2176 2088 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2177 2089 nand_chip->ecc.correct = nand_bch_correct_data; 2178 - nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2090 + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; 2179 2091 mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); 2180 2092 /* Reserve one byte for the OMAP marker */ 2181 2093 oobbytes_per_step = nand_chip->ecc.bytes + 1; ··· 2197 2109 nand_chip->ecc.strength = 8; 2198 2110 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2199 2111 nand_chip->ecc.correct = omap_elm_correct_data; 2200 - nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2201 2112 nand_chip->ecc.read_page = omap_read_page_bch; 2202 2113 nand_chip->ecc.write_page = omap_write_page_bch; 2114 + nand_chip->ecc.write_subpage = omap_write_subpage_bch; 2203 2115 mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 2204 2116 oobbytes_per_step = nand_chip->ecc.bytes; 2205 2117 ··· 2219 2131 
nand_chip->ecc.strength = 16; 2220 2132 nand_chip->ecc.hwctl = omap_enable_hwecc_bch; 2221 2133 nand_chip->ecc.correct = omap_elm_correct_data; 2222 - nand_chip->ecc.calculate = omap_calculate_ecc_bch; 2223 2134 nand_chip->ecc.read_page = omap_read_page_bch; 2224 2135 nand_chip->ecc.write_page = omap_write_page_bch; 2136 + nand_chip->ecc.write_subpage = omap_write_subpage_bch; 2225 2137 mtd_set_ooblayout(mtd, &omap_ooblayout_ops); 2226 2138 oobbytes_per_step = nand_chip->ecc.bytes; 2227 2139 ··· 2255 2167 if (err) 2256 2168 goto return_error; 2257 2169 2258 - if (dev->of_node) 2259 - mtd_device_register(mtd, NULL, 0); 2260 - else 2261 - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 2170 + err = mtd_device_register(mtd, NULL, 0); 2171 + if (err) 2172 + goto return_error; 2262 2173 2263 2174 platform_set_drvdata(pdev, mtd); 2264 2175
+37 -4
drivers/mtd/nand/pxa3xx_nand.c
··· 30 30 #include <linux/of.h> 31 31 #include <linux/of_device.h> 32 32 #include <linux/platform_data/mtd-nand-pxa3xx.h> 33 + #include <linux/mfd/syscon.h> 34 + #include <linux/regmap.h> 33 35 34 36 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) 35 37 #define NAND_STOP_DELAY msecs_to_jiffies(40) ··· 46 44 * Hence this buffer should be at least 512 x 3. Let's pick 2048. 47 45 */ 48 46 #define INIT_BUFFER_SIZE 2048 47 + 48 + /* System control register and bit to enable NAND on some SoCs */ 49 + #define GENCONF_SOC_DEVICE_MUX 0x208 50 + #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0) 49 51 50 52 /* registers and bit definitions */ 51 53 #define NDCR (0x00) /* Control register */ ··· 180 174 enum pxa3xx_nand_variant { 181 175 PXA3XX_NAND_VARIANT_PXA, 182 176 PXA3XX_NAND_VARIANT_ARMADA370, 177 + PXA3XX_NAND_VARIANT_ARMADA_8K, 183 178 }; 184 179 185 180 struct pxa3xx_nand_host { ··· 431 424 { 432 425 .compatible = "marvell,armada370-nand", 433 426 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, 427 + }, 428 + { 429 + .compatible = "marvell,armada-8k-nand", 430 + .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K, 434 431 }, 435 432 {} 436 433 }; ··· 836 825 info->retcode = ERR_UNCORERR; 837 826 if (status & NDSR_CORERR) { 838 827 info->retcode = ERR_CORERR; 839 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && 828 + if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || 829 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) && 840 830 info->ecc_bch) 841 831 info->ecc_err_cnt = NDSR_ERR_CNT(status); 842 832 else ··· 900 888 nand_writel(info, NDCB0, info->ndcb2); 901 889 902 890 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ 903 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) 891 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || 892 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) 904 893 nand_writel(info, NDCB0, info->ndcb3); 905 894 } 906 895 ··· 1684 1671 chip->options |= NAND_BUSWIDTH_16; 1685 1672 1686 1673 /* Device detection must be done 
with ECC disabled */ 1687 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) 1674 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || 1675 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) 1688 1676 nand_writel(info, NDECCCTRL, 0x0); 1689 1677 1690 1678 if (pdata->flash_bbt) ··· 1723 1709 * (aka splitted) command handling, 1724 1710 */ 1725 1711 if (mtd->writesize > PAGE_CHUNK_SIZE) { 1726 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { 1712 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || 1713 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) { 1727 1714 chip->cmdfunc = nand_cmdfunc_extended; 1728 1715 } else { 1729 1716 dev_err(&info->pdev->dev, ··· 1942 1927 1943 1928 if (!of_id) 1944 1929 return 0; 1930 + 1931 + /* 1932 + * Some SoCs like A7k/A8k need to enable manually the NAND 1933 + * controller to avoid being bootloader dependent. This is done 1934 + * through the use of a single bit in the System Functions registers. 1935 + */ 1936 + if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) { 1937 + struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle( 1938 + pdev->dev.of_node, "marvell,system-controller"); 1939 + u32 reg; 1940 + 1941 + if (IS_ERR(sysctrl_base)) 1942 + return PTR_ERR(sysctrl_base); 1943 + 1944 + regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg); 1945 + reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN; 1946 + regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg); 1947 + } 1945 1948 1946 1949 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1947 1950 if (!pdata)
+110 -17
drivers/mtd/nand/qcom_nandc.c
··· 22 22 #include <linux/of.h> 23 23 #include <linux/of_device.h> 24 24 #include <linux/delay.h> 25 + #include <linux/dma/qcom_bam_dma.h> 25 26 26 27 /* NANDc reg offsets */ 27 28 #define NAND_FLASH_CMD 0x00 ··· 200 199 */ 201 200 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) 202 201 202 + /* Returns the NAND register physical address */ 203 + #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) 204 + 205 + /* Returns the dma address for reg read buffer */ 206 + #define reg_buf_dma_addr(chip, vaddr) \ 207 + ((chip)->reg_read_dma + \ 208 + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) 209 + 210 + #define QPIC_PER_CW_CMD_ELEMENTS 32 203 211 #define QPIC_PER_CW_CMD_SGL 32 204 212 #define QPIC_PER_CW_DATA_SGL 8 205 213 ··· 231 221 /* 232 222 * This data type corresponds to the BAM transaction which will be used for all 233 223 * NAND transfers. 224 + * @bam_ce - the array of BAM command elements 234 225 * @cmd_sgl - sgl for NAND BAM command pipe 235 226 * @data_sgl - sgl for NAND BAM consumer/producer pipe 227 + * @bam_ce_pos - the index in bam_ce which is available for next sgl 228 + * @bam_ce_start - the index in bam_ce which marks the start position ce 229 + * for current sgl. It will be used for size calculation 230 + * for current sgl 236 231 * @cmd_sgl_pos - current index in command sgl. 237 232 * @cmd_sgl_start - start index in command sgl. 238 233 * @tx_sgl_pos - current index in data sgl for tx. ··· 246 231 * @rx_sgl_start - start index in data sgl for rx. 
247 232 */ 248 233 struct bam_transaction { 234 + struct bam_cmd_element *bam_ce; 249 235 struct scatterlist *cmd_sgl; 250 236 struct scatterlist *data_sgl; 237 + u32 bam_ce_pos; 238 + u32 bam_ce_start; 251 239 u32 cmd_sgl_pos; 252 240 u32 cmd_sgl_start; 253 241 u32 tx_sgl_pos; ··· 325 307 * controller 326 308 * @dev: parent device 327 309 * @base: MMIO base 328 - * @base_dma: physical base address of controller registers 310 + * @base_phys: physical base address of controller registers 311 + * @base_dma: dma base address of controller registers 329 312 * @core_clk: controller clock 330 313 * @aon_clk: another controller clock 331 314 * ··· 359 340 struct device *dev; 360 341 361 342 void __iomem *base; 343 + phys_addr_t base_phys; 362 344 dma_addr_t base_dma; 363 345 364 346 struct clk *core_clk; ··· 482 462 483 463 bam_txn_size = 484 464 sizeof(*bam_txn) + num_cw * 485 - ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + 465 + ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + 466 + (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + 486 467 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); 487 468 488 469 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); ··· 492 471 493 472 bam_txn = bam_txn_buf; 494 473 bam_txn_buf += sizeof(*bam_txn); 474 + 475 + bam_txn->bam_ce = bam_txn_buf; 476 + bam_txn_buf += 477 + sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; 495 478 496 479 bam_txn->cmd_sgl = bam_txn_buf; 497 480 bam_txn_buf += ··· 514 489 if (!nandc->props->is_bam) 515 490 return; 516 491 492 + bam_txn->bam_ce_pos = 0; 493 + bam_txn->bam_ce_start = 0; 517 494 bam_txn->cmd_sgl_pos = 0; 518 495 bam_txn->cmd_sgl_start = 0; 519 496 bam_txn->tx_sgl_pos = 0; ··· 761 734 } 762 735 763 736 /* 737 + * Prepares the command descriptor for BAM DMA which will be used for NAND 738 + * register reads and writes. 
The command descriptor requires the command 739 + * to be formed in command element type so this function uses the command 740 + * element from bam transaction ce array and fills the same with required 741 + * data. A single SGL can contain multiple command elements so 742 + * NAND_BAM_NEXT_SGL will be used for starting the separate SGL 743 + * after the current command element. 744 + */ 745 + static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, 746 + int reg_off, const void *vaddr, 747 + int size, unsigned int flags) 748 + { 749 + int bam_ce_size; 750 + int i, ret; 751 + struct bam_cmd_element *bam_ce_buffer; 752 + struct bam_transaction *bam_txn = nandc->bam_txn; 753 + 754 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; 755 + 756 + /* fill the command desc */ 757 + for (i = 0; i < size; i++) { 758 + if (read) 759 + bam_prep_ce(&bam_ce_buffer[i], 760 + nandc_reg_phys(nandc, reg_off + 4 * i), 761 + BAM_READ_COMMAND, 762 + reg_buf_dma_addr(nandc, 763 + (__le32 *)vaddr + i)); 764 + else 765 + bam_prep_ce_le32(&bam_ce_buffer[i], 766 + nandc_reg_phys(nandc, reg_off + 4 * i), 767 + BAM_WRITE_COMMAND, 768 + *((__le32 *)vaddr + i)); 769 + } 770 + 771 + bam_txn->bam_ce_pos += size; 772 + 773 + /* use the separate sgl after this command */ 774 + if (flags & NAND_BAM_NEXT_SGL) { 775 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; 776 + bam_ce_size = (bam_txn->bam_ce_pos - 777 + bam_txn->bam_ce_start) * 778 + sizeof(struct bam_cmd_element); 779 + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], 780 + bam_ce_buffer, bam_ce_size); 781 + bam_txn->cmd_sgl_pos++; 782 + bam_txn->bam_ce_start = bam_txn->bam_ce_pos; 783 + 784 + if (flags & NAND_BAM_NWD) { 785 + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, 786 + DMA_PREP_FENCE | 787 + DMA_PREP_CMD); 788 + if (ret) 789 + return ret; 790 + } 791 + } 792 + 793 + return 0; 794 + } 795 + 796 + /* 764 797 * Prepares the data descriptor for BAM DMA which will be used for NAND 765 798 
* data reads and writes. 766 799 */ ··· 938 851 { 939 852 bool flow_control = false; 940 853 void *vaddr; 941 - int size; 942 854 943 - if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) 944 - flow_control = true; 855 + vaddr = nandc->reg_read_buf + nandc->reg_read_pos; 856 + nandc->reg_read_pos += num_regs; 945 857 946 858 if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) 947 859 first = dev_cmd_reg_addr(nandc, first); 948 860 949 - size = num_regs * sizeof(u32); 950 - vaddr = nandc->reg_read_buf + nandc->reg_read_pos; 951 - nandc->reg_read_pos += num_regs; 861 + if (nandc->props->is_bam) 862 + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, 863 + num_regs, flags); 952 864 953 - return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control); 865 + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) 866 + flow_control = true; 867 + 868 + return prep_adm_dma_desc(nandc, true, first, vaddr, 869 + num_regs * sizeof(u32), flow_control); 954 870 } 955 871 956 872 /* ··· 970 880 bool flow_control = false; 971 881 struct nandc_regs *regs = nandc->regs; 972 882 void *vaddr; 973 - int size; 974 883 975 884 vaddr = offset_to_nandc_reg(regs, first); 976 - 977 - if (first == NAND_FLASH_CMD) 978 - flow_control = true; 979 885 980 886 if (first == NAND_ERASED_CW_DETECT_CFG) { 981 887 if (flags & NAND_ERASED_CW_SET) ··· 989 903 if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) 990 904 first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); 991 905 992 - size = num_regs * sizeof(u32); 906 + if (nandc->props->is_bam) 907 + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, 908 + num_regs, flags); 993 909 994 - return prep_adm_dma_desc(nandc, false, first, vaddr, size, 995 - flow_control); 910 + if (first == NAND_FLASH_CMD) 911 + flow_control = true; 912 + 913 + return prep_adm_dma_desc(nandc, false, first, vaddr, 914 + num_regs * sizeof(u32), flow_control); 996 915 } 997 916 998 917 /* ··· 1261 1170 } 1262 1171 1263 1172 if 
(bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { 1264 - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0); 1173 + r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 1174 + DMA_PREP_CMD); 1265 1175 if (r) 1266 1176 return r; 1267 1177 } ··· 2797 2705 if (IS_ERR(nandc->base)) 2798 2706 return PTR_ERR(nandc->base); 2799 2707 2708 + nandc->base_phys = res->start; 2800 2709 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); 2801 2710 2802 2711 nandc->core_clk = devm_clk_get(dev, "core");
+3 -6
drivers/mtd/nand/sh_flctl.c
··· 1094 1094 1095 1095 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) 1096 1096 { 1097 - const struct of_device_id *match; 1098 - struct flctl_soc_config *config; 1097 + const struct flctl_soc_config *config; 1099 1098 struct sh_flctl_platform_data *pdata; 1100 1099 1101 - match = of_match_device(of_flctl_match, dev); 1102 - if (match) 1103 - config = (struct flctl_soc_config *)match->data; 1104 - else { 1100 + config = of_device_get_match_data(dev); 1101 + if (!config) { 1105 1102 dev_err(dev, "%s: no OF configuration attached\n", __func__); 1106 1103 return NULL; 1107 1104 }
+8
drivers/mtd/parsers/Kconfig
··· 6 6 may contain up to 3/4 partitions (depending on the version). 7 7 This driver will parse TRX header and report at least two partitions: 8 8 kernel and rootfs. 9 + 10 + config MTD_SHARPSL_PARTS 11 + tristate "Sharp SL Series NAND flash partition parser" 12 + depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST 13 + help 14 + This provides the read-only FTL logic necessary to read the partition 15 + table from the NAND flash of Sharp SL Series (Zaurus) and the MTD 16 + partition parser using this code.
+1
drivers/mtd/parsers/Makefile
··· 1 1 obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o 2 + obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
+398
drivers/mtd/parsers/sharpslpart.c
··· 1 + /* 2 + * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL 3 + * for logical addressing, as used on the PXA models of the SHARP SL Series. 4 + * 5 + * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com> 6 + * 7 + * Based on SHARP GPL 2.4 sources: 8 + * http://support.ezaurus.com/developer/source/source_dl.asp 9 + * drivers/mtd/nand/sharp_sl_logical.c 10 + * linux/include/asm-arm/sharp_nand_logical.h 11 + * 12 + * Copyright (C) 2002 SHARP 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2 of the License, or 17 + * (at your option) any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 
23 + * 24 + */ 25 + 26 + #include <linux/kernel.h> 27 + #include <linux/slab.h> 28 + #include <linux/module.h> 29 + #include <linux/types.h> 30 + #include <linux/bitops.h> 31 + #include <linux/sizes.h> 32 + #include <linux/mtd/mtd.h> 33 + #include <linux/mtd/partitions.h> 34 + 35 + /* oob structure */ 36 + #define NAND_NOOB_LOGADDR_00 8 37 + #define NAND_NOOB_LOGADDR_01 9 38 + #define NAND_NOOB_LOGADDR_10 10 39 + #define NAND_NOOB_LOGADDR_11 11 40 + #define NAND_NOOB_LOGADDR_20 12 41 + #define NAND_NOOB_LOGADDR_21 13 42 + 43 + #define BLOCK_IS_RESERVED 0xffff 44 + #define BLOCK_UNMASK_COMPLEMENT 1 45 + 46 + /* factory defaults */ 47 + #define SHARPSL_NAND_PARTS 3 48 + #define SHARPSL_FTL_PART_SIZE (7 * SZ_1M) 49 + #define SHARPSL_PARTINFO1_LADDR 0x00060000 50 + #define SHARPSL_PARTINFO2_LADDR 0x00064000 51 + 52 + #define BOOT_MAGIC 0x424f4f54 53 + #define FSRO_MAGIC 0x4653524f 54 + #define FSRW_MAGIC 0x46535257 55 + 56 + /** 57 + * struct sharpsl_ftl - Sharp FTL Logical Table 58 + * @logmax: number of logical blocks 59 + * @log2phy: the logical-to-physical table 60 + * 61 + * Structure containing the logical-to-physical translation table 62 + * used by the SHARP SL FTL. 63 + */ 64 + struct sharpsl_ftl { 65 + unsigned int logmax; 66 + unsigned int *log2phy; 67 + }; 68 + 69 + /* verify that the OOB bytes 8 to 15 are free and available for the FTL */ 70 + static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd) 71 + { 72 + u8 freebytes = 0; 73 + int section = 0; 74 + 75 + while (true) { 76 + struct mtd_oob_region oobfree = { }; 77 + int ret, i; 78 + 79 + ret = mtd_ooblayout_free(mtd, section++, &oobfree); 80 + if (ret) 81 + break; 82 + 83 + if (!oobfree.length || oobfree.offset > 15 || 84 + (oobfree.offset + oobfree.length) < 8) 85 + continue; 86 + 87 + i = oobfree.offset >= 8 ? 
oobfree.offset : 8; 88 + for (; i < oobfree.offset + oobfree.length && i < 16; i++) 89 + freebytes |= BIT(i - 8); 90 + 91 + if (freebytes == 0xff) 92 + return 0; 93 + } 94 + 95 + return -ENOTSUPP; 96 + } 97 + 98 + static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf) 99 + { 100 + struct mtd_oob_ops ops = { }; 101 + int ret; 102 + 103 + ops.mode = MTD_OPS_PLACE_OOB; 104 + ops.ooblen = mtd->oobsize; 105 + ops.oobbuf = buf; 106 + 107 + ret = mtd_read_oob(mtd, offs, &ops); 108 + if (ret != 0 || mtd->oobsize != ops.oobretlen) 109 + return -1; 110 + 111 + return 0; 112 + } 113 + 114 + /* 115 + * The logical block number assigned to a physical block is stored in the OOB 116 + * of the first page, in 3 16-bit copies with the following layout: 117 + * 118 + * 01234567 89abcdef 119 + * -------- -------- 120 + * ECC BB xyxyxy 121 + * 122 + * When reading we check that the first two copies agree. 123 + * In case of error, matching is tried using the following pairs. 124 + * Reserved values 0xffff mean the block is kept for wear leveling. 
125 + * 126 + * 01234567 89abcdef 127 + * -------- -------- 128 + * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9 129 + * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11 130 + * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13 131 + */ 132 + static int sharpsl_nand_get_logical_num(u8 *oob) 133 + { 134 + u16 us; 135 + int good0, good1; 136 + 137 + if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] && 138 + oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) { 139 + good0 = NAND_NOOB_LOGADDR_00; 140 + good1 = NAND_NOOB_LOGADDR_01; 141 + } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] && 142 + oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) { 143 + good0 = NAND_NOOB_LOGADDR_10; 144 + good1 = NAND_NOOB_LOGADDR_11; 145 + } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] && 146 + oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) { 147 + good0 = NAND_NOOB_LOGADDR_20; 148 + good1 = NAND_NOOB_LOGADDR_21; 149 + } else { 150 + return -EINVAL; 151 + } 152 + 153 + us = oob[good0] | oob[good1] << 8; 154 + 155 + /* parity check */ 156 + if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT) 157 + return -EINVAL; 158 + 159 + /* reserved */ 160 + if (us == BLOCK_IS_RESERVED) 161 + return BLOCK_IS_RESERVED; 162 + 163 + return (us >> 1) & GENMASK(9, 0); 164 + } 165 + 166 + static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl) 167 + { 168 + unsigned int block_num, log_num, phymax; 169 + loff_t block_adr; 170 + u8 *oob; 171 + int i, ret; 172 + 173 + oob = kzalloc(mtd->oobsize, GFP_KERNEL); 174 + if (!oob) 175 + return -ENOMEM; 176 + 177 + phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd); 178 + 179 + /* FTL reserves 5% of the blocks + 1 spare */ 180 + ftl->logmax = ((phymax * 95) / 100) - 1; 181 + 182 + ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy), 183 + GFP_KERNEL); 184 + if (!ftl->log2phy) { 185 + ret = -ENOMEM; 186 + goto exit; 
187 + } 188 + 189 + /* initialize ftl->log2phy */ 190 + for (i = 0; i < ftl->logmax; i++) 191 + ftl->log2phy[i] = UINT_MAX; 192 + 193 + /* create physical-logical table */ 194 + for (block_num = 0; block_num < phymax; block_num++) { 195 + block_adr = block_num * mtd->erasesize; 196 + 197 + if (mtd_block_isbad(mtd, block_adr)) 198 + continue; 199 + 200 + if (sharpsl_nand_read_oob(mtd, block_adr, oob)) 201 + continue; 202 + 203 + /* get logical block */ 204 + log_num = sharpsl_nand_get_logical_num(oob); 205 + 206 + /* cut-off errors and skip the out-of-range values */ 207 + if (log_num > 0 && log_num < ftl->logmax) { 208 + if (ftl->log2phy[log_num] == UINT_MAX) 209 + ftl->log2phy[log_num] = block_num; 210 + } 211 + } 212 + 213 + pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n", 214 + phymax, ftl->logmax, phymax - ftl->logmax); 215 + 216 + ret = 0; 217 + exit: 218 + kfree(oob); 219 + return ret; 220 + } 221 + 222 + void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl) 223 + { 224 + kfree(ftl->log2phy); 225 + } 226 + 227 + static int sharpsl_nand_read_laddr(struct mtd_info *mtd, 228 + loff_t from, 229 + size_t len, 230 + void *buf, 231 + struct sharpsl_ftl *ftl) 232 + { 233 + unsigned int log_num, final_log_num; 234 + unsigned int block_num; 235 + loff_t block_adr; 236 + loff_t block_ofs; 237 + size_t retlen; 238 + int err; 239 + 240 + log_num = mtd_div_by_eb((u32)from, mtd); 241 + final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd); 242 + 243 + if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num) 244 + return -EINVAL; 245 + 246 + block_num = ftl->log2phy[log_num]; 247 + block_adr = block_num * mtd->erasesize; 248 + block_ofs = mtd_mod_by_eb((u32)from, mtd); 249 + 250 + err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf); 251 + /* Ignore corrected ECC errors */ 252 + if (mtd_is_bitflip(err)) 253 + err = 0; 254 + 255 + if (!err && retlen != len) 256 + err = -EIO; 257 + 258 + if (err) 259 + pr_err("sharpslpart: error, read 
failed at %#llx\n", 260 + block_adr + block_ofs); 261 + 262 + return err; 263 + } 264 + 265 + /* 266 + * MTD Partition Parser 267 + * 268 + * Sample values read from SL-C860 269 + * 270 + * # cat /proc/mtd 271 + * dev: size erasesize name 272 + * mtd0: 006d0000 00020000 "Filesystem" 273 + * mtd1: 00700000 00004000 "smf" 274 + * mtd2: 03500000 00004000 "root" 275 + * mtd3: 04400000 00004000 "home" 276 + * 277 + * PARTITIONINFO1 278 + * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT.... 279 + * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO.... 280 + * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW.... 281 + */ 282 + struct sharpsl_nand_partinfo { 283 + __le32 start; 284 + __le32 end; 285 + __be32 magic; 286 + u32 reserved; 287 + }; 288 + 289 + static int sharpsl_nand_read_partinfo(struct mtd_info *master, 290 + loff_t from, 291 + size_t len, 292 + struct sharpsl_nand_partinfo *buf, 293 + struct sharpsl_ftl *ftl) 294 + { 295 + int ret; 296 + 297 + ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl); 298 + if (ret) 299 + return ret; 300 + 301 + /* check for magics */ 302 + if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC || 303 + be32_to_cpu(buf[1].magic) != FSRO_MAGIC || 304 + be32_to_cpu(buf[2].magic) != FSRW_MAGIC) { 305 + pr_err("sharpslpart: magic values mismatch\n"); 306 + return -EINVAL; 307 + } 308 + 309 + /* fixup for hardcoded value 64 MiB (for older models) */ 310 + buf[2].end = cpu_to_le32(master->size); 311 + 312 + /* extra sanity check */ 313 + if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) || 314 + le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) || 315 + le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) || 316 + le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) || 317 + le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) { 318 + pr_err("sharpslpart: partition sizes mismatch\n"); 319 + return -EINVAL; 320 + } 321 + 322 + return 0; 323 + } 324 + 325 + 
static int sharpsl_parse_mtd_partitions(struct mtd_info *master, 326 + const struct mtd_partition **pparts, 327 + struct mtd_part_parser_data *data) 328 + { 329 + struct sharpsl_ftl ftl; 330 + struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS]; 331 + struct mtd_partition *sharpsl_nand_parts; 332 + int err; 333 + 334 + /* check that OOB bytes 8 to 15 used by the FTL are actually free */ 335 + err = sharpsl_nand_check_ooblayout(master); 336 + if (err) 337 + return err; 338 + 339 + /* init logical mgmt (FTL) */ 340 + err = sharpsl_nand_init_ftl(master, &ftl); 341 + if (err) 342 + return err; 343 + 344 + /* read and validate first partition table */ 345 + pr_info("sharpslpart: try reading first partition table\n"); 346 + err = sharpsl_nand_read_partinfo(master, 347 + SHARPSL_PARTINFO1_LADDR, 348 + sizeof(buf), buf, &ftl); 349 + if (err) { 350 + /* fallback: read second partition table */ 351 + pr_warn("sharpslpart: first partition table is invalid, retry using the second\n"); 352 + err = sharpsl_nand_read_partinfo(master, 353 + SHARPSL_PARTINFO2_LADDR, 354 + sizeof(buf), buf, &ftl); 355 + } 356 + 357 + /* cleanup logical mgmt (FTL) */ 358 + sharpsl_nand_cleanup_ftl(&ftl); 359 + 360 + if (err) { 361 + pr_err("sharpslpart: both partition tables are invalid\n"); 362 + return err; 363 + } 364 + 365 + sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) * 366 + SHARPSL_NAND_PARTS, GFP_KERNEL); 367 + if (!sharpsl_nand_parts) 368 + return -ENOMEM; 369 + 370 + /* original names */ 371 + sharpsl_nand_parts[0].name = "smf"; 372 + sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start); 373 + sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) - 374 + le32_to_cpu(buf[0].start); 375 + 376 + sharpsl_nand_parts[1].name = "root"; 377 + sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start); 378 + sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) - 379 + le32_to_cpu(buf[1].start); 380 + 381 + sharpsl_nand_parts[2].name = "home"; 382 + sharpsl_nand_parts[2].offset = 
le32_to_cpu(buf[2].start); 383 + sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) - 384 + le32_to_cpu(buf[2].start); 385 + 386 + *pparts = sharpsl_nand_parts; 387 + return SHARPSL_NAND_PARTS; 388 + } 389 + 390 + static struct mtd_part_parser sharpsl_mtd_parser = { 391 + .parse_fn = sharpsl_parse_mtd_partitions, 392 + .name = "sharpslpart", 393 + }; 394 + module_mtd_part_parser(sharpsl_mtd_parser); 395 + 396 + MODULE_LICENSE("GPL"); 397 + MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>"); 398 + MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
+3 -3
drivers/mtd/spi-nor/Kconfig
··· 50 50 51 51 config SPI_CADENCE_QUADSPI 52 52 tristate "Cadence Quad SPI controller" 53 - depends on OF && (ARM || COMPILE_TEST) 53 + depends on OF && (ARM || ARM64 || COMPILE_TEST) 54 54 help 55 55 Enable support for the Cadence Quad SPI Flash controller. 56 56 ··· 90 90 tristate 91 91 92 92 config SPI_INTEL_SPI_PCI 93 - tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT 93 + tristate "Intel PCH/PCU SPI flash PCI driver" 94 94 depends on X86 && PCI 95 95 select SPI_INTEL_SPI 96 96 help ··· 106 106 will be called intel-spi-pci. 107 107 108 108 config SPI_INTEL_SPI_PLATFORM 109 - tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT 109 + tristate "Intel PCH/PCU SPI flash platform driver" 110 110 depends on X86 111 111 select SPI_INTEL_SPI 112 112 help
+51 -8
drivers/mtd/spi-nor/cadence-quadspi.c
··· 31 31 #include <linux/of_device.h> 32 32 #include <linux/of.h> 33 33 #include <linux/platform_device.h> 34 + #include <linux/pm_runtime.h> 34 35 #include <linux/sched.h> 35 36 #include <linux/spi/spi.h> 36 37 #include <linux/timer.h> 37 38 38 39 #define CQSPI_NAME "cadence-qspi" 39 40 #define CQSPI_MAX_CHIPSELECT 16 41 + 42 + /* Quirks */ 43 + #define CQSPI_NEEDS_WR_DELAY BIT(0) 40 44 41 45 struct cqspi_st; 42 46 ··· 79 75 bool is_decoded_cs; 80 76 u32 fifo_depth; 81 77 u32 fifo_width; 78 + bool rclk_en; 82 79 u32 trigger_address; 80 + u32 wr_delay; 83 81 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; 84 82 }; 85 83 ··· 614 608 reinit_completion(&cqspi->transfer_complete); 615 609 writel(CQSPI_REG_INDIRECTWR_START_MASK, 616 610 reg_base + CQSPI_REG_INDIRECTWR); 611 + /* 612 + * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access 613 + * Controller programming sequence, couple of cycles of 614 + * QSPI_REF_CLK delay is required for the above bit to 615 + * be internally synchronized by the QSPI module. Provide 5 616 + * cycles of delay. 617 + */ 618 + if (cqspi->wr_delay) 619 + ndelay(cqspi->wr_delay); 617 620 618 621 while (remaining > 0) { 619 622 write_bytes = remaining > page_size ? 
page_size : remaining; ··· 790 775 } 791 776 792 777 static void cqspi_readdata_capture(struct cqspi_st *cqspi, 793 - const unsigned int bypass, 778 + const bool bypass, 794 779 const unsigned int delay) 795 780 { 796 781 void __iomem *reg_base = cqspi->iobase; ··· 854 839 cqspi->sclk = sclk; 855 840 cqspi_config_baudrate_div(cqspi); 856 841 cqspi_delay(nor); 857 - cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); 842 + cqspi_readdata_capture(cqspi, !cqspi->rclk_en, 843 + f_pdata->read_delay); 858 844 } 859 845 860 846 if (switch_cs || switch_ck) ··· 1052 1036 return -ENXIO; 1053 1037 } 1054 1038 1039 + cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); 1040 + 1055 1041 return 0; 1056 1042 } 1057 1043 ··· 1174 1156 struct cqspi_st *cqspi; 1175 1157 struct resource *res; 1176 1158 struct resource *res_ahb; 1159 + unsigned long data; 1177 1160 int ret; 1178 1161 int irq; 1179 1162 ··· 1225 1206 return -ENXIO; 1226 1207 } 1227 1208 1228 - ret = clk_prepare_enable(cqspi->clk); 1229 - if (ret) { 1230 - dev_err(dev, "Cannot enable QSPI clock.\n"); 1209 + pm_runtime_enable(dev); 1210 + ret = pm_runtime_get_sync(dev); 1211 + if (ret < 0) { 1212 + pm_runtime_put_noidle(dev); 1231 1213 return ret; 1232 1214 } 1233 1215 1216 + ret = clk_prepare_enable(cqspi->clk); 1217 + if (ret) { 1218 + dev_err(dev, "Cannot enable QSPI clock.\n"); 1219 + goto probe_clk_failed; 1220 + } 1221 + 1234 1222 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); 1223 + data = (unsigned long)of_device_get_match_data(dev); 1224 + if (data & CQSPI_NEEDS_WR_DELAY) 1225 + cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, 1226 + cqspi->master_ref_clk_hz); 1235 1227 1236 1228 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, 1237 1229 pdev->name, cqspi); ··· 1263 1233 } 1264 1234 1265 1235 return ret; 1266 - probe_irq_failed: 1267 - cqspi_controller_enable(cqspi, 0); 1268 1236 probe_setup_failed: 1237 + cqspi_controller_enable(cqspi, 0); 1238 + probe_irq_failed: 1269 1239 
clk_disable_unprepare(cqspi->clk); 1240 + probe_clk_failed: 1241 + pm_runtime_put_sync(dev); 1242 + pm_runtime_disable(dev); 1270 1243 return ret; 1271 1244 } 1272 1245 ··· 1285 1252 cqspi_controller_enable(cqspi, 0); 1286 1253 1287 1254 clk_disable_unprepare(cqspi->clk); 1255 + 1256 + pm_runtime_put_sync(&pdev->dev); 1257 + pm_runtime_disable(&pdev->dev); 1288 1258 1289 1259 return 0; 1290 1260 } ··· 1320 1284 #endif 1321 1285 1322 1286 static const struct of_device_id cqspi_dt_ids[] = { 1323 - {.compatible = "cdns,qspi-nor",}, 1287 + { 1288 + .compatible = "cdns,qspi-nor", 1289 + .data = (void *)0, 1290 + }, 1291 + { 1292 + .compatible = "ti,k2g-qspi", 1293 + .data = (void *)CQSPI_NEEDS_WR_DELAY, 1294 + }, 1324 1295 { /* end of table */ } 1325 1296 }; 1326 1297
+3
drivers/mtd/spi-nor/intel-spi-pci.c
··· 63 63 } 64 64 65 65 static const struct pci_device_id intel_spi_pci_ids[] = { 66 + { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, 66 67 { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, 68 + { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, 69 + { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, 67 70 { }, 68 71 }; 69 72 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+151 -58
drivers/mtd/spi-nor/intel-spi.c
··· 67 67 #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) 68 68 #define PR_RPE BIT(15) 69 69 #define PR_BASE_MASK 0x3fff 70 - /* Last PR is GPR0 */ 71 - #define PR_NUM (5 + 1) 72 70 73 71 /* Offsets are from @ispi->sregs */ 74 72 #define SSFSTS_CTL 0x00 ··· 88 90 #define OPMENU0 0x08 89 91 #define OPMENU1 0x0c 90 92 93 + #define OPTYPE_READ_NO_ADDR 0 94 + #define OPTYPE_WRITE_NO_ADDR 1 95 + #define OPTYPE_READ_WITH_ADDR 2 96 + #define OPTYPE_WRITE_WITH_ADDR 3 97 + 91 98 /* CPU specifics */ 92 99 #define BYT_PR 0x74 93 100 #define BYT_SSFSTS_CTL 0x90 94 101 #define BYT_BCR 0xfc 95 102 #define BYT_BCR_WPD BIT(0) 96 103 #define BYT_FREG_NUM 5 104 + #define BYT_PR_NUM 5 97 105 98 106 #define LPT_PR 0x74 99 107 #define LPT_SSFSTS_CTL 0x90 100 108 #define LPT_FREG_NUM 5 109 + #define LPT_PR_NUM 5 101 110 102 111 #define BXT_PR 0x84 103 112 #define BXT_SSFSTS_CTL 0xa0 104 113 #define BXT_FREG_NUM 12 114 + #define BXT_PR_NUM 6 115 + 116 + #define LVSCC 0xc4 117 + #define UVSCC 0xc8 118 + #define ERASE_OPCODE_SHIFT 8 119 + #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 120 + #define ERASE_64K_OPCODE_SHIFT 16 121 + #define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 105 122 106 123 #define INTEL_SPI_TIMEOUT 5000 /* ms */ 107 124 #define INTEL_SPI_FIFO_SZ 64 ··· 130 117 * @pregs: Start of protection registers 131 118 * @sregs: Start of software sequencer registers 132 119 * @nregions: Maximum number of regions 120 + * @pr_num: Maximum number of protected range registers 133 121 * @writeable: Is the chip writeable 134 - * @swseq: Use SW sequencer in register reads/writes 122 + * @locked: Is SPI setting locked 123 + * @swseq_reg: Use SW sequencer in register reads/writes 124 + * @swseq_erase: Use SW sequencer in erase operation 135 125 * @erase_64k: 64k erase supported 136 126 * @opcodes: Opcodes which are supported. This are programmed by BIOS 137 127 * before it locks down the controller. 
··· 148 132 void __iomem *pregs; 149 133 void __iomem *sregs; 150 134 size_t nregions; 135 + size_t pr_num; 151 136 bool writeable; 152 - bool swseq; 137 + bool locked; 138 + bool swseq_reg; 139 + bool swseq_erase; 153 140 bool erase_64k; 154 141 u8 opcodes[8]; 155 142 u8 preopcodes[2]; ··· 186 167 for (i = 0; i < ispi->nregions; i++) 187 168 dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, 188 169 readl(ispi->base + FREG(i))); 189 - for (i = 0; i < PR_NUM; i++) 170 + for (i = 0; i < ispi->pr_num; i++) 190 171 dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, 191 172 readl(ispi->pregs + PR(i))); 192 173 ··· 200 181 if (ispi->info->type == INTEL_SPI_BYT) 201 182 dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); 202 183 184 + dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); 185 + dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); 186 + 203 187 dev_dbg(ispi->dev, "Protected regions:\n"); 204 - for (i = 0; i < PR_NUM; i++) { 188 + for (i = 0; i < ispi->pr_num; i++) { 205 189 u32 base, limit; 206 190 207 191 value = readl(ispi->pregs + PR(i)); ··· 236 214 } 237 215 238 216 dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", 239 - ispi->swseq ? 'S' : 'H'); 217 + ispi->swseq_reg ? 'S' : 'H'); 218 + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", 219 + ispi->swseq_erase ? 
'S' : 'H'); 240 220 } 241 221 242 222 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ ··· 302 278 303 279 static int intel_spi_init(struct intel_spi *ispi) 304 280 { 305 - u32 opmenu0, opmenu1, val; 281 + u32 opmenu0, opmenu1, lvscc, uvscc, val; 306 282 int i; 307 283 308 284 switch (ispi->info->type) { ··· 310 286 ispi->sregs = ispi->base + BYT_SSFSTS_CTL; 311 287 ispi->pregs = ispi->base + BYT_PR; 312 288 ispi->nregions = BYT_FREG_NUM; 289 + ispi->pr_num = BYT_PR_NUM; 290 + ispi->swseq_reg = true; 313 291 314 292 if (writeable) { 315 293 /* Disable write protection */ ··· 331 305 ispi->sregs = ispi->base + LPT_SSFSTS_CTL; 332 306 ispi->pregs = ispi->base + LPT_PR; 333 307 ispi->nregions = LPT_FREG_NUM; 308 + ispi->pr_num = LPT_PR_NUM; 309 + ispi->swseq_reg = true; 334 310 break; 335 311 336 312 case INTEL_SPI_BXT: 337 313 ispi->sregs = ispi->base + BXT_SSFSTS_CTL; 338 314 ispi->pregs = ispi->base + BXT_PR; 339 315 ispi->nregions = BXT_FREG_NUM; 316 + ispi->pr_num = BXT_PR_NUM; 340 317 ispi->erase_64k = true; 341 318 break; 342 319 ··· 347 318 return -EINVAL; 348 319 } 349 320 350 - /* Disable #SMI generation */ 321 + /* Disable #SMI generation from HW sequencer */ 351 322 val = readl(ispi->base + HSFSTS_CTL); 352 323 val &= ~HSFSTS_CTL_FSMIE; 353 324 writel(val, ispi->base + HSFSTS_CTL); 354 325 355 326 /* 356 - * BIOS programs allowed opcodes and then locks down the register. 357 - * So read back what opcodes it decided to support. That's the set 358 - * we are going to support as well. 327 + * Determine whether erase operation should use HW or SW sequencer. 328 + * 329 + * The HW sequencer has a predefined list of opcodes, with only the 330 + * erase opcode being programmable in LVSCC and UVSCC registers. 331 + * If these registers don't contain a valid erase opcode, erase 332 + * cannot be done using HW sequencer. 
359 333 */ 360 - opmenu0 = readl(ispi->sregs + OPMENU0); 361 - opmenu1 = readl(ispi->sregs + OPMENU1); 334 + lvscc = readl(ispi->base + LVSCC); 335 + uvscc = readl(ispi->base + UVSCC); 336 + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) 337 + ispi->swseq_erase = true; 338 + /* SPI controller on Intel BXT supports 64K erase opcode */ 339 + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) 340 + if (!(lvscc & ERASE_64K_OPCODE_MASK) || 341 + !(uvscc & ERASE_64K_OPCODE_MASK)) 342 + ispi->erase_64k = false; 362 343 363 344 /* 364 345 * Some controllers can only do basic operations using hardware 365 346 * sequencer. All other operations are supposed to be carried out 366 - * using software sequencer. If we find that BIOS has programmed 367 - * opcodes for the software sequencer we use that over the hardware 368 - * sequencer. 347 + * using software sequencer. 369 348 */ 370 - if (opmenu0 && opmenu1) { 371 - for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { 372 - ispi->opcodes[i] = opmenu0 >> i * 8; 373 - ispi->opcodes[i + 4] = opmenu1 >> i * 8; 374 - } 375 - 376 - val = readl(ispi->sregs + PREOP_OPTYPE); 377 - ispi->preopcodes[0] = val; 378 - ispi->preopcodes[1] = val >> 8; 379 - 349 + if (ispi->swseq_reg) { 380 350 /* Disable #SMI generation from SW sequencer */ 381 351 val = readl(ispi->sregs + SSFSTS_CTL); 382 352 val &= ~SSFSTS_CTL_FSMIE; 383 353 writel(val, ispi->sregs + SSFSTS_CTL); 354 + } 384 355 385 - ispi->swseq = true; 356 + /* Check controller's lock status */ 357 + val = readl(ispi->base + HSFSTS_CTL); 358 + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); 359 + 360 + if (ispi->locked) { 361 + /* 362 + * BIOS programs allowed opcodes and then locks down the 363 + * register. So read back what opcodes it decided to support. 364 + * That's the set we are going to support as well. 
365 + */ 366 + opmenu0 = readl(ispi->sregs + OPMENU0); 367 + opmenu1 = readl(ispi->sregs + OPMENU1); 368 + 369 + if (opmenu0 && opmenu1) { 370 + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { 371 + ispi->opcodes[i] = opmenu0 >> i * 8; 372 + ispi->opcodes[i + 4] = opmenu1 >> i * 8; 373 + } 374 + 375 + val = readl(ispi->sregs + PREOP_OPTYPE); 376 + ispi->preopcodes[0] = val; 377 + ispi->preopcodes[1] = val >> 8; 378 + } 386 379 } 387 380 388 381 intel_spi_dump_regs(ispi); ··· 412 361 return 0; 413 362 } 414 363 415 - static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode) 364 + static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) 416 365 { 417 366 int i; 367 + int preop; 418 368 419 - for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) 420 - if (ispi->opcodes[i] == opcode) 421 - return i; 422 - return -EINVAL; 369 + if (ispi->locked) { 370 + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) 371 + if (ispi->opcodes[i] == opcode) 372 + return i; 373 + 374 + return -EINVAL; 375 + } 376 + 377 + /* The lock is off, so just use index 0 */ 378 + writel(opcode, ispi->sregs + OPMENU0); 379 + preop = readw(ispi->sregs + PREOP_OPTYPE); 380 + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); 381 + 382 + return 0; 423 383 } 424 384 425 - static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, 426 - int len) 385 + static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len) 427 386 { 428 387 u32 val, status; 429 388 int ret; ··· 455 394 return -EINVAL; 456 395 } 457 396 397 + if (len > INTEL_SPI_FIFO_SZ) 398 + return -EINVAL; 399 + 458 400 val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; 459 401 val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 460 402 val |= HSFSTS_CTL_FGO; ··· 476 412 return 0; 477 413 } 478 414 479 - static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, 480 - int len) 415 + static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len, 416 + int optype) 481 417 { 
482 - u32 val, status; 418 + u32 val = 0, status; 419 + u16 preop; 483 420 int ret; 484 421 485 - ret = intel_spi_opcode_index(ispi, opcode); 422 + ret = intel_spi_opcode_index(ispi, opcode, optype); 486 423 if (ret < 0) 487 424 return ret; 488 425 489 - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; 426 + if (len > INTEL_SPI_FIFO_SZ) 427 + return -EINVAL; 428 + 429 + /* Only mark 'Data Cycle' bit when there is data to be transferred */ 430 + if (len > 0) 431 + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; 490 432 val |= ret << SSFSTS_CTL_COP_SHIFT; 491 433 val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; 492 434 val |= SSFSTS_CTL_SCGO; 435 + preop = readw(ispi->sregs + PREOP_OPTYPE); 436 + if (preop) { 437 + val |= SSFSTS_CTL_ACS; 438 + if (preop >> 8) 439 + val |= SSFSTS_CTL_SPOP; 440 + } 493 441 writel(val, ispi->sregs + SSFSTS_CTL); 494 442 495 443 ret = intel_spi_wait_sw_busy(ispi); 496 444 if (ret) 497 445 return ret; 498 446 499 - status = readl(ispi->base + SSFSTS_CTL); 447 + status = readl(ispi->sregs + SSFSTS_CTL); 500 448 if (status & SSFSTS_CTL_FCERR) 501 449 return -EIO; 502 450 else if (status & SSFSTS_CTL_AEL) ··· 525 449 /* Address of the first chip */ 526 450 writel(0, ispi->base + FADDR); 527 451 528 - if (ispi->swseq) 529 - ret = intel_spi_sw_cycle(ispi, opcode, buf, len); 452 + if (ispi->swseq_reg) 453 + ret = intel_spi_sw_cycle(ispi, opcode, len, 454 + OPTYPE_READ_NO_ADDR); 530 455 else 531 - ret = intel_spi_hw_cycle(ispi, opcode, buf, len); 456 + ret = intel_spi_hw_cycle(ispi, opcode, len); 532 457 533 458 if (ret) 534 459 return ret; ··· 544 467 545 468 /* 546 469 * This is handled with atomic operation and preop code in Intel 547 - * controller so skip it here now. 470 + * controller so skip it here now. If the controller is not locked, 471 + * program the opcode to the PREOP register for later use. 
548 472 */ 549 - if (opcode == SPINOR_OP_WREN) 473 + if (opcode == SPINOR_OP_WREN) { 474 + if (!ispi->locked) 475 + writel(opcode, ispi->sregs + PREOP_OPTYPE); 476 + 550 477 return 0; 478 + } 551 479 552 480 writel(0, ispi->base + FADDR); 553 481 ··· 561 479 if (ret) 562 480 return ret; 563 481 564 - if (ispi->swseq) 565 - return intel_spi_sw_cycle(ispi, opcode, buf, len); 566 - return intel_spi_hw_cycle(ispi, opcode, buf, len); 482 + if (ispi->swseq_reg) 483 + return intel_spi_sw_cycle(ispi, opcode, len, 484 + OPTYPE_WRITE_NO_ADDR); 485 + return intel_spi_hw_cycle(ispi, opcode, len); 567 486 } 568 487 569 488 static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, ··· 644 561 val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 645 562 val |= HSFSTS_CTL_FCYCLE_WRITE; 646 563 647 - /* Write enable */ 648 - if (ispi->preopcodes[1] == SPINOR_OP_WREN) 649 - val |= SSFSTS_CTL_SPOP; 650 - val |= SSFSTS_CTL_ACS; 651 - writel(val, ispi->base + HSFSTS_CTL); 652 - 653 564 ret = intel_spi_write_block(ispi, write_buf, block_size); 654 565 if (ret) { 655 566 dev_err(ispi->dev, "failed to write block\n"); ··· 651 574 } 652 575 653 576 /* Start the write now */ 654 - val = readl(ispi->base + HSFSTS_CTL); 655 - writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL); 577 + val |= HSFSTS_CTL_FGO; 578 + writel(val, ispi->base + HSFSTS_CTL); 656 579 657 580 ret = intel_spi_wait_hw_busy(ispi); 658 581 if (ret) { ··· 697 620 erase_size = SZ_4K; 698 621 } 699 622 623 + if (ispi->swseq_erase) { 624 + while (len > 0) { 625 + writel(offs, ispi->base + FADDR); 626 + 627 + ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, 628 + 0, OPTYPE_WRITE_WITH_ADDR); 629 + if (ret) 630 + return ret; 631 + 632 + offs += erase_size; 633 + len -= erase_size; 634 + } 635 + 636 + return 0; 637 + } 638 + 700 639 while (len > 0) { 701 640 writel(offs, ispi->base + FADDR); 702 641 ··· 745 652 { 746 653 int i; 747 654 748 - for (i = 0; i < PR_NUM; i++) { 655 + for (i = 0; i < ispi->pr_num; i++) { 
749 656 u32 pr_base, pr_limit, pr_value; 750 657 751 658 pr_value = readl(ispi->pregs + PR(i));
+58 -12
drivers/mtd/spi-nor/mtk-quadspi.c
··· 404 404 return ret; 405 405 } 406 406 407 + static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor) 408 + { 409 + clk_disable_unprepare(mt8173_nor->spi_clk); 410 + clk_disable_unprepare(mt8173_nor->nor_clk); 411 + } 412 + 413 + static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor) 414 + { 415 + int ret; 416 + 417 + ret = clk_prepare_enable(mt8173_nor->spi_clk); 418 + if (ret) 419 + return ret; 420 + 421 + ret = clk_prepare_enable(mt8173_nor->nor_clk); 422 + if (ret) { 423 + clk_disable_unprepare(mt8173_nor->spi_clk); 424 + return ret; 425 + } 426 + 427 + return 0; 428 + } 429 + 407 430 static int mtk_nor_init(struct mt8173_nor *mt8173_nor, 408 431 struct device_node *flash_node) 409 432 { ··· 491 468 return PTR_ERR(mt8173_nor->nor_clk); 492 469 493 470 mt8173_nor->dev = &pdev->dev; 494 - ret = clk_prepare_enable(mt8173_nor->spi_clk); 471 + 472 + ret = mt8173_nor_enable_clk(mt8173_nor); 495 473 if (ret) 496 474 return ret; 497 475 498 - ret = clk_prepare_enable(mt8173_nor->nor_clk); 499 - if (ret) { 500 - clk_disable_unprepare(mt8173_nor->spi_clk); 501 - return ret; 502 - } 503 476 /* only support one attached flash */ 504 477 flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); 505 478 if (!flash_np) { ··· 506 487 ret = mtk_nor_init(mt8173_nor, flash_np); 507 488 508 489 nor_free: 509 - if (ret) { 510 - clk_disable_unprepare(mt8173_nor->spi_clk); 511 - clk_disable_unprepare(mt8173_nor->nor_clk); 512 - } 490 + if (ret) 491 + mt8173_nor_disable_clk(mt8173_nor); 492 + 513 493 return ret; 514 494 } 515 495 ··· 516 498 { 517 499 struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev); 518 500 519 - clk_disable_unprepare(mt8173_nor->spi_clk); 520 - clk_disable_unprepare(mt8173_nor->nor_clk); 501 + mt8173_nor_disable_clk(mt8173_nor); 502 + 521 503 return 0; 522 504 } 505 + 506 + #ifdef CONFIG_PM_SLEEP 507 + static int mtk_nor_suspend(struct device *dev) 508 + { 509 + struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); 510 + 511 + 
mt8173_nor_disable_clk(mt8173_nor); 512 + 513 + return 0; 514 + } 515 + 516 + static int mtk_nor_resume(struct device *dev) 517 + { 518 + struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); 519 + 520 + return mt8173_nor_enable_clk(mt8173_nor); 521 + } 522 + 523 + static const struct dev_pm_ops mtk_nor_dev_pm_ops = { 524 + .suspend = mtk_nor_suspend, 525 + .resume = mtk_nor_resume, 526 + }; 527 + 528 + #define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops) 529 + #else 530 + #define MTK_NOR_DEV_PM_OPS NULL 531 + #endif 523 532 524 533 static const struct of_device_id mtk_nor_of_ids[] = { 525 534 { .compatible = "mediatek,mt8173-nor"}, ··· 559 514 .remove = mtk_nor_drv_remove, 560 515 .driver = { 561 516 .name = "mtk-nor", 517 + .pm = MTK_NOR_DEV_PM_OPS, 562 518 .of_match_table = mtk_nor_of_ids, 563 519 }, 564 520 };
+84 -21
drivers/mtd/spi-nor/spi-nor.c
··· 89 89 #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ 90 90 #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ 91 91 #define USE_CLSR BIT(14) /* use CLSR command */ 92 + 93 + int (*quad_enable)(struct spi_nor *nor); 92 94 }; 93 95 94 96 #define JEDEC_MFR(info) ((info)->id[0]) ··· 872 870 return ret; 873 871 } 874 872 873 + static int macronix_quad_enable(struct spi_nor *nor); 874 + 875 875 /* Used when the "_ext_id" is two bytes at most */ 876 876 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ 877 877 .id = { \ ··· 968 964 { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, 969 965 970 966 /* Everspin */ 967 + { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, 971 968 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, 972 969 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, 973 970 { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ··· 988 983 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 989 984 }, 990 985 { 986 + "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64, 987 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 988 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 989 + }, 990 + { 991 991 "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, 992 992 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 993 993 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) ··· 1006 996 "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, 1007 997 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 1008 998 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 999 + }, 1000 + { 1001 + "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, 1002 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 1003 + SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 1004 + .quad_enable = macronix_quad_enable, 1009 1005 }, 1010 1006 1011 1007 /* Intel/Numonyx -- xxxs33b */ ··· 1040 1024 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 1041 1025 { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, 1042 1026 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 1043 - { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 1027 + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, 1044 1028 { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, 1045 1029 { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 1046 1030 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, ··· 1153 1137 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, 1154 1138 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, 1155 1139 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, 1140 + { 1141 + "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, 1142 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 1143 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 1144 + }, 1156 1145 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 1157 1146 { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, 1158 1147 { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, ··· 2309 2288 2310 2289 /* Check the SFDP header version. */ 2311 2290 if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || 2312 - header.major != SFDP_JESD216_MAJOR || 2313 - header.minor < SFDP_JESD216_MINOR) 2291 + header.major != SFDP_JESD216_MAJOR) 2314 2292 return -EINVAL; 2315 2293 2316 2294 /* ··· 2447 2427 params->quad_enable = spansion_quad_enable; 2448 2428 break; 2449 2429 } 2430 + 2431 + /* 2432 + * Some manufacturer like GigaDevice may use different 2433 + * bit to set QE on different memories, so the MFR can't 2434 + * indicate the quad_enable method for this case, we need 2435 + * set it in flash info list. 
2436 + */ 2437 + if (info->quad_enable) 2438 + params->quad_enable = info->quad_enable; 2450 2439 } 2451 2440 2452 2441 /* Override the parameters with data read from SFDP tables. */ ··· 2659 2630 /* Enable Quad I/O if needed. */ 2660 2631 enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || 2661 2632 spi_nor_get_protocol_width(nor->write_proto) == 4); 2662 - if (enable_quad_io && params->quad_enable) { 2663 - err = params->quad_enable(nor); 2633 + if (enable_quad_io && params->quad_enable) 2634 + nor->quad_enable = params->quad_enable; 2635 + else 2636 + nor->quad_enable = NULL; 2637 + 2638 + return 0; 2639 + } 2640 + 2641 + static int spi_nor_init(struct spi_nor *nor) 2642 + { 2643 + int err; 2644 + 2645 + /* 2646 + * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up 2647 + * with the software protection bits set 2648 + */ 2649 + if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || 2650 + JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || 2651 + JEDEC_MFR(nor->info) == SNOR_MFR_SST || 2652 + nor->info->flags & SPI_NOR_HAS_LOCK) { 2653 + write_enable(nor); 2654 + write_sr(nor, 0); 2655 + spi_nor_wait_till_ready(nor); 2656 + } 2657 + 2658 + if (nor->quad_enable) { 2659 + err = nor->quad_enable(nor); 2664 2660 if (err) { 2665 2661 dev_err(nor->dev, "quad mode not supported\n"); 2666 2662 return err; 2667 2663 } 2668 2664 } 2669 2665 2666 + if ((nor->addr_width == 4) && 2667 + (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) && 2668 + !(nor->info->flags & SPI_NOR_4B_OPCODES)) 2669 + set_4byte(nor, nor->info, 1); 2670 + 2670 2671 return 0; 2672 + } 2673 + 2674 + /* mtd resume handler */ 2675 + static void spi_nor_resume(struct mtd_info *mtd) 2676 + { 2677 + struct spi_nor *nor = mtd_to_spi_nor(mtd); 2678 + struct device *dev = nor->dev; 2679 + int ret; 2680 + 2681 + /* re-initialize the nor chip */ 2682 + ret = spi_nor_init(nor); 2683 + if (ret) 2684 + dev_err(dev, "resume() failed\n"); 2671 2685 } 2672 2686 2673 2687 int spi_nor_scan(struct spi_nor *nor, 
const char *name, ··· 2780 2708 if (ret) 2781 2709 return ret; 2782 2710 2783 - /* 2784 - * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up 2785 - * with the software protection bits set 2786 - */ 2787 - 2788 - if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || 2789 - JEDEC_MFR(info) == SNOR_MFR_INTEL || 2790 - JEDEC_MFR(info) == SNOR_MFR_SST || 2791 - info->flags & SPI_NOR_HAS_LOCK) { 2792 - write_enable(nor); 2793 - write_sr(nor, 0); 2794 - spi_nor_wait_till_ready(nor); 2795 - } 2796 - 2797 2711 if (!mtd->name) 2798 2712 mtd->name = dev_name(dev); 2799 2713 mtd->priv = nor; ··· 2789 2731 mtd->size = params.size; 2790 2732 mtd->_erase = spi_nor_erase; 2791 2733 mtd->_read = spi_nor_read; 2734 + mtd->_resume = spi_nor_resume; 2792 2735 2793 2736 /* NOR protection support for STmicro/Micron chips and similar */ 2794 2737 if (JEDEC_MFR(info) == SNOR_MFR_MICRON || ··· 2863 2804 if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || 2864 2805 info->flags & SPI_NOR_4B_OPCODES) 2865 2806 spi_nor_set_4byte_opcodes(nor, info); 2866 - else 2867 - set_4byte(nor, info, 1); 2868 2807 } else { 2869 2808 nor->addr_width = 3; 2870 2809 } ··· 2878 2821 if (ret) 2879 2822 return ret; 2880 2823 } 2824 + 2825 + /* Send all the required SPI flash commands to initialize device */ 2826 + nor->info = info; 2827 + ret = spi_nor_init(nor); 2828 + if (ret) 2829 + return ret; 2881 2830 2882 2831 dev_info(dev, "%s (%lld Kbytes)\n", info->name, 2883 2832 (long long)mtd->size >> 10);
+29 -6
drivers/mtd/spi-nor/stm32-quadspi.c
··· 1 1 /* 2 - * stm32_quadspi.c 2 + * Driver for stm32 quadspi controller 3 3 * 4 - * Copyright (C) 2017, Ludovic Barre 4 + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved 5 + * Author(s): Ludovic Barre author <ludovic.barre@st.com>. 5 6 * 6 - * License terms: GNU General Public License (GPL), version 2 7 + * License terms: GPL V2.0. 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License version 2 as published by 11 + * the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope that it will be useful, but 14 + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 16 + * details. 17 + * 18 + * You should have received a copy of the GNU General Public License along with 19 + * This program. If not, see <http://www.gnu.org/licenses/>. 7 20 */ 8 21 #include <linux/clk.h> 9 22 #include <linux/errno.h> ··· 126 113 #define STM32_MAX_MMAP_SZ SZ_256M 127 114 #define STM32_MAX_NORCHIP 2 128 115 116 + #define STM32_QSPI_FIFO_SZ 32 129 117 #define STM32_QSPI_FIFO_TIMEOUT_US 30000 130 118 #define STM32_QSPI_BUSY_TIMEOUT_US 100000 131 119 ··· 138 124 u32 presc; 139 125 u32 read_mode; 140 126 bool registered; 127 + u32 prefetch_limit; 141 128 }; 142 129 143 130 struct stm32_qspi { ··· 255 240 STM32_QSPI_FIFO_TIMEOUT_US); 256 241 if (ret) { 257 242 dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); 258 - break; 243 + return ret; 259 244 } 260 245 tx_fifo(buf++, qspi->io_base + QUADSPI_DR); 261 246 } 262 247 263 - return ret; 248 + return 0; 264 249 } 265 250 266 251 static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, ··· 287 272 { 288 273 struct stm32_qspi *qspi = flash->qspi; 289 274 u32 ccr, dcr, cr; 275 + u32 last_byte; 290 276 int err; 291 277 292 278 err = stm32_qspi_wait_nobusy(qspi); ··· 330 314 if (err) 331 315 goto abort; 332 316 
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); 317 + } else { 318 + last_byte = cmd->addr + cmd->len; 319 + if (last_byte > flash->prefetch_limit) 320 + goto abort; 333 321 } 334 322 335 323 return err; ··· 342 322 cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; 343 323 writel_relaxed(cr, qspi->io_base + QUADSPI_CR); 344 324 345 - dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); 325 + if (err) 326 + dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); 327 + 346 328 return err; 347 329 } 348 330 ··· 572 550 } 573 551 574 552 flash->fsize = FSIZE_VAL(mtd->size); 553 + flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ; 575 554 576 555 flash->read_mode = CCR_FMODE_MM; 577 556 if (mtd->size > qspi->mm_size)
+1 -5
include/linux/mtd/mtd.h
··· 267 267 */ 268 268 unsigned int bitflip_threshold; 269 269 270 - // Kernel-only stuff starts here. 270 + /* Kernel-only stuff starts here. */ 271 271 const char *name; 272 272 int index; 273 273 ··· 297 297 int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, 298 298 size_t *retlen, void **virt, resource_size_t *phys); 299 299 int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); 300 - unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, 301 - unsigned long len, 302 - unsigned long offset, 303 - unsigned long flags); 304 300 int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, 305 301 size_t *retlen, u_char *buf); 306 302 int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
-5
include/linux/mtd/nand-gpio.h
··· 5 5 #include <linux/mtd/rawnand.h> 6 6 7 7 struct gpio_nand_platdata { 8 - int gpio_nce; 9 - int gpio_nwp; 10 - int gpio_cle; 11 - int gpio_ale; 12 - int gpio_rdy; 13 8 void (*adjust_parts)(struct gpio_nand_platdata *, size_t); 14 9 struct mtd_partition *parts; 15 10 unsigned int num_parts;
+3
include/linux/mtd/rawnand.h
··· 177 177 */ 178 178 #define NAND_NEED_SCRAMBLING 0x00002000 179 179 180 + /* Device needs 3rd row address cycle */ 181 + #define NAND_ROW_ADDR_3 0x00004000 182 + 180 183 /* Options valid for Samsung large page devices */ 181 184 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG 182 185
+10
include/linux/mtd/spi-nor.h
··· 232 232 }; 233 233 234 234 /** 235 + * struct flash_info - Forward declaration of a structure used internally by 236 + * spi_nor_scan() 237 + */ 238 + struct flash_info; 239 + 240 + /** 235 241 * struct spi_nor - Structure for defining a the SPI NOR layer 236 242 * @mtd: point to a mtd_info structure 237 243 * @lock: the lock for the read/write/erase/lock/unlock operations 238 244 * @dev: point to a spi device, or a spi nor controller device. 245 + * @info: spi-nor part JDEC MFR id and other info 239 246 * @page_size: the page size of the SPI NOR 240 247 * @addr_width: number of address bytes 241 248 * @erase_opcode: the opcode for erasing a sector ··· 269 262 * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR 270 263 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR 271 264 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is 265 + * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode 272 266 * completely locked 273 267 * @priv: the private data 274 268 */ ··· 277 269 struct mtd_info mtd; 278 270 struct mutex lock; 279 271 struct device *dev; 272 + const struct flash_info *info; 280 273 u32 page_size; 281 274 u8 addr_width; 282 275 u8 erase_opcode; ··· 305 296 int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); 306 297 int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); 307 298 int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); 299 + int (*quad_enable)(struct spi_nor *nor); 308 300 309 301 void *priv; 310 302 };
-17
include/linux/platform_data/mtd-nand-omap2.h
··· 64 64 void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; 65 65 void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; 66 66 }; 67 - 68 - struct omap_nand_platform_data { 69 - int cs; 70 - struct mtd_partition *parts; 71 - int nr_parts; 72 - bool flash_bbt; 73 - enum nand_io xfer_type; 74 - int devsize; 75 - enum omap_ecc ecc_opt; 76 - 77 - struct device_node *elm_of_node; 78 - 79 - /* deprecated */ 80 - struct gpmc_nand_regs reg; 81 - struct device_node *of_node; 82 - bool dev_ready; 83 - }; 84 67 #endif
-4
lib/Kconfig
··· 46 46 bool 47 47 select GENERIC_PCI_IOMAP 48 48 49 - config GENERIC_IO 50 - bool 51 - default n 52 - 53 49 config STMP_DEVICE 54 50 bool 55 51