Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (58 commits)
mtd: jedec_probe: add PSD4256G6V id
mtd: OneNand support for Nomadik 8815 SoC (on NHK8815 board)
mtd: nand: driver for Nomadik 8815 SoC (on NHK8815 board)
m25p80: Add Spansion S25FL129P serial flashes
jffs2: Use SLAB_HWCACHE_ALIGN for jffs2_raw_{dirent,inode} slabs
mtd: sh_flctl: register sh_flctl using platform_driver_probe()
mtd: nand: txx9ndfmc: transfer 512 byte at a time if possible
mtd: nand: fix tmio_nand ecc correction
mtd: nand: add __nand_correct_data helper function
mtd: cfi_cmdset_0002: add 0xFF intolerance for M29W128G
mtd: inftl: fix fold chain block number
mtd: jedec: fix compilation problem with I28F640C3B definition
mtd: nand: fix ECC Correction bug for SMC ordering for NDFC driver
mtd: ofpart: Check availability of reg property instead of name property
driver/Makefile: Initialize "mtd" and "spi" before "net"
mtd: omap: adding DMA mode support in nand prefetch/post-write
mtd: omap: add support for nand prefetch-read and post-write
mtd: add nand support for w90p910 (v2)
mtd: maps: add mtd-ram support to physmap_of
mtd: pxa3xx_nand: add single-bit error corrections reporting
...

+2798 -254
+26 -16
Documentation/powerpc/dts-bindings/mtd-physmap.txt
··· 1 - CFI or JEDEC memory-mapped NOR flash 1 + CFI or JEDEC memory-mapped NOR flash, MTD-RAM (NVRAM...) 2 2 3 3 Flash chips (Memory Technology Devices) are often used for solid state 4 4 file systems on embedded devices. 5 5 6 - - compatible : should contain the specific model of flash chip(s) 7 - used, if known, followed by either "cfi-flash" or "jedec-flash" 8 - - reg : Address range(s) of the flash chip(s) 6 + - compatible : should contain the specific model of mtd chip(s) 7 + used, if known, followed by either "cfi-flash", "jedec-flash" 8 + or "mtd-ram". 9 + - reg : Address range(s) of the mtd chip(s) 9 10 It's possible to (optionally) define multiple "reg" tuples so that 10 - non-identical NOR chips can be described in one flash node. 11 - - bank-width : Width (in bytes) of the flash bank. Equal to the 11 + non-identical chips can be described in one node. 12 + - bank-width : Width (in bytes) of the bank. Equal to the 12 13 device width times the number of interleaved chips. 13 - - device-width : (optional) Width of a single flash chip. If 14 + - device-width : (optional) Width of a single mtd chip. If 14 15 omitted, assumed to be equal to 'bank-width'. 15 - - #address-cells, #size-cells : Must be present if the flash has 16 + - #address-cells, #size-cells : Must be present if the device has 16 17 sub-nodes representing partitions (see below). In this case 17 18 both #address-cells and #size-cells must be equal to 1. 18 19 ··· 23 22 - vendor-id : Contains the flash chip's vendor id (1 byte). 24 23 - device-id : Contains the flash chip's device id (1 byte). 25 24 26 - In addition to the information on the flash bank itself, the 25 + In addition to the information on the mtd bank itself, the 27 26 device tree may optionally contain additional information 28 - describing partitions of the flash address space. This can be 27 + describing partitions of the address space. 
This can be 29 28 used on platforms which have strong conventions about which 30 - portions of the flash are used for what purposes, but which don't 29 + portions of a flash are used for what purposes, but which don't 31 30 use an on-flash partition table such as RedBoot. 32 31 33 - Each partition is represented as a sub-node of the flash device. 32 + Each partition is represented as a sub-node of the mtd device. 34 33 Each node's name represents the name of the corresponding 35 - partition of the flash device. 34 + partition of the mtd device. 36 35 37 36 Flash partitions 38 - - reg : The partition's offset and size within the flash bank. 39 - - label : (optional) The label / name for this flash partition. 37 + - reg : The partition's offset and size within the mtd bank. 38 + - label : (optional) The label / name for this partition. 40 39 If omitted, the label is taken from the node name (excluding 41 40 the unit address). 42 41 - read-only : (optional) This parameter, if present, is a hint to 43 - Linux that this flash partition should only be mounted 42 + Linux that this partition should only be mounted 44 43 read-only. This is usually used for flash partitions 45 44 containing early-boot firmware images or data which should not 46 45 be clobbered. ··· 79 78 reg = <0 0x04000000>; 80 79 }; 81 80 }; 81 + 82 + An example using SRAM: 83 + 84 + sram@2,0 { 85 + compatible = "samsung,k6f1616u6a", "mtd-ram"; 86 + reg = <2 0 0x00200000>; 87 + bank-width = <2>; 88 + }; 89 +
+1 -1
arch/arm/configs/nhk8815_defconfig
··· 498 498 # CONFIG_MTD_DOC2001PLUS is not set 499 499 CONFIG_MTD_NAND=y 500 500 CONFIG_MTD_NAND_VERIFY_WRITE=y 501 - # CONFIG_MTD_NAND_ECC_SMC is not set 501 + CONFIG_MTD_NAND_ECC_SMC=y 502 502 # CONFIG_MTD_NAND_MUSEUM_IDS is not set 503 503 # CONFIG_MTD_NAND_GPIO is not set 504 504 CONFIG_MTD_NAND_IDS=y
+155
arch/arm/mach-nomadik/board-nhk8815.c
··· 16 16 #include <linux/amba/bus.h> 17 17 #include <linux/interrupt.h> 18 18 #include <linux/gpio.h> 19 + #include <linux/mtd/mtd.h> 20 + #include <linux/mtd/nand.h> 21 + #include <linux/mtd/partitions.h> 22 + #include <linux/io.h> 23 + #include <asm/sizes.h> 19 24 #include <asm/mach-types.h> 20 25 #include <asm/mach/arch.h> 21 26 #include <asm/mach/irq.h> 27 + #include <asm/mach/flash.h> 22 28 #include <mach/setup.h> 29 + #include <mach/nand.h> 30 + #include <mach/fsmc.h> 23 31 #include "clock.h" 32 + 33 + /* These adresses span 16MB, so use three individual pages */ 34 + static struct resource nhk8815_nand_resources[] = { 35 + { 36 + .name = "nand_addr", 37 + .start = NAND_IO_ADDR, 38 + .end = NAND_IO_ADDR + 0xfff, 39 + .flags = IORESOURCE_MEM, 40 + }, { 41 + .name = "nand_cmd", 42 + .start = NAND_IO_CMD, 43 + .end = NAND_IO_CMD + 0xfff, 44 + .flags = IORESOURCE_MEM, 45 + }, { 46 + .name = "nand_data", 47 + .start = NAND_IO_DATA, 48 + .end = NAND_IO_DATA + 0xfff, 49 + .flags = IORESOURCE_MEM, 50 + } 51 + }; 52 + 53 + static int nhk8815_nand_init(void) 54 + { 55 + /* FSMC setup for nand chip select (8-bit nand in 8815NHK) */ 56 + writel(0x0000000E, FSMC_PCR(0)); 57 + writel(0x000D0A00, FSMC_PMEM(0)); 58 + writel(0x00100A00, FSMC_PATT(0)); 59 + 60 + /* enable access to the chip select area */ 61 + writel(readl(FSMC_PCR(0)) | 0x04, FSMC_PCR(0)); 62 + 63 + return 0; 64 + } 65 + 66 + /* 67 + * These partitions are the same as those used in the 2.6.20 release 68 + * shipped by the vendor; the first two partitions are mandated 69 + * by the boot ROM, and the bootloader area is somehow oversized... 
70 + */ 71 + static struct mtd_partition nhk8815_partitions[] = { 72 + { 73 + .name = "X-Loader(NAND)", 74 + .offset = 0, 75 + .size = SZ_256K, 76 + }, { 77 + .name = "MemInit(NAND)", 78 + .offset = MTDPART_OFS_APPEND, 79 + .size = SZ_256K, 80 + }, { 81 + .name = "BootLoader(NAND)", 82 + .offset = MTDPART_OFS_APPEND, 83 + .size = SZ_2M, 84 + }, { 85 + .name = "Kernel zImage(NAND)", 86 + .offset = MTDPART_OFS_APPEND, 87 + .size = 3 * SZ_1M, 88 + }, { 89 + .name = "Root Filesystem(NAND)", 90 + .offset = MTDPART_OFS_APPEND, 91 + .size = 22 * SZ_1M, 92 + }, { 93 + .name = "User Filesystem(NAND)", 94 + .offset = MTDPART_OFS_APPEND, 95 + .size = MTDPART_SIZ_FULL, 96 + } 97 + }; 98 + 99 + static struct nomadik_nand_platform_data nhk8815_nand_data = { 100 + .parts = nhk8815_partitions, 101 + .nparts = ARRAY_SIZE(nhk8815_partitions), 102 + .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \ 103 + | NAND_NO_READRDY | NAND_NO_AUTOINCR, 104 + .init = nhk8815_nand_init, 105 + }; 106 + 107 + static struct platform_device nhk8815_nand_device = { 108 + .name = "nomadik_nand", 109 + .dev = { 110 + .platform_data = &nhk8815_nand_data, 111 + }, 112 + .resource = nhk8815_nand_resources, 113 + .num_resources = ARRAY_SIZE(nhk8815_nand_resources), 114 + }; 115 + 116 + /* These are the partitions for the OneNand device, different from above */ 117 + static struct mtd_partition nhk8815_onenand_partitions[] = { 118 + { 119 + .name = "X-Loader(OneNAND)", 120 + .offset = 0, 121 + .size = SZ_256K, 122 + }, { 123 + .name = "MemInit(OneNAND)", 124 + .offset = MTDPART_OFS_APPEND, 125 + .size = SZ_256K, 126 + }, { 127 + .name = "BootLoader(OneNAND)", 128 + .offset = MTDPART_OFS_APPEND, 129 + .size = SZ_2M-SZ_256K, 130 + }, { 131 + .name = "SysImage(OneNAND)", 132 + .offset = MTDPART_OFS_APPEND, 133 + .size = 4 * SZ_1M, 134 + }, { 135 + .name = "Root Filesystem(OneNAND)", 136 + .offset = MTDPART_OFS_APPEND, 137 + .size = 22 * SZ_1M, 138 + }, { 139 + .name = "User Filesystem(OneNAND)", 140 
+ .offset = MTDPART_OFS_APPEND, 141 + .size = MTDPART_SIZ_FULL, 142 + } 143 + }; 144 + 145 + static struct flash_platform_data nhk8815_onenand_data = { 146 + .parts = nhk8815_onenand_partitions, 147 + .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions), 148 + }; 149 + 150 + static struct resource nhk8815_onenand_resource[] = { 151 + { 152 + .start = 0x30000000, 153 + .end = 0x30000000 + SZ_128K - 1, 154 + .flags = IORESOURCE_MEM, 155 + }, 156 + }; 157 + 158 + static struct platform_device nhk8815_onenand_device = { 159 + .name = "onenand", 160 + .id = -1, 161 + .dev = { 162 + .platform_data = &nhk8815_onenand_data, 163 + }, 164 + .resource = nhk8815_onenand_resource, 165 + .num_resources = ARRAY_SIZE(nhk8815_onenand_resource), 166 + }; 167 + 168 + static void __init nhk8815_onenand_init(void) 169 + { 170 + #ifdef CONFIG_ONENAND 171 + /* Set up SMCS0 for OneNand */ 172 + writel(0x000030db, FSMC_BCR0); 173 + writel(0x02100551, FSMC_BTR0); 174 + #endif 175 + } 24 176 25 177 #define __MEM_4K_RESOURCE(x) \ 26 178 .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM} ··· 233 81 device_initcall(nhk8815_eth_init); 234 82 235 83 static struct platform_device *nhk8815_platform_devices[] __initdata = { 84 + &nhk8815_nand_device, 85 + &nhk8815_onenand_device, 236 86 &nhk8815_eth_device, 237 87 /* will add more devices */ 238 88 }; ··· 244 90 int i; 245 91 246 92 cpu8815_platform_init(); 93 + nhk8815_onenand_init(); 247 94 platform_add_devices(nhk8815_platform_devices, 248 95 ARRAY_SIZE(nhk8815_platform_devices)); 249 96
+29
arch/arm/mach-nomadik/include/mach/fsmc.h
··· 1 + 2 + /* Definitions for the Nomadik FSMC "Flexible Static Memory controller" */ 3 + 4 + #ifndef __ASM_ARCH_FSMC_H 5 + #define __ASM_ARCH_FSMC_H 6 + 7 + #include <mach/hardware.h> 8 + /* 9 + * Register list 10 + */ 11 + 12 + /* bus control reg. and bus timing reg. for CS0..CS3 */ 13 + #define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3)) 14 + #define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04) 15 + 16 + /* PC-card and NAND: 17 + * PCR = control register 18 + * PMEM = memory timing 19 + * PATT = attribute timing 20 + * PIO = I/O timing 21 + * PECCR = ECC result 22 + */ 23 + #define FSMC_PCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x00) 24 + #define FSMC_PMEM(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x08) 25 + #define FSMC_PATT(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x0c) 26 + #define FSMC_PIO(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x10) 27 + #define FSMC_PECCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x14) 28 + 29 + #endif /* __ASM_ARCH_FSMC_H */
+16
arch/arm/mach-nomadik/include/mach/nand.h
··· 1 + #ifndef __ASM_ARCH_NAND_H 2 + #define __ASM_ARCH_NAND_H 3 + 4 + struct nomadik_nand_platform_data { 5 + struct mtd_partition *parts; 6 + int nparts; 7 + int options; 8 + int (*init) (void); 9 + int (*exit) (void); 10 + }; 11 + 12 + #define NAND_IO_DATA 0x40000000 13 + #define NAND_IO_CMD 0x40800000 14 + #define NAND_IO_ADDR 0x41000000 15 + 16 + #endif /* __ASM_ARCH_NAND_H */
+2 -2
arch/arm/mach-omap2/board-apollon.c
··· 87 87 }, 88 88 }; 89 89 90 - static struct flash_platform_data apollon_flash_data = { 90 + static struct onenand_platform_data apollon_flash_data = { 91 91 .parts = apollon_partitions, 92 92 .nr_parts = ARRAY_SIZE(apollon_partitions), 93 93 }; ··· 99 99 }; 100 100 101 101 static struct platform_device apollon_onenand_device = { 102 - .name = "onenand", 102 + .name = "onenand-flash", 103 103 .id = -1, 104 104 .dev = { 105 105 .platform_data = &apollon_flash_data,
+62 -1
arch/arm/mach-omap2/gpmc.c
··· 57 57 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ 58 58 #define GPMC_SECTION_SHIFT 28 /* 128 MB */ 59 59 60 + #define PREFETCH_FIFOTHRESHOLD (0x40 << 8) 61 + #define CS_NUM_SHIFT 24 62 + #define ENABLE_PREFETCH (0x1 << 7) 63 + #define DMA_MPU_MODE 2 64 + 60 65 static struct resource gpmc_mem_root; 61 66 static struct resource gpmc_cs_mem[GPMC_CS_NUM]; 62 67 static DEFINE_SPINLOCK(gpmc_mem_lock); ··· 391 386 } 392 387 EXPORT_SYMBOL(gpmc_cs_free); 393 388 389 + /** 390 + * gpmc_prefetch_enable - configures and starts prefetch transfer 391 + * @cs: nand cs (chip select) number 392 + * @dma_mode: dma mode enable (1) or disable (0) 393 + * @u32_count: number of bytes to be transferred 394 + * @is_write: prefetch read(0) or write post(1) mode 395 + */ 396 + int gpmc_prefetch_enable(int cs, int dma_mode, 397 + unsigned int u32_count, int is_write) 398 + { 399 + uint32_t prefetch_config1; 400 + 401 + if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) { 402 + /* Set the amount of bytes to be prefetched */ 403 + gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count); 404 + 405 + /* Set dma/mpu mode, the prefetch read / post write and 406 + * enable the engine. Set which cs has requested it. 
407 + */ 408 + prefetch_config1 = ((cs << CS_NUM_SHIFT) | 409 + PREFETCH_FIFOTHRESHOLD | 410 + ENABLE_PREFETCH | 411 + (dma_mode << DMA_MPU_MODE) | 412 + (0x1 & is_write)); 413 + gpmc_write_reg(GPMC_PREFETCH_CONFIG1, prefetch_config1); 414 + } else { 415 + return -EBUSY; 416 + } 417 + /* Start the prefetch engine */ 418 + gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1); 419 + 420 + return 0; 421 + } 422 + EXPORT_SYMBOL(gpmc_prefetch_enable); 423 + 424 + /** 425 + * gpmc_prefetch_reset - disables and stops the prefetch engine 426 + */ 427 + void gpmc_prefetch_reset(void) 428 + { 429 + /* Stop the PFPW engine */ 430 + gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0); 431 + 432 + /* Reset/disable the PFPW engine */ 433 + gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0); 434 + } 435 + EXPORT_SYMBOL(gpmc_prefetch_reset); 436 + 437 + /** 438 + * gpmc_prefetch_status - reads prefetch status of engine 439 + */ 440 + int gpmc_prefetch_status(void) 441 + { 442 + return gpmc_read_reg(GPMC_PREFETCH_STATUS); 443 + } 444 + EXPORT_SYMBOL(gpmc_prefetch_status); 445 + 394 446 static void __init gpmc_mem_init(void) 395 447 { 396 448 int cs; ··· 514 452 l &= 0x03 << 3; 515 453 l |= (0x02 << 3) | (1 << 0); 516 454 gpmc_write_reg(GPMC_SYSCONFIG, l); 517 - 518 455 gpmc_mem_init(); 519 456 }
+4
arch/arm/plat-omap/include/mach/gpmc.h
··· 103 103 extern void gpmc_cs_free(int cs); 104 104 extern int gpmc_cs_set_reserved(int cs, int reserved); 105 105 extern int gpmc_cs_reserved(int cs); 106 + extern int gpmc_prefetch_enable(int cs, int dma_mode, 107 + unsigned int u32_count, int is_write); 108 + extern void gpmc_prefetch_reset(void); 109 + extern int gpmc_prefetch_status(void); 106 110 extern void __init gpmc_init(void); 107 111 108 112 #endif
+2 -2
drivers/Makefile
··· 43 43 obj-$(CONFIG_IDE) += ide/ 44 44 obj-$(CONFIG_SCSI) += scsi/ 45 45 obj-$(CONFIG_ATA) += ata/ 46 + obj-$(CONFIG_MTD) += mtd/ 47 + obj-$(CONFIG_SPI) += spi/ 46 48 obj-y += net/ 47 49 obj-$(CONFIG_ATM) += atm/ 48 50 obj-$(CONFIG_FUSION) += message/ ··· 53 51 obj-$(CONFIG_UIO) += uio/ 54 52 obj-y += cdrom/ 55 53 obj-y += auxdisplay/ 56 - obj-$(CONFIG_MTD) += mtd/ 57 - obj-$(CONFIG_SPI) += spi/ 58 54 obj-$(CONFIG_PCCARD) += pcmcia/ 59 55 obj-$(CONFIG_DIO) += dio/ 60 56 obj-$(CONFIG_SBUS) += sbus/
+8 -8
drivers/mtd/Kconfig
··· 25 25 help 26 26 Determines the verbosity level of the MTD debugging messages. 27 27 28 + config MTD_TESTS 29 + tristate "MTD tests support" 30 + depends on m 31 + help 32 + This option includes various MTD tests into compilation. The tests 33 + should normally be compiled as kernel modules. The modules perform 34 + various checks and verifications when loaded. 35 + 28 36 config MTD_CONCAT 29 37 tristate "MTD concatenating support" 30 38 help ··· 52 44 Note, however, that you don't need this option for the DiskOnChip 53 45 devices. Partitioning on NFTL 'devices' is a different - that's the 54 46 'normal' form of partitioning used on a block device. 55 - 56 - config MTD_TESTS 57 - tristate "MTD tests support" 58 - depends on m 59 - help 60 - This option includes various MTD tests into compilation. The tests 61 - should normally be compiled as kernel modules. The modules perform 62 - various checks and verifications when loaded. 63 47 64 48 config MTD_REDBOOT_PARTS 65 49 tristate "RedBoot partition table parsing"
+1 -1
drivers/mtd/afs.c
··· 239 239 parts[idx].offset = img_ptr; 240 240 parts[idx].mask_flags = 0; 241 241 242 - printk(" mtd%d: at 0x%08x, %5dKB, %8u, %s\n", 242 + printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n", 243 243 idx, img_ptr, parts[idx].size / 1024, 244 244 iis.imageNumber, str); 245 245
-11
drivers/mtd/chips/cfi_cmdset_0002.c
··· 282 282 } 283 283 } 284 284 285 - static void fixup_M29W128G_write_buffer(struct mtd_info *mtd, void *param) 286 - { 287 - struct map_info *map = mtd->priv; 288 - struct cfi_private *cfi = map->fldrv_priv; 289 - if (cfi->cfiq->BufWriteTimeoutTyp) { 290 - pr_warning("Don't use write buffer on ST flash M29W128G\n"); 291 - cfi->cfiq->BufWriteTimeoutTyp = 0; 292 - } 293 - } 294 - 295 285 static struct cfi_fixup cfi_fixup_table[] = { 296 286 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 297 287 #ifdef AMD_BOOTLOC_BUG ··· 298 308 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, }, 299 309 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, }, 300 310 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, }, 301 - { CFI_MFR_ST, 0x227E, fixup_M29W128G_write_buffer, NULL, }, 302 311 #if !FORCE_WORD_WRITE 303 312 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 304 313 #endif
+4
drivers/mtd/chips/cfi_util.c
··· 81 81 { 82 82 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 83 83 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); 84 + /* M29W128G flashes require an additional reset command 85 + when exiting qry mode */ 86 + if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E)) 87 + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 84 88 } 85 89 EXPORT_SYMBOL_GPL(cfi_qry_mode_off); 86 90
+39 -2
drivers/mtd/chips/jedec_probe.c
··· 111 111 #define I28F320B3B 0x8897 112 112 #define I28F640B3T 0x8898 113 113 #define I28F640B3B 0x8899 114 + #define I28F640C3B 0x88CD 115 + #define I28F160F3T 0x88F3 116 + #define I28F160F3B 0x88F4 117 + #define I28F160C3T 0x88C2 118 + #define I28F160C3B 0x88C3 114 119 #define I82802AB 0x00ad 115 120 #define I82802AC 0x00ac 116 121 ··· 155 150 #define M50LPW080 0x002F 156 151 #define M50FLW080A 0x0080 157 152 #define M50FLW080B 0x0081 153 + #define PSD4256G6V 0x00e9 158 154 159 155 /* SST */ 160 156 #define SST29EE020 0x0010 ··· 207 201 MTD_UADDR_0x0555_0x02AA, 208 202 MTD_UADDR_0x0555_0x0AAA, 209 203 MTD_UADDR_0x5555_0x2AAA, 204 + MTD_UADDR_0x0AAA_0x0554, 210 205 MTD_UADDR_0x0AAA_0x0555, 211 206 MTD_UADDR_0xAAAA_0x5555, 212 207 MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */ ··· 250 243 [MTD_UADDR_0x5555_0x2AAA] = { 251 244 .addr1 = 0x5555, 252 245 .addr2 = 0x2aaa 246 + }, 247 + 248 + [MTD_UADDR_0x0AAA_0x0554] = { 249 + .addr1 = 0x0AAA, 250 + .addr2 = 0x0554 253 251 }, 254 252 255 253 [MTD_UADDR_0x0AAA_0x0555] = { ··· 1115 1103 } 1116 1104 }, { 1117 1105 .mfr_id = MANUFACTURER_INTEL, 1106 + .dev_id = I28F640C3B, 1107 + .name = "Intel 28F640C3B", 1108 + .devtypes = CFI_DEVICETYPE_X16, 1109 + .uaddr = MTD_UADDR_UNNECESSARY, 1110 + .dev_size = SIZE_8MiB, 1111 + .cmd_set = P_ID_INTEL_STD, 1112 + .nr_regions = 2, 1113 + .regions = { 1114 + ERASEINFO(0x02000, 8), 1115 + ERASEINFO(0x10000, 127), 1116 + } 1117 + }, { 1118 + .mfr_id = MANUFACTURER_INTEL, 1118 1119 .dev_id = I82802AB, 1119 1120 .name = "Intel 82802AB", 1120 1121 .devtypes = CFI_DEVICETYPE_X8, ··· 1181 1156 .mfr_id = MANUFACTURER_NEC, 1182 1157 .dev_id = UPD29F064115, 1183 1158 .name = "NEC uPD29F064115", 1184 - .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1185 - .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? 
*/ 1159 + .devtypes = CFI_DEVICETYPE_X16, 1160 + .uaddr = MTD_UADDR_0xAAAA_0x5555, 1186 1161 .dev_size = SIZE_8MiB, 1187 1162 .cmd_set = P_ID_AMD_STD, 1188 1163 .nr_regions = 3, ··· 1749 1724 ERASEINFO(0x1000,16), 1750 1725 ERASEINFO(0x10000,13), 1751 1726 ERASEINFO(0x1000,16), 1727 + } 1728 + }, { 1729 + .mfr_id = 0xff00 | MANUFACTURER_ST, 1730 + .dev_id = 0xff00 | PSD4256G6V, 1731 + .name = "ST PSD4256G6V", 1732 + .devtypes = CFI_DEVICETYPE_X16, 1733 + .uaddr = MTD_UADDR_0x0AAA_0x0554, 1734 + .dev_size = SIZE_1MiB, 1735 + .cmd_set = P_ID_AMD_STD, 1736 + .nr_regions = 1, 1737 + .regions = { 1738 + ERASEINFO(0x10000,16), 1752 1739 } 1753 1740 }, { 1754 1741 .mfr_id = MANUFACTURER_TOSHIBA,
+10
drivers/mtd/devices/Kconfig
··· 104 104 help 105 105 This option enables FAST_READ access supported by ST M25Pxx. 106 106 107 + config MTD_SST25L 108 + tristate "Support SST25L (non JEDEC) SPI Flash chips" 109 + depends on SPI_MASTER 110 + help 111 + This enables access to the non JEDEC SST25L SPI flash chips, used 112 + for program and data storage. 113 + 114 + Set up your spi devices with the right board-specific platform data, 115 + if you want to specify device partitioning. 116 + 107 117 config MTD_SLRAM 108 118 tristate "Uncached system RAM" 109 119 help
+1
drivers/mtd/devices/Makefile
··· 16 16 obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17 17 obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18 18 obj-$(CONFIG_MTD_M25P80) += m25p80.o 19 + obj-$(CONFIG_MTD_SST25L) += sst25l.o
+4 -2
drivers/mtd/devices/lart.c
··· 393 393 * erase range is aligned with the erase size which is in 394 394 * effect here. 395 395 */ 396 - if (instr->addr & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL); 396 + if (i < 0 || (instr->addr & (mtd->eraseregions[i].erasesize - 1))) 397 + return -EINVAL; 397 398 398 399 /* Remember the erase region we start on */ 399 400 first = i; ··· 410 409 i--; 411 410 412 411 /* is the end aligned on a block boundary? */ 413 - if ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL); 412 + if (i < 0 || ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1))) 413 + return -EINVAL; 414 414 415 415 addr = instr->addr; 416 416 len = instr->len;
+135 -2
drivers/mtd/devices/m25p80.c
··· 44 44 #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ 45 45 #define OPCODE_RDID 0x9f /* Read JEDEC ID */ 46 46 47 + /* Used for SST flashes only. */ 48 + #define OPCODE_BP 0x02 /* Byte program */ 49 + #define OPCODE_WRDI 0x04 /* Write disable */ 50 + #define OPCODE_AAI_WP 0xad /* Auto address increment word program */ 51 + 47 52 /* Status Register bits. */ 48 53 #define SR_WIP 1 /* Write in progress */ 49 54 #define SR_WEL 2 /* Write enable latch */ ··· 137 132 return spi_write_then_read(flash->spi, &code, 1, NULL, 0); 138 133 } 139 134 135 + /* 136 + * Send write disble instruction to the chip. 137 + */ 138 + static inline int write_disable(struct m25p *flash) 139 + { 140 + u8 code = OPCODE_WRDI; 141 + 142 + return spi_write_then_read(flash->spi, &code, 1, NULL, 0); 143 + } 140 144 141 145 /* 142 146 * Service routine to read status register until ready, or timeout occurs. ··· 468 454 return 0; 469 455 } 470 456 457 + static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, 458 + size_t *retlen, const u_char *buf) 459 + { 460 + struct m25p *flash = mtd_to_m25p(mtd); 461 + struct spi_transfer t[2]; 462 + struct spi_message m; 463 + size_t actual; 464 + int cmd_sz, ret; 465 + 466 + if (retlen) 467 + *retlen = 0; 468 + 469 + /* sanity checks */ 470 + if (!len) 471 + return 0; 472 + 473 + if (to + len > flash->mtd.size) 474 + return -EINVAL; 475 + 476 + spi_message_init(&m); 477 + memset(t, 0, (sizeof t)); 478 + 479 + t[0].tx_buf = flash->command; 480 + t[0].len = CMD_SIZE; 481 + spi_message_add_tail(&t[0], &m); 482 + 483 + t[1].tx_buf = buf; 484 + spi_message_add_tail(&t[1], &m); 485 + 486 + mutex_lock(&flash->lock); 487 + 488 + /* Wait until finished previous write command. */ 489 + ret = wait_till_ready(flash); 490 + if (ret) 491 + goto time_out; 492 + 493 + write_enable(flash); 494 + 495 + actual = to % 2; 496 + /* Start write from odd address. 
*/ 497 + if (actual) { 498 + flash->command[0] = OPCODE_BP; 499 + flash->command[1] = to >> 16; 500 + flash->command[2] = to >> 8; 501 + flash->command[3] = to; 502 + 503 + /* write one byte. */ 504 + t[1].len = 1; 505 + spi_sync(flash->spi, &m); 506 + ret = wait_till_ready(flash); 507 + if (ret) 508 + goto time_out; 509 + *retlen += m.actual_length - CMD_SIZE; 510 + } 511 + to += actual; 512 + 513 + flash->command[0] = OPCODE_AAI_WP; 514 + flash->command[1] = to >> 16; 515 + flash->command[2] = to >> 8; 516 + flash->command[3] = to; 517 + 518 + /* Write out most of the data here. */ 519 + cmd_sz = CMD_SIZE; 520 + for (; actual < len - 1; actual += 2) { 521 + t[0].len = cmd_sz; 522 + /* write two bytes. */ 523 + t[1].len = 2; 524 + t[1].tx_buf = buf + actual; 525 + 526 + spi_sync(flash->spi, &m); 527 + ret = wait_till_ready(flash); 528 + if (ret) 529 + goto time_out; 530 + *retlen += m.actual_length - cmd_sz; 531 + cmd_sz = 1; 532 + to += 2; 533 + } 534 + write_disable(flash); 535 + ret = wait_till_ready(flash); 536 + if (ret) 537 + goto time_out; 538 + 539 + /* Write out trailing byte if it exists. 
*/ 540 + if (actual != len) { 541 + write_enable(flash); 542 + flash->command[0] = OPCODE_BP; 543 + flash->command[1] = to >> 16; 544 + flash->command[2] = to >> 8; 545 + flash->command[3] = to; 546 + t[0].len = CMD_SIZE; 547 + t[1].len = 1; 548 + t[1].tx_buf = buf + actual; 549 + 550 + spi_sync(flash->spi, &m); 551 + ret = wait_till_ready(flash); 552 + if (ret) 553 + goto time_out; 554 + *retlen += m.actual_length - CMD_SIZE; 555 + write_disable(flash); 556 + } 557 + 558 + time_out: 559 + mutex_unlock(&flash->lock); 560 + return ret; 561 + } 471 562 472 563 /****************************************************************************/ 473 564 ··· 620 501 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, 621 502 622 503 /* Macronix */ 504 + { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, }, 505 + { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, }, 623 506 { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, }, 507 + { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, }, 624 508 625 509 /* Spansion -- single (large) sector size only, at least 626 510 * for the chips listed here (without boot sectors). 
··· 633 511 { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, 634 512 { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, 635 513 { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, 636 - { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, 514 + { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, 637 515 { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, 516 + { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, }, 517 + { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, }, 638 518 639 519 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 640 520 { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, }, 641 521 { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, }, 642 522 { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, }, 643 523 { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, }, 524 + { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, }, 525 + { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, }, 526 + { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, }, 527 + { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, }, 644 528 645 529 /* ST Microelectronics -- newer production may have feature updates */ 646 530 { "m25p05", 0x202010, 0, 32 * 1024, 2, }, ··· 795 667 flash->mtd.size = info->sector_size * info->n_sectors; 796 668 flash->mtd.erase = m25p80_erase; 797 669 flash->mtd.read = m25p80_read; 798 - flash->mtd.write = m25p80_write; 670 + 671 + /* sst flash chips use AAI word program */ 672 + if (info->jedec_id >> 16 == 0xbf) 673 + flash->mtd.write = sst_write; 674 + else 675 + flash->mtd.write = m25p80_write; 799 676 800 677 /* prefer "small sector" erase if possible */ 801 678 if (info->flags & SECT_4K) {
+2 -2
drivers/mtd/devices/mtd_dataflash.c
··· 401 401 (void) dataflash_waitready(priv->spi); 402 402 403 403 404 - #ifdef CONFIG_MTD_DATAFLASH_VERIFY_WRITE 404 + #ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY 405 405 406 406 /* (3) Compare to Buffer1 */ 407 407 addr = pageaddr << priv->page_offset; ··· 430 430 } else 431 431 status = 0; 432 432 433 - #endif /* CONFIG_MTD_DATAFLASH_VERIFY_WRITE */ 433 + #endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */ 434 434 435 435 remaining = remaining - writelen; 436 436 pageaddr++;
+12 -13
drivers/mtd/devices/phram.c
··· 14 14 * Example: 15 15 * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi 16 16 */ 17 + 18 + #define pr_fmt(fmt) "phram: " fmt 19 + 17 20 #include <asm/io.h> 18 21 #include <linux/init.h> 19 22 #include <linux/kernel.h> ··· 25 22 #include <linux/moduleparam.h> 26 23 #include <linux/slab.h> 27 24 #include <linux/mtd/mtd.h> 28 - 29 - #define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args) 30 25 31 26 struct phram_mtd_list { 32 27 struct mtd_info mtd; ··· 133 132 ret = -EIO; 134 133 new->mtd.priv = ioremap(start, len); 135 134 if (!new->mtd.priv) { 136 - ERROR("ioremap failed\n"); 135 + pr_err("ioremap failed\n"); 137 136 goto out1; 138 137 } 139 138 ··· 153 152 154 153 ret = -EAGAIN; 155 154 if (add_mtd_device(&new->mtd)) { 156 - ERROR("Failed to register new device\n"); 155 + pr_err("Failed to register new device\n"); 157 156 goto out2; 158 157 } 159 158 ··· 228 227 229 228 230 229 #define parse_err(fmt, args...) do { \ 231 - ERROR(fmt , ## args); \ 232 - return 0; \ 230 + pr_err(fmt , ## args); \ 231 + return 1; \ 233 232 } while (0) 234 233 235 234 static int phram_setup(const char *val, struct kernel_param *kp) ··· 257 256 parse_err("not enough arguments\n"); 258 257 259 258 ret = parse_name(&name, token[0]); 260 - if (ret == -ENOMEM) 261 - parse_err("out of memory\n"); 262 - if (ret == -ENOSPC) 263 - parse_err("name too long\n"); 264 259 if (ret) 265 - return 0; 260 + return ret; 266 261 267 262 ret = parse_num32(&start, token[1]); 268 263 if (ret) { ··· 272 275 parse_err("illegal device length\n"); 273 276 } 274 277 275 - register_device(name, start, len); 278 + ret = register_device(name, start, len); 279 + if (!ret) 280 + pr_info("%s device: %#x at %#x\n", name, len, start); 276 281 277 - return 0; 282 + return ret; 278 283 } 279 284 280 285 module_param_call(phram, phram_setup, NULL, NULL, 000);
+1 -1
drivers/mtd/devices/slram.c
··· 341 341 #else 342 342 int count; 343 343 344 - for (count = 0; (map[count]) && (count < SLRAM_MAX_DEVICES_PARAMS); 344 + for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count]; 345 345 count++) { 346 346 } 347 347
+512
drivers/mtd/devices/sst25l.c
··· 1 + /* 2 + * sst25l.c 3 + * 4 + * Driver for SST25L SPI Flash chips 5 + * 6 + * Copyright © 2009 Bluewater Systems Ltd 7 + * Author: Andre Renaud <andre@bluewatersys.com> 8 + * Author: Ryan Mallon <ryan@bluewatersys.com> 9 + * 10 + * Based on m25p80.c 11 + * 12 + * This code is free software; you can redistribute it and/or modify 13 + * it under the terms of the GNU General Public License version 2 as 14 + * published by the Free Software Foundation. 15 + * 16 + */ 17 + 18 + #include <linux/init.h> 19 + #include <linux/module.h> 20 + #include <linux/device.h> 21 + #include <linux/mutex.h> 22 + #include <linux/interrupt.h> 23 + 24 + #include <linux/mtd/mtd.h> 25 + #include <linux/mtd/partitions.h> 26 + 27 + #include <linux/spi/spi.h> 28 + #include <linux/spi/flash.h> 29 + 30 + /* Erases can take up to 3 seconds! */ 31 + #define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000) 32 + 33 + #define SST25L_CMD_WRSR 0x01 /* Write status register */ 34 + #define SST25L_CMD_WRDI 0x04 /* Write disable */ 35 + #define SST25L_CMD_RDSR 0x05 /* Read status register */ 36 + #define SST25L_CMD_WREN 0x06 /* Write enable */ 37 + #define SST25L_CMD_READ 0x03 /* High speed read */ 38 + 39 + #define SST25L_CMD_EWSR 0x50 /* Enable write status register */ 40 + #define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */ 41 + #define SST25L_CMD_READ_ID 0x90 /* Read device ID */ 42 + #define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */ 43 + 44 + #define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */ 45 + #define SST25L_STATUS_WREN (1 << 1) /* Write enabled */ 46 + #define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */ 47 + #define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */ 48 + 49 + struct sst25l_flash { 50 + struct spi_device *spi; 51 + struct mutex lock; 52 + struct mtd_info mtd; 53 + 54 + int partitioned; 55 + }; 56 + 57 + struct flash_info { 58 + const char *name; 59 + uint16_t device_id; 60 + unsigned page_size; 61 + unsigned nr_pages; 62 + unsigned erase_size; 
63 + }; 64 + 65 + #define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 66 + 67 + static struct flash_info __initdata sst25l_flash_info[] = { 68 + {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 69 + {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 70 + }; 71 + 72 + static int sst25l_status(struct sst25l_flash *flash, int *status) 73 + { 74 + unsigned char command, response; 75 + int err; 76 + 77 + command = SST25L_CMD_RDSR; 78 + err = spi_write_then_read(flash->spi, &command, 1, &response, 1); 79 + if (err < 0) 80 + return err; 81 + 82 + *status = response; 83 + return 0; 84 + } 85 + 86 + static int sst25l_write_enable(struct sst25l_flash *flash, int enable) 87 + { 88 + unsigned char command[2]; 89 + int status, err; 90 + 91 + command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI; 92 + err = spi_write(flash->spi, command, 1); 93 + if (err) 94 + return err; 95 + 96 + command[0] = SST25L_CMD_EWSR; 97 + err = spi_write(flash->spi, command, 1); 98 + if (err) 99 + return err; 100 + 101 + command[0] = SST25L_CMD_WRSR; 102 + command[1] = enable ? 
0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1; 103 + err = spi_write(flash->spi, command, 2); 104 + if (err) 105 + return err; 106 + 107 + if (enable) { 108 + err = sst25l_status(flash, &status); 109 + if (err) 110 + return err; 111 + if (!(status & SST25L_STATUS_WREN)) 112 + return -EROFS; 113 + } 114 + 115 + return 0; 116 + } 117 + 118 + static int sst25l_wait_till_ready(struct sst25l_flash *flash) 119 + { 120 + unsigned long deadline; 121 + int status, err; 122 + 123 + deadline = jiffies + MAX_READY_WAIT_JIFFIES; 124 + do { 125 + err = sst25l_status(flash, &status); 126 + if (err) 127 + return err; 128 + if (!(status & SST25L_STATUS_BUSY)) 129 + return 0; 130 + 131 + cond_resched(); 132 + } while (!time_after_eq(jiffies, deadline)); 133 + 134 + return -ETIMEDOUT; 135 + } 136 + 137 + static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset) 138 + { 139 + unsigned char command[4]; 140 + int err; 141 + 142 + err = sst25l_write_enable(flash, 1); 143 + if (err) 144 + return err; 145 + 146 + command[0] = SST25L_CMD_SECTOR_ERASE; 147 + command[1] = offset >> 16; 148 + command[2] = offset >> 8; 149 + command[3] = offset; 150 + err = spi_write(flash->spi, command, 4); 151 + if (err) 152 + return err; 153 + 154 + err = sst25l_wait_till_ready(flash); 155 + if (err) 156 + return err; 157 + 158 + return sst25l_write_enable(flash, 0); 159 + } 160 + 161 + static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr) 162 + { 163 + struct sst25l_flash *flash = to_sst25l_flash(mtd); 164 + uint32_t addr, end; 165 + int err; 166 + 167 + /* Sanity checks */ 168 + if (instr->addr + instr->len > flash->mtd.size) 169 + return -EINVAL; 170 + 171 + if ((uint32_t)instr->len % mtd->erasesize) 172 + return -EINVAL; 173 + 174 + if ((uint32_t)instr->addr % mtd->erasesize) 175 + return -EINVAL; 176 + 177 + addr = instr->addr; 178 + end = addr + instr->len; 179 + 180 + mutex_lock(&flash->lock); 181 + 182 + err = sst25l_wait_till_ready(flash); 183 + if (err) { 184 + 
mutex_unlock(&flash->lock); 185 + return err; 186 + } 187 + 188 + while (addr < end) { 189 + err = sst25l_erase_sector(flash, addr); 190 + if (err) { 191 + mutex_unlock(&flash->lock); 192 + instr->state = MTD_ERASE_FAILED; 193 + dev_err(&flash->spi->dev, "Erase failed\n"); 194 + return err; 195 + } 196 + 197 + addr += mtd->erasesize; 198 + } 199 + 200 + mutex_unlock(&flash->lock); 201 + 202 + instr->state = MTD_ERASE_DONE; 203 + mtd_erase_callback(instr); 204 + return 0; 205 + } 206 + 207 + static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len, 208 + size_t *retlen, unsigned char *buf) 209 + { 210 + struct sst25l_flash *flash = to_sst25l_flash(mtd); 211 + struct spi_transfer transfer[2]; 212 + struct spi_message message; 213 + unsigned char command[4]; 214 + int ret; 215 + 216 + /* Sanity checking */ 217 + if (len == 0) 218 + return 0; 219 + 220 + if (from + len > flash->mtd.size) 221 + return -EINVAL; 222 + 223 + if (retlen) 224 + *retlen = 0; 225 + 226 + spi_message_init(&message); 227 + memset(&transfer, 0, sizeof(transfer)); 228 + 229 + command[0] = SST25L_CMD_READ; 230 + command[1] = from >> 16; 231 + command[2] = from >> 8; 232 + command[3] = from; 233 + 234 + transfer[0].tx_buf = command; 235 + transfer[0].len = sizeof(command); 236 + spi_message_add_tail(&transfer[0], &message); 237 + 238 + transfer[1].rx_buf = buf; 239 + transfer[1].len = len; 240 + spi_message_add_tail(&transfer[1], &message); 241 + 242 + mutex_lock(&flash->lock); 243 + 244 + /* Wait for previous write/erase to complete */ 245 + ret = sst25l_wait_till_ready(flash); 246 + if (ret) { 247 + mutex_unlock(&flash->lock); 248 + return ret; 249 + } 250 + 251 + spi_sync(flash->spi, &message); 252 + 253 + if (retlen && message.actual_length > sizeof(command)) 254 + *retlen += message.actual_length - sizeof(command); 255 + 256 + mutex_unlock(&flash->lock); 257 + return 0; 258 + } 259 + 260 + static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len, 261 + size_t *retlen, 
const unsigned char *buf) 262 + { 263 + struct sst25l_flash *flash = to_sst25l_flash(mtd); 264 + int i, j, ret, bytes, copied = 0; 265 + unsigned char command[5]; 266 + 267 + /* Sanity checks */ 268 + if (!len) 269 + return 0; 270 + 271 + if (to + len > flash->mtd.size) 272 + return -EINVAL; 273 + 274 + if ((uint32_t)to % mtd->writesize) 275 + return -EINVAL; 276 + 277 + mutex_lock(&flash->lock); 278 + 279 + ret = sst25l_write_enable(flash, 1); 280 + if (ret) 281 + goto out; 282 + 283 + for (i = 0; i < len; i += mtd->writesize) { 284 + ret = sst25l_wait_till_ready(flash); 285 + if (ret) 286 + goto out; 287 + 288 + /* Write the first byte of the page */ 289 + command[0] = SST25L_CMD_AAI_PROGRAM; 290 + command[1] = (to + i) >> 16; 291 + command[2] = (to + i) >> 8; 292 + command[3] = (to + i); 293 + command[4] = buf[i]; 294 + ret = spi_write(flash->spi, command, 5); 295 + if (ret < 0) 296 + goto out; 297 + copied++; 298 + 299 + /* 300 + * Write the remaining bytes using auto address 301 + * increment mode 302 + */ 303 + bytes = min_t(uint32_t, mtd->writesize, len - i); 304 + for (j = 1; j < bytes; j++, copied++) { 305 + ret = sst25l_wait_till_ready(flash); 306 + if (ret) 307 + goto out; 308 + 309 + command[1] = buf[i + j]; 310 + ret = spi_write(flash->spi, command, 2); 311 + if (ret) 312 + goto out; 313 + } 314 + } 315 + 316 + out: 317 + ret = sst25l_write_enable(flash, 0); 318 + 319 + if (retlen) 320 + *retlen = copied; 321 + 322 + mutex_unlock(&flash->lock); 323 + return ret; 324 + } 325 + 326 + static struct flash_info *__init sst25l_match_device(struct spi_device *spi) 327 + { 328 + struct flash_info *flash_info = NULL; 329 + unsigned char command[4], response; 330 + int i, err; 331 + uint16_t id; 332 + 333 + command[0] = SST25L_CMD_READ_ID; 334 + command[1] = 0; 335 + command[2] = 0; 336 + command[3] = 0; 337 + err = spi_write_then_read(spi, command, sizeof(command), &response, 1); 338 + if (err < 0) { 339 + dev_err(&spi->dev, "error reading device id msb\n"); 
340 + return NULL; 341 + } 342 + 343 + id = response << 8; 344 + 345 + command[0] = SST25L_CMD_READ_ID; 346 + command[1] = 0; 347 + command[2] = 0; 348 + command[3] = 1; 349 + err = spi_write_then_read(spi, command, sizeof(command), &response, 1); 350 + if (err < 0) { 351 + dev_err(&spi->dev, "error reading device id lsb\n"); 352 + return NULL; 353 + } 354 + 355 + id |= response; 356 + 357 + for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++) 358 + if (sst25l_flash_info[i].device_id == id) 359 + flash_info = &sst25l_flash_info[i]; 360 + 361 + if (!flash_info) 362 + dev_err(&spi->dev, "unknown id %.4x\n", id); 363 + 364 + return flash_info; 365 + } 366 + 367 + static int __init sst25l_probe(struct spi_device *spi) 368 + { 369 + struct flash_info *flash_info; 370 + struct sst25l_flash *flash; 371 + struct flash_platform_data *data; 372 + int ret, i; 373 + 374 + flash_info = sst25l_match_device(spi); 375 + if (!flash_info) 376 + return -ENODEV; 377 + 378 + flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL); 379 + if (!flash) 380 + return -ENOMEM; 381 + 382 + flash->spi = spi; 383 + mutex_init(&flash->lock); 384 + dev_set_drvdata(&spi->dev, flash); 385 + 386 + data = spi->dev.platform_data; 387 + if (data && data->name) 388 + flash->mtd.name = data->name; 389 + else 390 + flash->mtd.name = dev_name(&spi->dev); 391 + 392 + flash->mtd.type = MTD_NORFLASH; 393 + flash->mtd.flags = MTD_CAP_NORFLASH; 394 + flash->mtd.erasesize = flash_info->erase_size; 395 + flash->mtd.writesize = flash_info->page_size; 396 + flash->mtd.size = flash_info->page_size * flash_info->nr_pages; 397 + flash->mtd.erase = sst25l_erase; 398 + flash->mtd.read = sst25l_read; 399 + flash->mtd.write = sst25l_write; 400 + 401 + dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 402 + (long long)flash->mtd.size >> 10); 403 + 404 + DEBUG(MTD_DEBUG_LEVEL2, 405 + "mtd .name = %s, .size = 0x%llx (%lldMiB) " 406 + ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 407 + flash->mtd.name, 408 + 
(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 409 + flash->mtd.erasesize, flash->mtd.erasesize / 1024, 410 + flash->mtd.numeraseregions); 411 + 412 + if (flash->mtd.numeraseregions) 413 + for (i = 0; i < flash->mtd.numeraseregions; i++) 414 + DEBUG(MTD_DEBUG_LEVEL2, 415 + "mtd.eraseregions[%d] = { .offset = 0x%llx, " 416 + ".erasesize = 0x%.8x (%uKiB), " 417 + ".numblocks = %d }\n", 418 + i, (long long)flash->mtd.eraseregions[i].offset, 419 + flash->mtd.eraseregions[i].erasesize, 420 + flash->mtd.eraseregions[i].erasesize / 1024, 421 + flash->mtd.eraseregions[i].numblocks); 422 + 423 + if (mtd_has_partitions()) { 424 + struct mtd_partition *parts = NULL; 425 + int nr_parts = 0; 426 + 427 + if (mtd_has_cmdlinepart()) { 428 + static const char *part_probes[] = 429 + {"cmdlinepart", NULL}; 430 + 431 + nr_parts = parse_mtd_partitions(&flash->mtd, 432 + part_probes, 433 + &parts, 0); 434 + } 435 + 436 + if (nr_parts <= 0 && data && data->parts) { 437 + parts = data->parts; 438 + nr_parts = data->nr_parts; 439 + } 440 + 441 + if (nr_parts > 0) { 442 + for (i = 0; i < nr_parts; i++) { 443 + DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 444 + "{.name = %s, .offset = 0x%llx, " 445 + ".size = 0x%llx (%lldKiB) }\n", 446 + i, parts[i].name, 447 + (long long)parts[i].offset, 448 + (long long)parts[i].size, 449 + (long long)(parts[i].size >> 10)); 450 + } 451 + 452 + flash->partitioned = 1; 453 + return add_mtd_partitions(&flash->mtd, 454 + parts, nr_parts); 455 + } 456 + 457 + } else if (data->nr_parts) { 458 + dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 459 + data->nr_parts, data->name); 460 + } 461 + 462 + ret = add_mtd_device(&flash->mtd); 463 + if (ret == 1) { 464 + kfree(flash); 465 + dev_set_drvdata(&spi->dev, NULL); 466 + return -ENODEV; 467 + } 468 + 469 + return 0; 470 + } 471 + 472 + static int __exit sst25l_remove(struct spi_device *spi) 473 + { 474 + struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 475 + int ret; 476 + 477 + 
if (mtd_has_partitions() && flash->partitioned) 478 + ret = del_mtd_partitions(&flash->mtd); 479 + else 480 + ret = del_mtd_device(&flash->mtd); 481 + if (ret == 0) 482 + kfree(flash); 483 + return ret; 484 + } 485 + 486 + static struct spi_driver sst25l_driver = { 487 + .driver = { 488 + .name = "sst25l", 489 + .bus = &spi_bus_type, 490 + .owner = THIS_MODULE, 491 + }, 492 + .probe = sst25l_probe, 493 + .remove = __exit_p(sst25l_remove), 494 + }; 495 + 496 + static int __init sst25l_init(void) 497 + { 498 + return spi_register_driver(&sst25l_driver); 499 + } 500 + 501 + static void __exit sst25l_exit(void) 502 + { 503 + spi_unregister_driver(&sst25l_driver); 504 + } 505 + 506 + module_init(sst25l_init); 507 + module_exit(sst25l_exit); 508 + 509 + MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); 510 + MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " 511 + "Ryan Mallon <ryan@bluewatersys.com>"); 512 + MODULE_LICENSE("GPL");
+1 -1
drivers/mtd/inftlcore.c
··· 550 550 * waiting to be picked up. We're going to have to fold 551 551 * a chain to make room. 552 552 */ 553 - thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL); 553 + thisEUN = INFTL_makefreeblock(inftl, block); 554 554 555 555 /* 556 556 * Hopefully we free something, lets try again.
+11 -1
drivers/mtd/maps/Kconfig
··· 484 484 485 485 If compiled as a module, it will be called bfin-async-flash. 486 486 487 + config MTD_GPIO_ADDR 488 + tristate "GPIO-assisted Flash Chip Support" 489 + depends on MTD_COMPLEX_MAPPINGS 490 + select MTD_PARTITIONS 491 + help 492 + Map driver which allows flashes to be partially physically addressed 493 + and assisted by GPIOs. 494 + 495 + If compiled as a module, it will be called gpio-addr-flash. 496 + 487 497 config MTD_UCLINUX 488 498 bool "Generic uClinux RAM/ROM filesystem support" 489 - depends on MTD_PARTITIONS && MTD_RAM && !MMU 499 + depends on MTD_PARTITIONS && MTD_RAM=y && !MMU 490 500 help 491 501 Map driver to support image based filesystems for uClinux. 492 502
+1 -2
drivers/mtd/maps/Makefile
··· 58 58 obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o 59 59 obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o 60 60 obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o 61 - obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o 62 - obj-$(CONFIG_MTD_VMU) += vmu-flash.o 61 + obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
+311
drivers/mtd/maps/gpio-addr-flash.c
··· 1 + /* 2 + * drivers/mtd/maps/gpio-addr-flash.c 3 + * 4 + * Handle the case where a flash device is mostly addressed using physical 5 + * line and supplemented by GPIOs. This way you can hook up say a 8MiB flash 6 + * to a 2MiB memory range and use the GPIOs to select a particular range. 7 + * 8 + * Copyright © 2000 Nicolas Pitre <nico@cam.org> 9 + * Copyright © 2005-2009 Analog Devices Inc. 10 + * 11 + * Enter bugs at http://blackfin.uclinux.org/ 12 + * 13 + * Licensed under the GPL-2 or later. 14 + */ 15 + 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/mtd/mtd.h> 20 + #include <linux/mtd/map.h> 21 + #include <linux/mtd/partitions.h> 22 + #include <linux/mtd/physmap.h> 23 + #include <linux/platform_device.h> 24 + #include <linux/types.h> 25 + 26 + #include <asm/gpio.h> 27 + #include <asm/io.h> 28 + 29 + #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 30 + 31 + #define DRIVER_NAME "gpio-addr-flash" 32 + #define PFX DRIVER_NAME ": " 33 + 34 + /** 35 + * struct async_state - keep GPIO flash state 36 + * @mtd: MTD state for this mapping 37 + * @map: MTD map state for this flash 38 + * @gpio_count: number of GPIOs used to address 39 + * @gpio_addrs: array of GPIOs to twiddle 40 + * @gpio_values: cached GPIO values 41 + * @win_size: dedicated memory size (if no GPIOs) 42 + */ 43 + struct async_state { 44 + struct mtd_info *mtd; 45 + struct map_info map; 46 + size_t gpio_count; 47 + unsigned *gpio_addrs; 48 + int *gpio_values; 49 + unsigned long win_size; 50 + }; 51 + #define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1) 52 + 53 + /** 54 + * gf_set_gpios() - set GPIO address lines to access specified flash offset 55 + * @state: GPIO flash state 56 + * @ofs: desired offset to access 57 + * 58 + * Rather than call the GPIO framework every time, cache the last-programmed 59 + * value. 
This speeds up sequential accesses (which are by far the most common 60 + * type). We rely on the GPIO framework to treat non-zero value as high so 61 + * that we don't have to normalize the bits. 62 + */ 63 + static void gf_set_gpios(struct async_state *state, unsigned long ofs) 64 + { 65 + size_t i = 0; 66 + int value; 67 + ofs /= state->win_size; 68 + do { 69 + value = ofs & (1 << i); 70 + if (state->gpio_values[i] != value) { 71 + gpio_set_value(state->gpio_addrs[i], value); 72 + state->gpio_values[i] = value; 73 + } 74 + } while (++i < state->gpio_count); 75 + } 76 + 77 + /** 78 + * gf_read() - read a word at the specified offset 79 + * @map: MTD map state 80 + * @ofs: desired offset to read 81 + */ 82 + static map_word gf_read(struct map_info *map, unsigned long ofs) 83 + { 84 + struct async_state *state = gf_map_info_to_state(map); 85 + uint16_t word; 86 + map_word test; 87 + 88 + gf_set_gpios(state, ofs); 89 + 90 + word = readw(map->virt + (ofs % state->win_size)); 91 + test.x[0] = word; 92 + return test; 93 + } 94 + 95 + /** 96 + * gf_copy_from() - copy a chunk of data from the flash 97 + * @map: MTD map state 98 + * @to: memory to copy to 99 + * @from: flash offset to copy from 100 + * @len: how much to copy 101 + * 102 + * We rely on the MTD layer to chunk up copies such that a single request here 103 + * will not cross a window size. This allows us to only wiggle the GPIOs once 104 + * before falling back to a normal memcpy. Reading the higher layer code shows 105 + * that this is indeed the case, but add a BUG_ON() to future proof. 
106 + */ 107 + static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 108 + { 109 + struct async_state *state = gf_map_info_to_state(map); 110 + 111 + gf_set_gpios(state, from); 112 + 113 + /* BUG if operation crosses the win_size */ 114 + BUG_ON(!((from + len) % state->win_size <= (from + len))); 115 + 116 + /* operation does not cross the win_size, so one shot it */ 117 + memcpy_fromio(to, map->virt + (from % state->win_size), len); 118 + } 119 + 120 + /** 121 + * gf_write() - write a word at the specified offset 122 + * @map: MTD map state 123 + * @ofs: desired offset to write 124 + */ 125 + static void gf_write(struct map_info *map, map_word d1, unsigned long ofs) 126 + { 127 + struct async_state *state = gf_map_info_to_state(map); 128 + uint16_t d; 129 + 130 + gf_set_gpios(state, ofs); 131 + 132 + d = d1.x[0]; 133 + writew(d, map->virt + (ofs % state->win_size)); 134 + } 135 + 136 + /** 137 + * gf_copy_to() - copy a chunk of data to the flash 138 + * @map: MTD map state 139 + * @to: flash offset to copy to 140 + * @from: memory to copy from 141 + * @len: how much to copy 142 + * 143 + * See gf_copy_from() caveat. 
144 + */ 145 + static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 146 + { 147 + struct async_state *state = gf_map_info_to_state(map); 148 + 149 + gf_set_gpios(state, to); 150 + 151 + /* BUG if operation crosses the win_size */ 152 + BUG_ON(!((to + len) % state->win_size <= (to + len))); 153 + 154 + /* operation does not cross the win_size, so one shot it */ 155 + memcpy_toio(map->virt + (to % state->win_size), from, len); 156 + } 157 + 158 + #ifdef CONFIG_MTD_PARTITIONS 159 + static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 160 + #endif 161 + 162 + /** 163 + * gpio_flash_probe() - setup a mapping for a GPIO assisted flash 164 + * @pdev: platform device 165 + * 166 + * The platform resource layout expected looks something like: 167 + * struct mtd_partition partitions[] = { ... }; 168 + * struct physmap_flash_data flash_data = { ... }; 169 + * unsigned flash_gpios[] = { GPIO_XX, GPIO_XX, ... }; 170 + * struct resource flash_resource[] = { 171 + * { 172 + * .name = "cfi_probe", 173 + * .start = 0x20000000, 174 + * .end = 0x201fffff, 175 + * .flags = IORESOURCE_MEM, 176 + * }, { 177 + * .start = (unsigned long)flash_gpios, 178 + * .end = ARRAY_SIZE(flash_gpios), 179 + * .flags = IORESOURCE_IRQ, 180 + * } 181 + * }; 182 + * struct platform_device flash_device = { 183 + * .name = "gpio-addr-flash", 184 + * .dev = { .platform_data = &flash_data, }, 185 + * .num_resources = ARRAY_SIZE(flash_resource), 186 + * .resource = flash_resource, 187 + * ... 
188 + * }; 189 + */ 190 + static int __devinit gpio_flash_probe(struct platform_device *pdev) 191 + { 192 + int ret; 193 + size_t i, arr_size; 194 + struct physmap_flash_data *pdata; 195 + struct resource *memory; 196 + struct resource *gpios; 197 + struct async_state *state; 198 + 199 + pdata = pdev->dev.platform_data; 200 + memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); 201 + gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 202 + 203 + if (!memory || !gpios || !gpios->end) 204 + return -EINVAL; 205 + 206 + arr_size = sizeof(int) * gpios->end; 207 + state = kzalloc(sizeof(*state) + arr_size, GFP_KERNEL); 208 + if (!state) 209 + return -ENOMEM; 210 + 211 + state->gpio_count = gpios->end; 212 + state->gpio_addrs = (void *)gpios->start; 213 + state->gpio_values = (void *)(state + 1); 214 + state->win_size = memory->end - memory->start + 1; 215 + memset(state->gpio_values, 0xff, arr_size); 216 + 217 + state->map.name = DRIVER_NAME; 218 + state->map.read = gf_read; 219 + state->map.copy_from = gf_copy_from; 220 + state->map.write = gf_write; 221 + state->map.copy_to = gf_copy_to; 222 + state->map.bankwidth = pdata->width; 223 + state->map.size = state->win_size * (1 << state->gpio_count); 224 + state->map.virt = (void __iomem *)memory->start; 225 + state->map.phys = NO_XIP; 226 + state->map.map_priv_1 = (unsigned long)state; 227 + 228 + platform_set_drvdata(pdev, state); 229 + 230 + i = 0; 231 + do { 232 + if (gpio_request(state->gpio_addrs[i], DRIVER_NAME)) { 233 + pr_devinit(KERN_ERR PFX "failed to request gpio %d\n", 234 + state->gpio_addrs[i]); 235 + while (i--) 236 + gpio_free(state->gpio_addrs[i]); 237 + kfree(state); 238 + return -EBUSY; 239 + } 240 + gpio_direction_output(state->gpio_addrs[i], 0); 241 + } while (++i < state->gpio_count); 242 + 243 + pr_devinit(KERN_NOTICE PFX "probing %d-bit flash bus\n", 244 + state->map.bankwidth * 8); 245 + state->mtd = do_map_probe(memory->name, &state->map); 246 + if (!state->mtd) { 247 + for (i = 0; i < 
state->gpio_count; ++i) 248 + gpio_free(state->gpio_addrs[i]); 249 + kfree(state); 250 + return -ENXIO; 251 + } 252 + 253 + #ifdef CONFIG_MTD_PARTITIONS 254 + ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 255 + if (ret > 0) { 256 + pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); 257 + add_mtd_partitions(state->mtd, pdata->parts, ret); 258 + kfree(pdata->parts); 259 + 260 + } else if (pdata->nr_parts) { 261 + pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); 262 + add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); 263 + 264 + } else 265 + #endif 266 + { 267 + pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); 268 + add_mtd_device(state->mtd); 269 + } 270 + 271 + return 0; 272 + } 273 + 274 + static int __devexit gpio_flash_remove(struct platform_device *pdev) 275 + { 276 + struct async_state *state = platform_get_drvdata(pdev); 277 + size_t i = 0; 278 + do { 279 + gpio_free(state->gpio_addrs[i]); 280 + } while (++i < state->gpio_count); 281 + #ifdef CONFIG_MTD_PARTITIONS 282 + del_mtd_partitions(state->mtd); 283 + #endif 284 + map_destroy(state->mtd); 285 + kfree(state); 286 + return 0; 287 + } 288 + 289 + static struct platform_driver gpio_flash_driver = { 290 + .probe = gpio_flash_probe, 291 + .remove = __devexit_p(gpio_flash_remove), 292 + .driver = { 293 + .name = DRIVER_NAME, 294 + }, 295 + }; 296 + 297 + static int __init gpio_flash_init(void) 298 + { 299 + return platform_driver_register(&gpio_flash_driver); 300 + } 301 + module_init(gpio_flash_init); 302 + 303 + static void __exit gpio_flash_exit(void) 304 + { 305 + platform_driver_unregister(&gpio_flash_driver); 306 + } 307 + module_exit(gpio_flash_exit); 308 + 309 + MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); 310 + MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios"); 311 + MODULE_LICENSE("GPL");
+15 -9
drivers/mtd/maps/physmap_of.c
··· 190 190 const u32 *p; 191 191 int reg_tuple_size; 192 192 struct mtd_info **mtd_list = NULL; 193 + resource_size_t res_size; 193 194 194 195 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 195 196 ··· 205 204 dev_err(&dev->dev, "Malformed reg property on %s\n", 206 205 dev->node->full_name); 207 206 err = -EINVAL; 208 - goto err_out; 207 + goto err_flash_remove; 209 208 } 210 209 count /= reg_tuple_size; 211 210 ··· 213 212 info = kzalloc(sizeof(struct of_flash) + 214 213 sizeof(struct of_flash_list) * count, GFP_KERNEL); 215 214 if (!info) 216 - goto err_out; 217 - 218 - mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); 219 - if (!info) 220 - goto err_out; 215 + goto err_flash_remove; 221 216 222 217 dev_set_drvdata(&dev->dev, info); 218 + 219 + mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); 220 + if (!mtd_list) 221 + goto err_flash_remove; 223 222 224 223 for (i = 0; i < count; i++) { 225 224 err = -ENXIO; ··· 234 233 (unsigned long long)res.end); 235 234 236 235 err = -EBUSY; 237 - info->list[i].res = request_mem_region(res.start, res.end - 238 - res.start + 1, 236 + res_size = resource_size(&res); 237 + info->list[i].res = request_mem_region(res.start, res_size, 239 238 dev_name(&dev->dev)); 240 239 if (!info->list[i].res) 241 240 goto err_out; ··· 250 249 251 250 info->list[i].map.name = dev_name(&dev->dev); 252 251 info->list[i].map.phys = res.start; 253 - info->list[i].map.size = res.end - res.start + 1; 252 + info->list[i].map.size = res_size; 254 253 info->list[i].map.bankwidth = *width; 255 254 256 255 err = -ENOMEM; ··· 339 338 340 339 err_out: 341 340 kfree(mtd_list); 341 + err_flash_remove: 342 342 of_flash_remove(dev); 343 343 344 344 return err; ··· 360 358 * :(. */ 361 359 .compatible = "jedec-flash", 362 360 .data = (void *)"jedec_probe", 361 + }, 362 + { 363 + .compatible = "mtd-ram", 364 + .data = (void *)"map_ram", 363 365 }, 364 366 { 365 367 .type = "rom",
+1 -1
drivers/mtd/maps/plat-ram.c
··· 175 175 /* setup map parameters */ 176 176 177 177 info->map.phys = res->start; 178 - info->map.size = (res->end - res->start) + 1; 178 + info->map.size = resource_size(res); 179 179 info->map.name = pdata->mapname != NULL ? 180 180 (char *)pdata->mapname : (char *)pdev->name; 181 181 info->map.bankwidth = pdata->bankwidth;
+62 -20
drivers/mtd/maps/pmcmsp-flash.c
··· 50 50 51 51 static int __init init_msp_flash(void) 52 52 { 53 - int i, j; 53 + int i, j, ret = -ENOMEM; 54 54 int offset, coff; 55 55 char *env; 56 56 int pcnt; ··· 75 75 printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); 76 76 77 77 msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); 78 - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); 79 - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); 80 - if (!msp_flash || !msp_parts || !msp_maps) { 81 - kfree(msp_maps); 82 - kfree(msp_parts); 83 - kfree(msp_flash); 78 + if (!msp_flash) 84 79 return -ENOMEM; 85 - } 80 + 81 + msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); 82 + if (!msp_parts) 83 + goto free_msp_flash; 84 + 85 + msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); 86 + if (!msp_maps) 87 + goto free_msp_parts; 86 88 87 89 /* loop over the flash devices, initializing each */ 88 90 for (i = 0; i < fcnt; i++) { ··· 102 100 103 101 msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition), 104 102 GFP_KERNEL); 103 + if (!msp_parts[i]) 104 + goto cleanup_loop; 105 105 106 106 /* now initialize the devices proper */ 107 107 flash_name[5] = '0' + i; 108 108 env = prom_getenv(flash_name); 109 109 110 - if (sscanf(env, "%x:%x", &addr, &size) < 2) 111 - return -ENXIO; 110 + if (sscanf(env, "%x:%x", &addr, &size) < 2) { 111 + ret = -ENXIO; 112 + kfree(msp_parts[i]); 113 + goto cleanup_loop; 114 + } 112 115 addr = CPHYSADDR(addr); 113 116 114 117 printk(KERN_NOTICE ··· 129 122 */ 130 123 if (size > CONFIG_MSP_FLASH_MAP_LIMIT) 131 124 size = CONFIG_MSP_FLASH_MAP_LIMIT; 132 - msp_maps[i].virt = ioremap(addr, size); 133 - msp_maps[i].bankwidth = 1; 134 - msp_maps[i].name = strncpy(kmalloc(7, GFP_KERNEL), 135 - flash_name, 7); 136 125 137 - if (msp_maps[i].virt == NULL) 138 - return -ENXIO; 126 + msp_maps[i].virt = ioremap(addr, size); 127 + if (msp_maps[i].virt == NULL) { 128 + ret = -ENXIO; 129 + kfree(msp_parts[i]); 130 + goto 
cleanup_loop; 131 + } 132 + 133 + msp_maps[i].bankwidth = 1; 134 + msp_maps[i].name = kmalloc(7, GFP_KERNEL); 135 + if (!msp_maps[i].name) { 136 + iounmap(msp_maps[i].virt); 137 + kfree(msp_parts[i]); 138 + goto cleanup_loop; 139 + } 140 + 141 + msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); 139 142 140 143 for (j = 0; j < pcnt; j++) { 141 144 part_name[5] = '0' + i; ··· 153 136 154 137 env = prom_getenv(part_name); 155 138 156 - if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2) 157 - return -ENXIO; 139 + if (sscanf(env, "%x:%x:%n", &offset, &size, 140 + &coff) < 2) { 141 + ret = -ENXIO; 142 + kfree(msp_maps[i].name); 143 + iounmap(msp_maps[i].virt); 144 + kfree(msp_parts[i]); 145 + goto cleanup_loop; 146 + } 158 147 159 148 msp_parts[i][j].size = size; 160 149 msp_parts[i][j].offset = offset; ··· 175 152 add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt); 176 153 } else { 177 154 printk(KERN_ERR "map probe failed for flash\n"); 178 - return -ENXIO; 155 + ret = -ENXIO; 156 + kfree(msp_maps[i].name); 157 + iounmap(msp_maps[i].virt); 158 + kfree(msp_parts[i]); 159 + goto cleanup_loop; 179 160 } 180 161 } 181 162 182 163 return 0; 164 + 165 + cleanup_loop: 166 + while (i--) { 167 + del_mtd_partitions(msp_flash[i]); 168 + map_destroy(msp_flash[i]); 169 + kfree(msp_maps[i].name); 170 + iounmap(msp_maps[i].virt); 171 + kfree(msp_parts[i]); 172 + } 173 + kfree(msp_maps); 174 + free_msp_parts: 175 + kfree(msp_parts); 176 + free_msp_flash: 177 + kfree(msp_flash); 178 + return ret; 183 179 } 184 180 185 181 static void __exit cleanup_msp_flash(void) 186 182 { 187 183 int i; 188 184 189 - for (i = 0; i < sizeof(msp_flash) / sizeof(struct mtd_info **); i++) { 185 + for (i = 0; i < fcnt; i++) { 190 186 del_mtd_partitions(msp_flash[i]); 191 187 map_destroy(msp_flash[i]); 192 188 iounmap((void *)msp_maps[i].virt);
+8
drivers/mtd/maps/uclinux.c
··· 89 89 mtd->priv = mapp; 90 90 91 91 uclinux_ram_mtdinfo = mtd; 92 + #ifdef CONFIG_MTD_PARTITIONS 92 93 add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS); 94 + #else 95 + add_mtd_device(mtd); 96 + #endif 93 97 94 98 return(0); 95 99 } ··· 103 99 static void __exit uclinux_mtd_cleanup(void) 104 100 { 105 101 if (uclinux_ram_mtdinfo) { 102 + #ifdef CONFIG_MTD_PARTITIONS 106 103 del_mtd_partitions(uclinux_ram_mtdinfo); 104 + #else 105 + del_mtd_device(uclinux_ram_mtdinfo); 106 + #endif 107 107 map_destroy(uclinux_ram_mtdinfo); 108 108 uclinux_ram_mtdinfo = NULL; 109 109 }
+1 -1
drivers/mtd/mtdblock.c
··· 84 84 remove_wait_queue(&wait_q, &wait); 85 85 86 86 /* 87 - * Next, writhe data to flash. 87 + * Next, write the data to flash. 88 88 */ 89 89 90 90 ret = mtd->write(mtd, pos, len, &retlen, buf);
+3 -3
drivers/mtd/mtdconcat.c
··· 427 427 * to-be-erased area begins. Verify that the starting 428 428 * offset is aligned to this region's erase size: 429 429 */ 430 - if (instr->addr & (erase_regions[i].erasesize - 1)) 430 + if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1)) 431 431 return -EINVAL; 432 432 433 433 /* ··· 440 440 /* 441 441 * check if the ending offset is aligned to this region's erase size 442 442 */ 443 - if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 444 - 1)) 443 + if (i < 0 || ((instr->addr + instr->len) & 444 + (erase_regions[i].erasesize - 1))) 445 445 return -EINVAL; 446 446 } 447 447
+2 -2
drivers/mtd/mtdcore.c
··· 213 213 NULL, 214 214 }; 215 215 216 - struct attribute_group mtd_group = { 216 + static struct attribute_group mtd_group = { 217 217 .attrs = mtd_attrs, 218 218 }; 219 219 220 - const struct attribute_group *mtd_groups[] = { 220 + static const struct attribute_group *mtd_groups[] = { 221 221 &mtd_group, 222 222 NULL, 223 223 };
+2 -1
drivers/mtd/mtdpart.c
··· 453 453 for (i = 0; i < max && regions[i].offset <= slave->offset; i++) 454 454 ; 455 455 /* The loop searched for the region _behind_ the first one */ 456 - i--; 456 + if (i > 0) 457 + i--; 457 458 458 459 /* Pick biggest erasesize */ 459 460 for (; i < max && regions[i].offset < end; i++) {
+30
drivers/mtd/nand/Kconfig
··· 80 80 help 81 81 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. 82 82 83 + config MTD_NAND_OMAP_PREFETCH 84 + bool "GPMC prefetch support for NAND Flash device" 85 + depends on MTD_NAND && MTD_NAND_OMAP2 86 + default y 87 + help 88 + The NAND device can be accessed for Read/Write using GPMC PREFETCH engine 89 + to improve the performance. 90 + 91 + config MTD_NAND_OMAP_PREFETCH_DMA 92 + depends on MTD_NAND_OMAP_PREFETCH 93 + bool "DMA mode" 94 + default n 95 + help 96 + The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode 97 + or in DMA interrupt mode. 98 + Say y for DMA mode or MPU mode will be used 99 + 83 100 config MTD_NAND_TS7250 84 101 tristate "NAND Flash device on TS-7250 board" 85 102 depends on MACH_TS72XX ··· 443 426 This enables the driver for the NAND flash controller on the 444 427 MXC processors. 445 428 429 + config MTD_NAND_NOMADIK 430 + tristate "ST Nomadik 8815 NAND support" 431 + depends on ARCH_NOMADIK 432 + help 433 + Driver for the NAND flash controller on the Nomadik, with ECC. 434 + 446 435 config MTD_NAND_SH_FLCTL 447 436 tristate "Support for NAND on Renesas SuperH FLCTL" 448 437 depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723 ··· 474 451 depends on MTD_NAND && SOCRATES 475 452 help 476 453 Enables support for NAND Flash chips wired onto Socrates board. 454 + 455 + config MTD_NAND_W90P910 456 + tristate "Support for NAND on w90p910 evaluation board." 457 + depends on ARCH_W90X900 && MTD_PARTITIONS 458 + help 459 + This enables the driver for the NAND Flash on evaluation board based 460 + on w90p910. 477 461 478 462 endif # MTD_NAND
+2
drivers/mtd/nand/Makefile
··· 40 40 obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 41 41 obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 42 42 obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 43 + obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o 44 + obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 43 45 44 46 nand-objs := nand_base.o nand_bbt.o
+1 -1
drivers/mtd/nand/atmel_nand.c
··· 218 218 * buf: buffer to store read data 219 219 */ 220 220 static int atmel_nand_read_page(struct mtd_info *mtd, 221 - struct nand_chip *chip, uint8_t *buf) 221 + struct nand_chip *chip, uint8_t *buf, int page) 222 222 { 223 223 int eccsize = chip->ecc.size; 224 224 int eccbytes = chip->ecc.bytes;
+1 -1
drivers/mtd/nand/cafe_nand.c
··· 381 381 * we need a special oob layout and handling. 382 382 */ 383 383 static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 384 - uint8_t *buf) 384 + uint8_t *buf, int page) 385 385 { 386 386 struct cafe_priv *cafe = mtd->priv; 387 387
+39 -6
drivers/mtd/nand/davinci_nand.c
··· 348 348 if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3])) 349 349 return 0; 350 350 351 + /* 352 + * Clear any previous address calculation by doing a dummy read of an 353 + * error address register. 354 + */ 355 + davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET); 356 + 351 357 /* Start address calculation, and wait for it to complete. 352 358 * We _could_ start reading more data while this is working, 353 359 * to speed up the overall page read. ··· 365 359 366 360 switch ((fsr >> 8) & 0x0f) { 367 361 case 0: /* no error, should not happen */ 362 + davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET); 368 363 return 0; 369 364 case 1: /* five or more errors detected */ 365 + davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET); 370 366 return -EIO; 371 367 case 2: /* error addresses computed */ 372 368 case 3: ··· 508 500 }, 509 501 }; 510 502 503 + /* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash, 504 + * storing ten ECC bytes plus the manufacturer's bad block marker byte, 505 + * and not overlapping the default BBT markers. 
506 + */ 507 + static struct nand_ecclayout hwecc4_2048 __initconst = { 508 + .eccbytes = 40, 509 + .eccpos = { 510 + /* at the end of spare sector */ 511 + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 512 + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 513 + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 514 + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 515 + }, 516 + .oobfree = { 517 + /* 2 bytes at offset 0 hold manufacturer badblock markers */ 518 + {.offset = 2, .length = 22, }, 519 + /* 5 bytes at offset 8 hold BBT markers */ 520 + /* 8 bytes at offset 16 hold JFFS2 clean markers */ 521 + }, 522 + }; 511 523 512 524 static int __init nand_davinci_probe(struct platform_device *pdev) 513 525 { ··· 718 690 info->mtd.oobsize - 16; 719 691 goto syndrome_done; 720 692 } 693 + if (chunks == 4) { 694 + info->ecclayout = hwecc4_2048; 695 + info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; 696 + goto syndrome_done; 697 + } 721 698 722 - /* For large page chips we'll be wanting to use a 723 - * not-yet-implemented mode that reads OOB data 724 - * before reading the body of the page, to avoid 725 - * the "infix OOB" model of NAND_ECC_HW_SYNDROME 726 - * (and preserve manufacturer badblock markings). 699 + /* 4KiB page chips are not yet supported. The eccpos from 700 + * nand_ecclayout cannot hold 80 bytes and change to eccpos[] 701 + * breaks userspace ioctl interface with mtd-utils. Once we 702 + * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used 703 + * for the 4KiB page chips. 727 704 */ 728 705 dev_warn(&pdev->dev, "no 4-bit ECC support yet " 729 - "for large page NAND\n"); 706 + "for 4KiB-page NAND\n"); 730 707 ret = -EIO; 731 708 goto err_scan; 732 709
+2 -1
drivers/mtd/nand/fsl_elbc_nand.c
··· 739 739 740 740 static int fsl_elbc_read_page(struct mtd_info *mtd, 741 741 struct nand_chip *chip, 742 - uint8_t *buf) 742 + uint8_t *buf, 743 + int page) 743 744 { 744 745 fsl_elbc_read_buf(mtd, buf, mtd->writesize); 745 746 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+15 -1
drivers/mtd/nand/mxc_nand.c
··· 857 857 } 858 858 } 859 859 860 + /* Define some generic bad / good block scan pattern which are used 861 + * while scanning a device for factory marked good / bad blocks. */ 862 + static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 863 + 864 + static struct nand_bbt_descr smallpage_memorybased = { 865 + .options = NAND_BBT_SCAN2NDPAGE, 866 + .offs = 5, 867 + .len = 1, 868 + .pattern = scan_ff_pattern 869 + }; 870 + 860 871 static int __init mxcnd_probe(struct platform_device *pdev) 861 872 { 862 873 struct nand_chip *this; ··· 984 973 goto escan; 985 974 } 986 975 987 - host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0; 976 + if (mtd->writesize == 2048) { 977 + host->pagesize_2k = 1; 978 + this->badblock_pattern = &smallpage_memorybased; 979 + } 988 980 989 981 if (this->ecc.mode == NAND_ECC_HW) { 990 982 switch (mtd->oobsize) {
+115 -52
drivers/mtd/nand/nand_base.c
··· 688 688 retry: 689 689 spin_lock(lock); 690 690 691 - /* Hardware controller shared among independend devices */ 692 - /* Hardware controller shared among independend devices */ 691 + /* Hardware controller shared among independent devices */ 693 692 if (!chip->controller->active) 694 693 chip->controller->active = chip; 695 694 ··· 765 766 * Not for syndrome calculating ecc controllers, which use a special oob layout 766 767 */ 767 768 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 768 - uint8_t *buf) 769 + uint8_t *buf, int page) 769 770 { 770 771 chip->read_buf(mtd, buf, mtd->writesize); 771 772 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 781 782 * We need a special oob layout and handling even when OOB isn't used. 782 783 */ 783 784 static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 784 - uint8_t *buf) 785 + uint8_t *buf, int page) 785 786 { 786 787 int eccsize = chip->ecc.size; 787 788 int eccbytes = chip->ecc.bytes; ··· 820 821 * @buf: buffer to store read data 821 822 */ 822 823 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 823 - uint8_t *buf) 824 + uint8_t *buf, int page) 824 825 { 825 826 int i, eccsize = chip->ecc.size; 826 827 int eccbytes = chip->ecc.bytes; ··· 830 831 uint8_t *ecc_code = chip->buffers->ecccode; 831 832 uint32_t *eccpos = chip->ecc.layout->eccpos; 832 833 833 - chip->ecc.read_page_raw(mtd, chip, buf); 834 + chip->ecc.read_page_raw(mtd, chip, buf, page); 834 835 835 836 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 836 837 chip->ecc.calculate(mtd, p, &ecc_calc[i]); ··· 943 944 * Not for syndrome calculating ecc controllers which need a special oob layout 944 945 */ 945 946 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 946 - uint8_t *buf) 947 + uint8_t *buf, int page) 947 948 { 948 949 int i, eccsize = chip->ecc.size; 949 950 int eccbytes = chip->ecc.bytes; ··· 979 980 } 980 981 981 
982 /** 983 + * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first 984 + * @mtd: mtd info structure 985 + * @chip: nand chip info structure 986 + * @buf: buffer to store read data 987 + * 988 + * Hardware ECC for large page chips, require OOB to be read first. 989 + * For this ECC mode, the write_page method is re-used from ECC_HW. 990 + * These methods read/write ECC from the OOB area, unlike the 991 + * ECC_HW_SYNDROME support with multiple ECC steps, follows the 992 + * "infix ECC" scheme and reads/writes ECC from the data area, by 993 + * overwriting the NAND manufacturer bad block markings. 994 + */ 995 + static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 996 + struct nand_chip *chip, uint8_t *buf, int page) 997 + { 998 + int i, eccsize = chip->ecc.size; 999 + int eccbytes = chip->ecc.bytes; 1000 + int eccsteps = chip->ecc.steps; 1001 + uint8_t *p = buf; 1002 + uint8_t *ecc_code = chip->buffers->ecccode; 1003 + uint32_t *eccpos = chip->ecc.layout->eccpos; 1004 + uint8_t *ecc_calc = chip->buffers->ecccalc; 1005 + 1006 + /* Read the OOB area first */ 1007 + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1008 + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1009 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 1010 + 1011 + for (i = 0; i < chip->ecc.total; i++) 1012 + ecc_code[i] = chip->oob_poi[eccpos[i]]; 1013 + 1014 + for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1015 + int stat; 1016 + 1017 + chip->ecc.hwctl(mtd, NAND_ECC_READ); 1018 + chip->read_buf(mtd, p, eccsize); 1019 + chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1020 + 1021 + stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); 1022 + if (stat < 0) 1023 + mtd->ecc_stats.failed++; 1024 + else 1025 + mtd->ecc_stats.corrected += stat; 1026 + } 1027 + return 0; 1028 + } 1029 + 1030 + /** 982 1031 * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read 983 1032 * @mtd: mtd info structure 984 1033 * @chip: nand chip info structure ··· 
1036 989 * we need a special oob layout and handling. 1037 990 */ 1038 991 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1039 - uint8_t *buf) 992 + uint8_t *buf, int page) 1040 993 { 1041 994 int i, eccsize = chip->ecc.size; 1042 995 int eccbytes = chip->ecc.bytes; ··· 1178 1131 1179 1132 /* Now read the page into the buffer */ 1180 1133 if (unlikely(ops->mode == MTD_OOB_RAW)) 1181 - ret = chip->ecc.read_page_raw(mtd, chip, bufpoi); 1134 + ret = chip->ecc.read_page_raw(mtd, chip, 1135 + bufpoi, page); 1182 1136 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1183 1137 ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi); 1184 1138 else 1185 - ret = chip->ecc.read_page(mtd, chip, bufpoi); 1139 + ret = chip->ecc.read_page(mtd, chip, bufpoi, 1140 + page); 1186 1141 if (ret < 0) 1187 1142 break; 1188 1143 ··· 1462 1413 int len; 1463 1414 uint8_t *buf = ops->oobbuf; 1464 1415 1465 - DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n", 1466 - (unsigned long long)from, readlen); 1416 + DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", 1417 + __func__, (unsigned long long)from, readlen); 1467 1418 1468 1419 if (ops->mode == MTD_OOB_AUTO) 1469 1420 len = chip->ecc.layout->oobavail; ··· 1471 1422 len = mtd->oobsize; 1472 1423 1473 1424 if (unlikely(ops->ooboffs >= len)) { 1474 - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 1475 - "Attempt to start read outside oob\n"); 1425 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " 1426 + "outside oob\n", __func__); 1476 1427 return -EINVAL; 1477 1428 } 1478 1429 ··· 1480 1431 if (unlikely(from >= mtd->size || 1481 1432 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 1482 1433 (from >> chip->page_shift)) * len)) { 1483 - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 1484 - "Attempt read beyond end of device\n"); 1434 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " 1435 + "of device\n", __func__); 1485 1436 return -EINVAL; 1486 1437 } 1487 1438 
··· 1555 1506 1556 1507 /* Do not allow reads past end of device */ 1557 1508 if (ops->datbuf && (from + ops->len) > mtd->size) { 1558 - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 1559 - "Attempt read beyond end of device\n"); 1509 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " 1510 + "beyond end of device\n", __func__); 1560 1511 return -EINVAL; 1561 1512 } 1562 1513 ··· 1865 1816 1866 1817 /* reject writes, which are not page aligned */ 1867 1818 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 1868 - printk(KERN_NOTICE "nand_write: " 1869 - "Attempt to write not page aligned data\n"); 1819 + printk(KERN_NOTICE "%s: Attempt to write not " 1820 + "page aligned data\n", __func__); 1870 1821 return -EINVAL; 1871 1822 } 1872 1823 ··· 1993 1944 int chipnr, page, status, len; 1994 1945 struct nand_chip *chip = mtd->priv; 1995 1946 1996 - DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", 1997 - (unsigned int)to, (int)ops->ooblen); 1947 + DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 1948 + __func__, (unsigned int)to, (int)ops->ooblen); 1998 1949 1999 1950 if (ops->mode == MTD_OOB_AUTO) 2000 1951 len = chip->ecc.layout->oobavail; ··· 2003 1954 2004 1955 /* Do not allow write past end of page */ 2005 1956 if ((ops->ooboffs + ops->ooblen) > len) { 2006 - DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " 2007 - "Attempt to write past end of page\n"); 1957 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " 1958 + "past end of page\n", __func__); 2008 1959 return -EINVAL; 2009 1960 } 2010 1961 2011 1962 if (unlikely(ops->ooboffs >= len)) { 2012 - DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: " 2013 - "Attempt to start write outside oob\n"); 1963 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " 1964 + "write outside oob\n", __func__); 2014 1965 return -EINVAL; 2015 1966 } 2016 1967 ··· 2019 1970 ops->ooboffs + ops->ooblen > 2020 1971 ((mtd->size >> chip->page_shift) - 2021 1972 (to >> chip->page_shift)) * len)) { 2022 - DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: " 
2023 - "Attempt write beyond end of device\n"); 1973 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 1974 + "end of device\n", __func__); 2024 1975 return -EINVAL; 2025 1976 } 2026 1977 ··· 2075 2026 2076 2027 /* Do not allow writes past end of device */ 2077 2028 if (ops->datbuf && (to + ops->len) > mtd->size) { 2078 - DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " 2079 - "Attempt write beyond end of device\n"); 2029 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2030 + "end of device\n", __func__); 2080 2031 return -EINVAL; 2081 2032 } 2082 2033 ··· 2166 2117 unsigned int bbt_masked_page = 0xffffffff; 2167 2118 loff_t len; 2168 2119 2169 - DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n", 2170 - (unsigned long long)instr->addr, (unsigned long long)instr->len); 2120 + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 2121 + __func__, (unsigned long long)instr->addr, 2122 + (unsigned long long)instr->len); 2171 2123 2172 2124 /* Start address must align on block boundary */ 2173 2125 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { 2174 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); 2126 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 2175 2127 return -EINVAL; 2176 2128 } 2177 2129 2178 2130 /* Length must align on block boundary */ 2179 2131 if (instr->len & ((1 << chip->phys_erase_shift) - 1)) { 2180 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2181 - "Length not block aligned\n"); 2132 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 2133 + __func__); 2182 2134 return -EINVAL; 2183 2135 } 2184 2136 2185 2137 /* Do not allow erase past end of device */ 2186 2138 if ((instr->len + instr->addr) > mtd->size) { 2187 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2188 - "Erase past end of device\n"); 2139 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", 2140 + __func__); 2189 2141 return -EINVAL; 2190 2142 } 2191 2143 ··· 2207 2157 2208 2158 /* Check, if it is write protected 
*/ 2209 2159 if (nand_check_wp(mtd)) { 2210 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2211 - "Device is write protected!!!\n"); 2160 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 2161 + __func__); 2212 2162 instr->state = MTD_ERASE_FAILED; 2213 2163 goto erase_exit; 2214 2164 } ··· 2233 2183 */ 2234 2184 if (nand_block_checkbad(mtd, ((loff_t) page) << 2235 2185 chip->page_shift, 0, allowbbt)) { 2236 - printk(KERN_WARNING "nand_erase: attempt to erase a " 2237 - "bad block at page 0x%08x\n", page); 2186 + printk(KERN_WARNING "%s: attempt to erase a bad block " 2187 + "at page 0x%08x\n", __func__, page); 2238 2188 instr->state = MTD_ERASE_FAILED; 2239 2189 goto erase_exit; 2240 2190 } ··· 2261 2211 2262 2212 /* See if block erase succeeded */ 2263 2213 if (status & NAND_STATUS_FAIL) { 2264 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2265 - "Failed erase, page 0x%08x\n", page); 2214 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " 2215 + "page 0x%08x\n", __func__, page); 2266 2216 instr->state = MTD_ERASE_FAILED; 2267 2217 instr->fail_addr = 2268 2218 ((loff_t)page << chip->page_shift); ··· 2322 2272 if (!rewrite_bbt[chipnr]) 2323 2273 continue; 2324 2274 /* update the BBT for chip */ 2325 - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " 2326 - "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr], 2327 - chip->bbt_td->pages[chipnr]); 2275 + DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " 2276 + "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, 2277 + rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); 2328 2278 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2329 2279 } 2330 2280 ··· 2342 2292 { 2343 2293 struct nand_chip *chip = mtd->priv; 2344 2294 2345 - DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n"); 2295 + DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2346 2296 2347 2297 /* Grab the lock and see if the device is available */ 2348 2298 nand_get_device(chip, mtd, FL_SYNCING); ··· 2406 2356 if (chip->state == FL_PM_SUSPENDED) 2407 2357 
nand_release_device(mtd); 2408 2358 else 2409 - printk(KERN_ERR "nand_resume() called for a chip which is not " 2410 - "in suspended state\n"); 2359 + printk(KERN_ERR "%s called for a chip which is not " 2360 + "in suspended state\n", __func__); 2411 2361 } 2412 2362 2413 2363 /* ··· 2721 2671 */ 2722 2672 2723 2673 switch (chip->ecc.mode) { 2674 + case NAND_ECC_HW_OOB_FIRST: 2675 + /* Similar to NAND_ECC_HW, but a separate read_page handle */ 2676 + if (!chip->ecc.calculate || !chip->ecc.correct || 2677 + !chip->ecc.hwctl) { 2678 + printk(KERN_WARNING "No ECC functions supplied; " 2679 + "Hardware ECC not possible\n"); 2680 + BUG(); 2681 + } 2682 + if (!chip->ecc.read_page) 2683 + chip->ecc.read_page = nand_read_page_hwecc_oob_first; 2684 + 2724 2685 case NAND_ECC_HW: 2725 2686 /* Use standard hwecc read page function ? */ 2726 2687 if (!chip->ecc.read_page) ··· 2754 2693 chip->ecc.read_page == nand_read_page_hwecc || 2755 2694 !chip->ecc.write_page || 2756 2695 chip->ecc.write_page == nand_write_page_hwecc)) { 2757 - printk(KERN_WARNING "No ECC functions supplied, " 2696 + printk(KERN_WARNING "No ECC functions supplied; " 2758 2697 "Hardware ECC not possible\n"); 2759 2698 BUG(); 2760 2699 } ··· 2789 2728 chip->ecc.write_page_raw = nand_write_page_raw; 2790 2729 chip->ecc.read_oob = nand_read_oob_std; 2791 2730 chip->ecc.write_oob = nand_write_oob_std; 2792 - chip->ecc.size = 256; 2731 + if (!chip->ecc.size) 2732 + chip->ecc.size = 256; 2793 2733 chip->ecc.bytes = 3; 2794 2734 break; 2795 2735 ··· 2920 2858 2921 2859 /* Many callers got this wrong, so check for it for a while... */ 2922 2860 if (!mtd->owner && caller_is_module()) { 2923 - printk(KERN_CRIT "nand_scan() called with NULL mtd->owner!\n"); 2861 + printk(KERN_CRIT "%s called with NULL mtd->owner!\n", 2862 + __func__); 2924 2863 BUG(); 2925 2864 } 2926 2865
+24 -7
drivers/mtd/nand/nand_ecc.c
··· 417 417 EXPORT_SYMBOL(nand_calculate_ecc); 418 418 419 419 /** 420 - * nand_correct_data - [NAND Interface] Detect and correct bit error(s) 421 - * @mtd: MTD block structure 420 + * __nand_correct_data - [NAND Interface] Detect and correct bit error(s) 422 421 * @buf: raw data read from the chip 423 422 * @read_ecc: ECC from the chip 424 423 * @calc_ecc: the ECC calculated from raw data 424 + * @eccsize: data bytes per ecc step (256 or 512) 425 425 * 426 - * Detect and correct a 1 bit error for 256/512 byte block 426 + * Detect and correct a 1 bit error for eccsize byte block 427 427 */ 428 - int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, 429 - unsigned char *read_ecc, unsigned char *calc_ecc) 428 + int __nand_correct_data(unsigned char *buf, 429 + unsigned char *read_ecc, unsigned char *calc_ecc, 430 + unsigned int eccsize) 430 431 { 431 432 unsigned char b0, b1, b2, bit_addr; 432 433 unsigned int byte_addr; 433 434 /* 256 or 512 bytes/ecc */ 434 - const uint32_t eccsize_mult = 435 - (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; 435 + const uint32_t eccsize_mult = eccsize >> 8; 436 436 437 437 /* 438 438 * b0 to b2 indicate which bit is faulty (if any) ··· 494 494 495 495 printk(KERN_ERR "uncorrectable error : "); 496 496 return -1; 497 + } 498 + EXPORT_SYMBOL(__nand_correct_data); 499 + 500 + /** 501 + * nand_correct_data - [NAND Interface] Detect and correct bit error(s) 502 + * @mtd: MTD block structure 503 + * @buf: raw data read from the chip 504 + * @read_ecc: ECC from the chip 505 + * @calc_ecc: the ECC calculated from raw data 506 + * 507 + * Detect and correct a 1 bit error for 256/512 byte block 508 + */ 509 + int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, 510 + unsigned char *read_ecc, unsigned char *calc_ecc) 511 + { 512 + return __nand_correct_data(buf, read_ecc, calc_ecc, 513 + ((struct nand_chip *)mtd->priv)->ecc.size); 497 514 } 498 515 EXPORT_SYMBOL(nand_correct_data); 499 516
+2 -2
drivers/mtd/nand/ndfc.c
··· 102 102 wmb(); 103 103 ecc = in_be32(ndfc->ndfcbase + NDFC_ECC); 104 104 /* The NDFC uses Smart Media (SMC) bytes order */ 105 - ecc_code[0] = p[2]; 106 - ecc_code[1] = p[1]; 105 + ecc_code[0] = p[1]; 106 + ecc_code[1] = p[2]; 107 107 ecc_code[2] = p[3]; 108 108 109 109 return 0;
+250
drivers/mtd/nand/nomadik_nand.c
··· 1 + /* 2 + * drivers/mtd/nand/nomadik_nand.c 3 + * 4 + * Overview: 5 + * Driver for on-board NAND flash on Nomadik Platforms 6 + * 7 + * Copyright © 2007 STMicroelectronics Pvt. Ltd. 8 + * Author: Sachin Verma <sachin.verma@st.com> 9 + * 10 + * Copyright © 2009 Alessandro Rubini 11 + * 12 + * This program is free software; you can redistribute it and/or modify 13 + * it under the terms of the GNU General Public License as published by 14 + * the Free Software Foundation; either version 2 of the License, or 15 + * (at your option) any later version. 16 + * 17 + * This program is distributed in the hope that it will be useful, 18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 + * GNU General Public License for more details. 21 + * 22 + */ 23 + 24 + #include <linux/init.h> 25 + #include <linux/module.h> 26 + #include <linux/types.h> 27 + #include <linux/mtd/mtd.h> 28 + #include <linux/mtd/nand.h> 29 + #include <linux/mtd/nand_ecc.h> 30 + #include <linux/platform_device.h> 31 + #include <linux/mtd/partitions.h> 32 + #include <linux/io.h> 33 + #include <mach/nand.h> 34 + #include <mach/fsmc.h> 35 + 36 + #include <mtd/mtd-abi.h> 37 + 38 + struct nomadik_nand_host { 39 + struct mtd_info mtd; 40 + struct nand_chip nand; 41 + void __iomem *data_va; 42 + void __iomem *cmd_va; 43 + void __iomem *addr_va; 44 + struct nand_bbt_descr *bbt_desc; 45 + }; 46 + 47 + static struct nand_ecclayout nomadik_ecc_layout = { 48 + .eccbytes = 3 * 4, 49 + .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */ 50 + 0x02, 0x03, 0x04, 51 + 0x12, 0x13, 0x14, 52 + 0x22, 0x23, 0x24, 53 + 0x32, 0x33, 0x34}, 54 + /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */ 55 + .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} }, 56 + }; 57 + 58 + static void nomadik_ecc_control(struct mtd_info *mtd, int mode) 59 + { 60 + /* No need to enable hw ecc, it's on by default */ 
61 + } 62 + 63 + static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 64 + { 65 + struct nand_chip *nand = mtd->priv; 66 + struct nomadik_nand_host *host = nand->priv; 67 + 68 + if (cmd == NAND_CMD_NONE) 69 + return; 70 + 71 + if (ctrl & NAND_CLE) 72 + writeb(cmd, host->cmd_va); 73 + else 74 + writeb(cmd, host->addr_va); 75 + } 76 + 77 + static int nomadik_nand_probe(struct platform_device *pdev) 78 + { 79 + struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; 80 + struct nomadik_nand_host *host; 81 + struct mtd_info *mtd; 82 + struct nand_chip *nand; 83 + struct resource *res; 84 + int ret = 0; 85 + 86 + /* Allocate memory for the device structure (and zero it) */ 87 + host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL); 88 + if (!host) { 89 + dev_err(&pdev->dev, "Failed to allocate device structure.\n"); 90 + return -ENOMEM; 91 + } 92 + 93 + /* Call the client's init function, if any */ 94 + if (pdata->init) 95 + ret = pdata->init(); 96 + if (ret < 0) { 97 + dev_err(&pdev->dev, "Init function failed\n"); 98 + goto err; 99 + } 100 + 101 + /* ioremap three regions */ 102 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); 103 + if (!res) { 104 + ret = -EIO; 105 + goto err_unmap; 106 + } 107 + host->addr_va = ioremap(res->start, res->end - res->start + 1); 108 + 109 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 110 + if (!res) { 111 + ret = -EIO; 112 + goto err_unmap; 113 + } 114 + host->data_va = ioremap(res->start, res->end - res->start + 1); 115 + 116 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 117 + if (!res) { 118 + ret = -EIO; 119 + goto err_unmap; 120 + } 121 + host->cmd_va = ioremap(res->start, res->end - res->start + 1); 122 + 123 + if (!host->addr_va || !host->data_va || !host->cmd_va) { 124 + ret = -ENOMEM; 125 + goto err_unmap; 126 + } 127 + 128 + /* Link all private pointers */ 129 + mtd = &host->mtd; 130 + nand = &host->nand; 
131 + mtd->priv = nand; 132 + nand->priv = host; 133 + 134 + host->mtd.owner = THIS_MODULE; 135 + nand->IO_ADDR_R = host->data_va; 136 + nand->IO_ADDR_W = host->data_va; 137 + nand->cmd_ctrl = nomadik_cmd_ctrl; 138 + 139 + /* 140 + * This stanza declares ECC_HW but uses soft routines. It's because 141 + * HW claims to make the calculation but not the correction. However, 142 + * I haven't managed to get the desired data out of it until now. 143 + */ 144 + nand->ecc.mode = NAND_ECC_SOFT; 145 + nand->ecc.layout = &nomadik_ecc_layout; 146 + nand->ecc.hwctl = nomadik_ecc_control; 147 + nand->ecc.size = 512; 148 + nand->ecc.bytes = 3; 149 + 150 + nand->options = pdata->options; 151 + 152 + /* 153 + * Scan to find existance of the device 154 + */ 155 + if (nand_scan(&host->mtd, 1)) { 156 + ret = -ENXIO; 157 + goto err_unmap; 158 + } 159 + 160 + #ifdef CONFIG_MTD_PARTITIONS 161 + add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts); 162 + #else 163 + pr_info("Registering %s as whole device\n", mtd->name); 164 + add_mtd_device(mtd); 165 + #endif 166 + 167 + platform_set_drvdata(pdev, host); 168 + return 0; 169 + 170 + err_unmap: 171 + if (host->cmd_va) 172 + iounmap(host->cmd_va); 173 + if (host->data_va) 174 + iounmap(host->data_va); 175 + if (host->addr_va) 176 + iounmap(host->addr_va); 177 + err: 178 + kfree(host); 179 + return ret; 180 + } 181 + 182 + /* 183 + * Clean up routine 184 + */ 185 + static int nomadik_nand_remove(struct platform_device *pdev) 186 + { 187 + struct nomadik_nand_host *host = platform_get_drvdata(pdev); 188 + struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; 189 + 190 + if (pdata->exit) 191 + pdata->exit(); 192 + 193 + if (host) { 194 + iounmap(host->cmd_va); 195 + iounmap(host->data_va); 196 + iounmap(host->addr_va); 197 + kfree(host); 198 + } 199 + return 0; 200 + } 201 + 202 + static int nomadik_nand_suspend(struct device *dev) 203 + { 204 + struct nomadik_nand_host *host = dev_get_drvdata(dev); 205 + int ret = 0; 206 
+ if (host) 207 + ret = host->mtd.suspend(&host->mtd); 208 + return ret; 209 + } 210 + 211 + static int nomadik_nand_resume(struct device *dev) 212 + { 213 + struct nomadik_nand_host *host = dev_get_drvdata(dev); 214 + if (host) 215 + host->mtd.resume(&host->mtd); 216 + return 0; 217 + } 218 + 219 + static struct dev_pm_ops nomadik_nand_pm_ops = { 220 + .suspend = nomadik_nand_suspend, 221 + .resume = nomadik_nand_resume, 222 + }; 223 + 224 + static struct platform_driver nomadik_nand_driver = { 225 + .probe = nomadik_nand_probe, 226 + .remove = nomadik_nand_remove, 227 + .driver = { 228 + .owner = THIS_MODULE, 229 + .name = "nomadik_nand", 230 + .pm = &nomadik_nand_pm_ops, 231 + }, 232 + }; 233 + 234 + static int __init nand_nomadik_init(void) 235 + { 236 + pr_info("Nomadik NAND driver\n"); 237 + return platform_driver_register(&nomadik_nand_driver); 238 + } 239 + 240 + static void __exit nand_nomadik_exit(void) 241 + { 242 + platform_driver_unregister(&nomadik_nand_driver); 243 + } 244 + 245 + module_init(nand_nomadik_init); 246 + module_exit(nand_nomadik_exit); 247 + 248 + MODULE_LICENSE("GPL"); 249 + MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)"); 250 + MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
+336 -11
drivers/mtd/nand/omap2.c
··· 18 18 #include <linux/mtd/partitions.h> 19 19 #include <linux/io.h> 20 20 21 - #include <asm/dma.h> 22 - 21 + #include <mach/dma.h> 23 22 #include <mach/gpmc.h> 24 23 #include <mach/nand.h> 25 24 ··· 111 112 static const char *part_probes[] = { "cmdlinepart", NULL }; 112 113 #endif 113 114 115 + #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH 116 + static int use_prefetch = 1; 117 + 118 + /* "modprobe ... use_prefetch=0" etc */ 119 + module_param(use_prefetch, bool, 0); 120 + MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); 121 + 122 + #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA 123 + static int use_dma = 1; 124 + 125 + /* "modprobe ... use_dma=0" etc */ 126 + module_param(use_dma, bool, 0); 127 + MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 128 + #else 129 + const int use_dma; 130 + #endif 131 + #else 132 + const int use_prefetch; 133 + const int use_dma; 134 + #endif 135 + 114 136 struct omap_nand_info { 115 137 struct nand_hw_control controller; 116 138 struct omap_nand_platform_data *pdata; ··· 144 124 unsigned long phys_base; 145 125 void __iomem *gpmc_cs_baseaddr; 146 126 void __iomem *gpmc_baseaddr; 127 + void __iomem *nand_pref_fifo_add; 128 + struct completion comp; 129 + int dma_ch; 147 130 }; 148 131 149 132 /** ··· 212 189 } 213 190 214 191 /** 192 + * omap_read_buf8 - read data from NAND controller into buffer 193 + * @mtd: MTD device structure 194 + * @buf: buffer to store date 195 + * @len: number of bytes to read 196 + */ 197 + static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len) 198 + { 199 + struct nand_chip *nand = mtd->priv; 200 + 201 + ioread8_rep(nand->IO_ADDR_R, buf, len); 202 + } 203 + 204 + /** 205 + * omap_write_buf8 - write buffer to NAND controller 206 + * @mtd: MTD device structure 207 + * @buf: data buffer 208 + * @len: number of bytes to write 209 + */ 210 + static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len) 211 + { 212 + struct omap_nand_info *info = container_of(mtd, 213 
+ struct omap_nand_info, mtd); 214 + u_char *p = (u_char *)buf; 215 + 216 + while (len--) { 217 + iowrite8(*p++, info->nand.IO_ADDR_W); 218 + while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr + 219 + GPMC_STATUS) & GPMC_BUF_FULL)); 220 + } 221 + } 222 + 223 + /** 215 224 * omap_read_buf16 - read data from NAND controller into buffer 216 225 * @mtd: MTD device structure 217 226 * @buf: buffer to store date ··· 253 198 { 254 199 struct nand_chip *nand = mtd->priv; 255 200 256 - __raw_readsw(nand->IO_ADDR_R, buf, len / 2); 201 + ioread16_rep(nand->IO_ADDR_R, buf, len / 2); 257 202 } 258 203 259 204 /** ··· 272 217 len >>= 1; 273 218 274 219 while (len--) { 275 - writew(*p++, info->nand.IO_ADDR_W); 220 + iowrite16(*p++, info->nand.IO_ADDR_W); 276 221 277 222 while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr + 278 223 GPMC_STATUS) & GPMC_BUF_FULL)) 279 224 ; 280 225 } 281 226 } 227 + 228 + /** 229 + * omap_read_buf_pref - read data from NAND controller into buffer 230 + * @mtd: MTD device structure 231 + * @buf: buffer to store date 232 + * @len: number of bytes to read 233 + */ 234 + static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) 235 + { 236 + struct omap_nand_info *info = container_of(mtd, 237 + struct omap_nand_info, mtd); 238 + uint32_t pfpw_status = 0, r_count = 0; 239 + int ret = 0; 240 + u32 *p = (u32 *)buf; 241 + 242 + /* take care of subpage reads */ 243 + for (; len % 4 != 0; ) { 244 + *buf++ = __raw_readb(info->nand.IO_ADDR_R); 245 + len--; 246 + } 247 + p = (u32 *) buf; 248 + 249 + /* configure and start prefetch transfer */ 250 + ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); 251 + if (ret) { 252 + /* PFPW engine is busy, use cpu copy method */ 253 + if (info->nand.options & NAND_BUSWIDTH_16) 254 + omap_read_buf16(mtd, buf, len); 255 + else 256 + omap_read_buf8(mtd, buf, len); 257 + } else { 258 + do { 259 + pfpw_status = gpmc_prefetch_status(); 260 + r_count = ((pfpw_status >> 24) & 0x7F) >> 2; 261 + 
ioread32_rep(info->nand_pref_fifo_add, p, r_count); 262 + p += r_count; 263 + len -= r_count << 2; 264 + } while (len); 265 + 266 + /* disable and stop the PFPW engine */ 267 + gpmc_prefetch_reset(); 268 + } 269 + } 270 + 271 + /** 272 + * omap_write_buf_pref - write buffer to NAND controller 273 + * @mtd: MTD device structure 274 + * @buf: data buffer 275 + * @len: number of bytes to write 276 + */ 277 + static void omap_write_buf_pref(struct mtd_info *mtd, 278 + const u_char *buf, int len) 279 + { 280 + struct omap_nand_info *info = container_of(mtd, 281 + struct omap_nand_info, mtd); 282 + uint32_t pfpw_status = 0, w_count = 0; 283 + int i = 0, ret = 0; 284 + u16 *p = (u16 *) buf; 285 + 286 + /* take care of subpage writes */ 287 + if (len % 2 != 0) { 288 + writeb(*buf, info->nand.IO_ADDR_R); 289 + p = (u16 *)(buf + 1); 290 + len--; 291 + } 292 + 293 + /* configure and start prefetch transfer */ 294 + ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); 295 + if (ret) { 296 + /* PFPW engine is busy, use cpu copy method */ 297 + if (info->nand.options & NAND_BUSWIDTH_16) 298 + omap_write_buf16(mtd, buf, len); 299 + else 300 + omap_write_buf8(mtd, buf, len); 301 + } else { 302 + pfpw_status = gpmc_prefetch_status(); 303 + while (pfpw_status & 0x3FFF) { 304 + w_count = ((pfpw_status >> 24) & 0x7F) >> 1; 305 + for (i = 0; (i < w_count) && len; i++, len -= 2) 306 + iowrite16(*p++, info->nand_pref_fifo_add); 307 + pfpw_status = gpmc_prefetch_status(); 308 + } 309 + 310 + /* disable and stop the PFPW engine */ 311 + gpmc_prefetch_reset(); 312 + } 313 + } 314 + 315 + #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA 316 + /* 317 + * omap_nand_dma_cb: callback on the completion of dma transfer 318 + * @lch: logical channel 319 + * @ch_satuts: channel status 320 + * @data: pointer to completion data structure 321 + */ 322 + static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) 323 + { 324 + complete((struct completion *) data); 325 + } 326 + 327 + /* 328 + * 
omap_nand_dma_transfer: configer and start dma transfer 329 + * @mtd: MTD device structure 330 + * @addr: virtual address in RAM of source/destination 331 + * @len: number of data bytes to be transferred 332 + * @is_write: flag for read/write operation 333 + */ 334 + static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, 335 + unsigned int len, int is_write) 336 + { 337 + struct omap_nand_info *info = container_of(mtd, 338 + struct omap_nand_info, mtd); 339 + uint32_t prefetch_status = 0; 340 + enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 341 + DMA_FROM_DEVICE; 342 + dma_addr_t dma_addr; 343 + int ret; 344 + 345 + /* The fifo depth is 64 bytes. We have a sync at each frame and frame 346 + * length is 64 bytes. 347 + */ 348 + int buf_len = len >> 6; 349 + 350 + if (addr >= high_memory) { 351 + struct page *p1; 352 + 353 + if (((size_t)addr & PAGE_MASK) != 354 + ((size_t)(addr + len - 1) & PAGE_MASK)) 355 + goto out_copy; 356 + p1 = vmalloc_to_page(addr); 357 + if (!p1) 358 + goto out_copy; 359 + addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 360 + } 361 + 362 + dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); 363 + if (dma_mapping_error(&info->pdev->dev, dma_addr)) { 364 + dev_err(&info->pdev->dev, 365 + "Couldn't DMA map a %d byte buffer\n", len); 366 + goto out_copy; 367 + } 368 + 369 + if (is_write) { 370 + omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 371 + info->phys_base, 0, 0); 372 + omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 373 + dma_addr, 0, 0); 374 + omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 375 + 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 376 + OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); 377 + } else { 378 + omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 379 + info->phys_base, 0, 0); 380 + omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 381 + dma_addr, 0, 0); 382 + omap_set_dma_transfer_params(info->dma_ch, 
OMAP_DMA_DATA_TYPE_S32, 383 + 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 384 + OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); 385 + } 386 + /* configure and start prefetch transfer */ 387 + ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); 388 + if (ret) 389 + /* PFPW engine is busy, use cpu copy methode */ 390 + goto out_copy; 391 + 392 + init_completion(&info->comp); 393 + 394 + omap_start_dma(info->dma_ch); 395 + 396 + /* setup and start DMA using dma_addr */ 397 + wait_for_completion(&info->comp); 398 + 399 + while (0x3fff & (prefetch_status = gpmc_prefetch_status())) 400 + ; 401 + /* disable and stop the PFPW engine */ 402 + gpmc_prefetch_reset(); 403 + 404 + dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 405 + return 0; 406 + 407 + out_copy: 408 + if (info->nand.options & NAND_BUSWIDTH_16) 409 + is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 410 + : omap_write_buf16(mtd, (u_char *) addr, len); 411 + else 412 + is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len) 413 + : omap_write_buf8(mtd, (u_char *) addr, len); 414 + return 0; 415 + } 416 + #else 417 + static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {} 418 + static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, 419 + unsigned int len, int is_write) 420 + { 421 + return 0; 422 + } 423 + #endif 424 + 425 + /** 426 + * omap_read_buf_dma_pref - read data from NAND controller into buffer 427 + * @mtd: MTD device structure 428 + * @buf: buffer to store date 429 + * @len: number of bytes to read 430 + */ 431 + static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len) 432 + { 433 + if (len <= mtd->oobsize) 434 + omap_read_buf_pref(mtd, buf, len); 435 + else 436 + /* start transfer in DMA mode */ 437 + omap_nand_dma_transfer(mtd, buf, len, 0x0); 438 + } 439 + 440 + /** 441 + * omap_write_buf_dma_pref - write buffer to NAND controller 442 + * @mtd: MTD device structure 443 + * @buf: data buffer 444 + * @len: number of bytes to write 
445 + */ 446 + static void omap_write_buf_dma_pref(struct mtd_info *mtd, 447 + const u_char *buf, int len) 448 + { 449 + if (len <= mtd->oobsize) 450 + omap_write_buf_pref(mtd, buf, len); 451 + else 452 + /* start transfer in DMA mode */ 453 + omap_nand_dma_transfer(mtd, buf, len, 0x1); 454 + } 455 + 282 456 /** 283 457 * omap_verify_buf - Verify chip data against buffer 284 458 * @mtd: MTD device structure ··· 942 658 err = -ENOMEM; 943 659 goto out_release_mem_region; 944 660 } 661 + 945 662 info->nand.controller = &info->controller; 946 663 947 664 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R; 948 665 info->nand.cmd_ctrl = omap_hwcontrol; 949 - 950 - /* REVISIT: only supports 16-bit NAND flash */ 951 - 952 - info->nand.read_buf = omap_read_buf16; 953 - info->nand.write_buf = omap_write_buf16; 954 - info->nand.verify_buf = omap_verify_buf; 955 666 956 667 /* 957 668 * If RDY/BSY line is connected to OMAP then use the omap ready ··· 967 688 if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000) 968 689 == 0x1000) 969 690 info->nand.options |= NAND_BUSWIDTH_16; 691 + 692 + if (use_prefetch) { 693 + /* copy the virtual address of nand base for fifo access */ 694 + info->nand_pref_fifo_add = info->nand.IO_ADDR_R; 695 + 696 + info->nand.read_buf = omap_read_buf_pref; 697 + info->nand.write_buf = omap_write_buf_pref; 698 + if (use_dma) { 699 + err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", 700 + omap_nand_dma_cb, &info->comp, &info->dma_ch); 701 + if (err < 0) { 702 + info->dma_ch = -1; 703 + printk(KERN_WARNING "DMA request failed." 
704 + " Non-dma data transfer mode\n"); 705 + } else { 706 + omap_set_dma_dest_burst_mode(info->dma_ch, 707 + OMAP_DMA_DATA_BURST_16); 708 + omap_set_dma_src_burst_mode(info->dma_ch, 709 + OMAP_DMA_DATA_BURST_16); 710 + 711 + info->nand.read_buf = omap_read_buf_dma_pref; 712 + info->nand.write_buf = omap_write_buf_dma_pref; 713 + } 714 + } 715 + } else { 716 + if (info->nand.options & NAND_BUSWIDTH_16) { 717 + info->nand.read_buf = omap_read_buf16; 718 + info->nand.write_buf = omap_write_buf16; 719 + } else { 720 + info->nand.read_buf = omap_read_buf8; 721 + info->nand.write_buf = omap_write_buf8; 722 + } 723 + } 724 + info->nand.verify_buf = omap_verify_buf; 970 725 971 726 #ifdef CONFIG_MTD_NAND_OMAP_HWECC 972 727 info->nand.ecc.bytes = 3; ··· 1057 744 struct omap_nand_info *info = mtd->priv; 1058 745 1059 746 platform_set_drvdata(pdev, NULL); 747 + if (use_dma) 748 + omap_free_dma(info->dma_ch); 749 + 1060 750 /* Release NAND device, its internal structures and partitions */ 1061 751 nand_release(&info->mtd); 1062 - iounmap(info->nand.IO_ADDR_R); 752 + iounmap(info->nand_pref_fifo_add); 1063 753 kfree(&info->mtd); 1064 754 return 0; 1065 755 } ··· 1079 763 static int __init omap_nand_init(void) 1080 764 { 1081 765 printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); 766 + 767 + /* This check is required if driver is being 768 + * loaded run time as a module 769 + */ 770 + if ((1 == use_dma) && (0 == use_prefetch)) { 771 + printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 " 772 + "without use_prefetch'. Prefetch will not be" 773 + " used in either mode (mpu or dma)\n"); 774 + } 1082 775 return platform_driver_register(&omap_nand_driver); 1083 776 } 1084 777
+1 -2
drivers/mtd/nand/orion_nand.c
··· 171 171 } 172 172 173 173 static struct platform_driver orion_nand_driver = { 174 - .probe = orion_nand_probe, 175 174 .remove = __devexit_p(orion_nand_remove), 176 175 .driver = { 177 176 .name = "orion_nand", ··· 180 181 181 182 static int __init orion_nand_init(void) 182 183 { 183 - return platform_driver_register(&orion_nand_driver); 184 + return platform_driver_probe(&orion_nand_driver, orion_nand_probe); 184 185 } 185 186 186 187 static void __exit orion_nand_exit(void)
+12 -5
drivers/mtd/nand/pxa3xx_nand.c
··· 102 102 ERR_SENDCMD = -2, 103 103 ERR_DBERR = -3, 104 104 ERR_BBERR = -4, 105 + ERR_SBERR = -5, 105 106 }; 106 107 107 108 enum { ··· 565 564 566 565 status = nand_readl(info, NDSR); 567 566 568 - if (status & (NDSR_RDDREQ | NDSR_DBERR)) { 567 + if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) { 569 568 if (status & NDSR_DBERR) 570 569 info->retcode = ERR_DBERR; 570 + else if (status & NDSR_SBERR) 571 + info->retcode = ERR_SBERR; 571 572 572 - disable_int(info, NDSR_RDDREQ | NDSR_DBERR); 573 + disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); 573 574 574 575 if (info->use_dma) { 575 576 info->state = STATE_DMA_READING; ··· 673 670 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) 674 671 break; 675 672 676 - pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR); 673 + pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); 677 674 678 675 /* We only are OOB, so if the data has error, does not matter */ 679 676 if (info->retcode == ERR_DBERR) ··· 690 687 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) 691 688 break; 692 689 693 - pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR); 690 + pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); 694 691 695 692 if (info->retcode == ERR_DBERR) { 696 693 /* for blank page (all 0xff), HW will calculate its ECC as ··· 864 861 * consider it as a ecc error which will tell the caller the 865 862 * read fail We have distinguish all the errors, but the 866 863 * nand_read_ecc only check this function return value 864 + * 865 + * Corrected (single-bit) errors must also be noted. 867 866 */ 868 - if (info->retcode != ERR_NONE) 867 + if (info->retcode == ERR_SBERR) 868 + return 1; 869 + else if (info->retcode != ERR_NONE) 869 870 return -1; 870 871 871 872 return 0;
+2 -3
drivers/mtd/nand/sh_flctl.c
··· 329 329 } 330 330 331 331 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 332 - uint8_t *buf) 332 + uint8_t *buf, int page) 333 333 { 334 334 int i, eccsize = chip->ecc.size; 335 335 int eccbytes = chip->ecc.bytes; ··· 857 857 } 858 858 859 859 static struct platform_driver flctl_driver = { 860 - .probe = flctl_probe, 861 860 .remove = flctl_remove, 862 861 .driver = { 863 862 .name = "sh_flctl", ··· 866 867 867 868 static int __init flctl_nand_init(void) 868 869 { 869 - return platform_driver_register(&flctl_driver); 870 + return platform_driver_probe(&flctl_driver, flctl_probe); 870 871 } 871 872 872 873 static void __exit flctl_nand_cleanup(void)
+16 -1
drivers/mtd/nand/tmio_nand.c
··· 301 301 return 0; 302 302 } 303 303 304 + static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf, 305 + unsigned char *read_ecc, unsigned char *calc_ecc) 306 + { 307 + int r0, r1; 308 + 309 + /* assume ecc.size = 512 and ecc.bytes = 6 */ 310 + r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256); 311 + if (r0 < 0) 312 + return r0; 313 + r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256); 314 + if (r1 < 0) 315 + return r1; 316 + return r0 + r1; 317 + } 318 + 304 319 static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) 305 320 { 306 321 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; ··· 439 424 nand_chip->ecc.bytes = 6; 440 425 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc; 441 426 nand_chip->ecc.calculate = tmio_nand_calculate_ecc; 442 - nand_chip->ecc.correct = nand_correct_data; 427 + nand_chip->ecc.correct = tmio_nand_correct_data; 443 428 444 429 if (data) 445 430 nand_chip->badblock_pattern = data->badblock_pattern;
+47 -5
drivers/mtd/nand/txx9ndfmc.c
··· 189 189 uint8_t *ecc_code) 190 190 { 191 191 struct platform_device *dev = mtd_to_platdev(mtd); 192 + struct nand_chip *chip = mtd->priv; 193 + int eccbytes; 192 194 u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR); 193 195 194 196 mcr &= ~TXX9_NDFMCR_ECC_ALL; 195 197 txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR); 196 198 txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR); 197 - ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR); 198 - ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR); 199 - ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR); 199 + for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) { 200 + ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR); 201 + ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR); 202 + ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR); 203 + ecc_code += 3; 204 + } 200 205 txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR); 201 206 return 0; 207 + } 208 + 209 + static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf, 210 + unsigned char *read_ecc, unsigned char *calc_ecc) 211 + { 212 + struct nand_chip *chip = mtd->priv; 213 + int eccsize; 214 + int corrected = 0; 215 + int stat; 216 + 217 + for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) { 218 + stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256); 219 + if (stat < 0) 220 + return stat; 221 + corrected += stat; 222 + buf += 256; 223 + read_ecc += 3; 224 + calc_ecc += 3; 225 + } 226 + return corrected; 202 227 } 203 228 204 229 static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode) ··· 268 243 269 244 #define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \ 270 245 DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000) 246 + 247 + static int txx9ndfmc_nand_scan(struct mtd_info *mtd) 248 + { 249 + struct nand_chip *chip = mtd->priv; 250 + int ret; 251 + 252 + ret = nand_scan_ident(mtd, 1); 253 + if (!ret) { 254 + if (mtd->writesize >= 512) { 255 + chip->ecc.size = mtd->writesize; 256 + chip->ecc.bytes = 3 * (mtd->writesize / 
256); 257 + } 258 + ret = nand_scan_tail(mtd); 259 + } 260 + return ret; 261 + } 271 262 272 263 static int __init txx9ndfmc_probe(struct platform_device *dev) 273 264 { ··· 362 321 chip->cmd_ctrl = txx9ndfmc_cmd_ctrl; 363 322 chip->dev_ready = txx9ndfmc_dev_ready; 364 323 chip->ecc.calculate = txx9ndfmc_calculate_ecc; 365 - chip->ecc.correct = nand_correct_data; 324 + chip->ecc.correct = txx9ndfmc_correct_data; 366 325 chip->ecc.hwctl = txx9ndfmc_enable_hwecc; 367 326 chip->ecc.mode = NAND_ECC_HW; 327 + /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */ 368 328 chip->ecc.size = 256; 369 329 chip->ecc.bytes = 3; 370 330 chip->chip_delay = 100; ··· 391 349 if (plat->wide_mask & (1 << i)) 392 350 chip->options |= NAND_BUSWIDTH_16; 393 351 394 - if (nand_scan(mtd, 1)) { 352 + if (txx9ndfmc_nand_scan(mtd)) { 395 353 kfree(txx9_priv->mtdname); 396 354 kfree(txx9_priv); 397 355 continue;
+382
drivers/mtd/nand/w90p910_nand.c
··· 1 + /* 2 + * Copyright (c) 2009 Nuvoton technology corporation. 3 + * 4 + * Wan ZongShun <mcuos.com@gmail.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation;version 2 of the License. 9 + * 10 + */ 11 + 12 + #include <linux/slab.h> 13 + #include <linux/init.h> 14 + #include <linux/module.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/io.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/delay.h> 19 + #include <linux/clk.h> 20 + #include <linux/err.h> 21 + 22 + #include <linux/mtd/mtd.h> 23 + #include <linux/mtd/nand.h> 24 + #include <linux/mtd/partitions.h> 25 + 26 + #define REG_FMICSR 0x00 27 + #define REG_SMCSR 0xa0 28 + #define REG_SMISR 0xac 29 + #define REG_SMCMD 0xb0 30 + #define REG_SMADDR 0xb4 31 + #define REG_SMDATA 0xb8 32 + 33 + #define RESET_FMI 0x01 34 + #define NAND_EN 0x08 35 + #define READYBUSY (0x01 << 18) 36 + 37 + #define SWRST 0x01 38 + #define PSIZE (0x01 << 3) 39 + #define DMARWEN (0x03 << 1) 40 + #define BUSWID (0x01 << 4) 41 + #define ECC4EN (0x01 << 5) 42 + #define WP (0x01 << 24) 43 + #define NANDCS (0x01 << 25) 44 + #define ENDADDR (0x01 << 31) 45 + 46 + #define read_data_reg(dev) \ 47 + __raw_readl((dev)->reg + REG_SMDATA) 48 + 49 + #define write_data_reg(dev, val) \ 50 + __raw_writel((val), (dev)->reg + REG_SMDATA) 51 + 52 + #define write_cmd_reg(dev, val) \ 53 + __raw_writel((val), (dev)->reg + REG_SMCMD) 54 + 55 + #define write_addr_reg(dev, val) \ 56 + __raw_writel((val), (dev)->reg + REG_SMADDR) 57 + 58 + struct w90p910_nand { 59 + struct mtd_info mtd; 60 + struct nand_chip chip; 61 + void __iomem *reg; 62 + struct clk *clk; 63 + spinlock_t lock; 64 + }; 65 + 66 + static const struct mtd_partition partitions[] = { 67 + { 68 + .name = "NAND FS 0", 69 + .offset = 0, 70 + .size = 8 * 1024 * 1024 71 + }, 72 + { 73 + .name = "NAND FS 1", 74 + .offset = 
MTDPART_OFS_APPEND, 75 + .size = MTDPART_SIZ_FULL 76 + } 77 + }; 78 + 79 + static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) 80 + { 81 + unsigned char ret; 82 + struct w90p910_nand *nand; 83 + 84 + nand = container_of(mtd, struct w90p910_nand, mtd); 85 + 86 + ret = (unsigned char)read_data_reg(nand); 87 + 88 + return ret; 89 + } 90 + 91 + static void w90p910_nand_read_buf(struct mtd_info *mtd, 92 + unsigned char *buf, int len) 93 + { 94 + int i; 95 + struct w90p910_nand *nand; 96 + 97 + nand = container_of(mtd, struct w90p910_nand, mtd); 98 + 99 + for (i = 0; i < len; i++) 100 + buf[i] = (unsigned char)read_data_reg(nand); 101 + } 102 + 103 + static void w90p910_nand_write_buf(struct mtd_info *mtd, 104 + const unsigned char *buf, int len) 105 + { 106 + int i; 107 + struct w90p910_nand *nand; 108 + 109 + nand = container_of(mtd, struct w90p910_nand, mtd); 110 + 111 + for (i = 0; i < len; i++) 112 + write_data_reg(nand, buf[i]); 113 + } 114 + 115 + static int w90p910_verify_buf(struct mtd_info *mtd, 116 + const unsigned char *buf, int len) 117 + { 118 + int i; 119 + struct w90p910_nand *nand; 120 + 121 + nand = container_of(mtd, struct w90p910_nand, mtd); 122 + 123 + for (i = 0; i < len; i++) { 124 + if (buf[i] != (unsigned char)read_data_reg(nand)) 125 + return -EFAULT; 126 + } 127 + 128 + return 0; 129 + } 130 + 131 + static int w90p910_check_rb(struct w90p910_nand *nand) 132 + { 133 + unsigned int val; 134 + spin_lock(&nand->lock); 135 + val = __raw_readl(REG_SMISR); 136 + val &= READYBUSY; 137 + spin_unlock(&nand->lock); 138 + 139 + return val; 140 + } 141 + 142 + static int w90p910_nand_devready(struct mtd_info *mtd) 143 + { 144 + struct w90p910_nand *nand; 145 + int ready; 146 + 147 + nand = container_of(mtd, struct w90p910_nand, mtd); 148 + 149 + ready = (w90p910_check_rb(nand)) ? 
1 : 0; 150 + return ready; 151 + } 152 + 153 + static void w90p910_nand_command_lp(struct mtd_info *mtd, 154 + unsigned int command, int column, int page_addr) 155 + { 156 + register struct nand_chip *chip = mtd->priv; 157 + struct w90p910_nand *nand; 158 + 159 + nand = container_of(mtd, struct w90p910_nand, mtd); 160 + 161 + if (command == NAND_CMD_READOOB) { 162 + column += mtd->writesize; 163 + command = NAND_CMD_READ0; 164 + } 165 + 166 + write_cmd_reg(nand, command & 0xff); 167 + 168 + if (column != -1 || page_addr != -1) { 169 + 170 + if (column != -1) { 171 + if (chip->options & NAND_BUSWIDTH_16) 172 + column >>= 1; 173 + write_addr_reg(nand, column); 174 + write_addr_reg(nand, column >> 8 | ENDADDR); 175 + } 176 + if (page_addr != -1) { 177 + write_addr_reg(nand, page_addr); 178 + 179 + if (chip->chipsize > (128 << 20)) { 180 + write_addr_reg(nand, page_addr >> 8); 181 + write_addr_reg(nand, page_addr >> 16 | ENDADDR); 182 + } else { 183 + write_addr_reg(nand, page_addr >> 8 | ENDADDR); 184 + } 185 + } 186 + } 187 + 188 + switch (command) { 189 + case NAND_CMD_CACHEDPROG: 190 + case NAND_CMD_PAGEPROG: 191 + case NAND_CMD_ERASE1: 192 + case NAND_CMD_ERASE2: 193 + case NAND_CMD_SEQIN: 194 + case NAND_CMD_RNDIN: 195 + case NAND_CMD_STATUS: 196 + case NAND_CMD_DEPLETE1: 197 + return; 198 + 199 + case NAND_CMD_STATUS_ERROR: 200 + case NAND_CMD_STATUS_ERROR0: 201 + case NAND_CMD_STATUS_ERROR1: 202 + case NAND_CMD_STATUS_ERROR2: 203 + case NAND_CMD_STATUS_ERROR3: 204 + udelay(chip->chip_delay); 205 + return; 206 + 207 + case NAND_CMD_RESET: 208 + if (chip->dev_ready) 209 + break; 210 + udelay(chip->chip_delay); 211 + 212 + write_cmd_reg(nand, NAND_CMD_STATUS); 213 + write_cmd_reg(nand, command); 214 + 215 + while (!w90p910_check_rb(nand)) 216 + ; 217 + 218 + return; 219 + 220 + case NAND_CMD_RNDOUT: 221 + write_cmd_reg(nand, NAND_CMD_RNDOUTSTART); 222 + return; 223 + 224 + case NAND_CMD_READ0: 225 + 226 + write_cmd_reg(nand, NAND_CMD_READSTART); 227 + default: 228 
+ 229 + if (!chip->dev_ready) { 230 + udelay(chip->chip_delay); 231 + return; 232 + } 233 + } 234 + 235 + /* Apply this short delay always to ensure that we do wait tWB in 236 + * any case on any machine. */ 237 + ndelay(100); 238 + 239 + while (!chip->dev_ready(mtd)) 240 + ; 241 + } 242 + 243 + 244 + static void w90p910_nand_enable(struct w90p910_nand *nand) 245 + { 246 + unsigned int val; 247 + spin_lock(&nand->lock); 248 + __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR)); 249 + 250 + val = __raw_readl(nand->reg + REG_FMICSR); 251 + 252 + if (!(val & NAND_EN)) 253 + __raw_writel(val | NAND_EN, REG_FMICSR); 254 + 255 + val = __raw_readl(nand->reg + REG_SMCSR); 256 + 257 + val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS); 258 + val |= WP; 259 + 260 + __raw_writel(val, nand->reg + REG_SMCSR); 261 + 262 + spin_unlock(&nand->lock); 263 + } 264 + 265 + static int __devinit w90p910_nand_probe(struct platform_device *pdev) 266 + { 267 + struct w90p910_nand *w90p910_nand; 268 + struct nand_chip *chip; 269 + int retval; 270 + struct resource *res; 271 + 272 + retval = 0; 273 + 274 + w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); 275 + if (!w90p910_nand) 276 + return -ENOMEM; 277 + chip = &(w90p910_nand->chip); 278 + 279 + w90p910_nand->mtd.priv = chip; 280 + w90p910_nand->mtd.owner = THIS_MODULE; 281 + spin_lock_init(&w90p910_nand->lock); 282 + 283 + w90p910_nand->clk = clk_get(&pdev->dev, NULL); 284 + if (IS_ERR(w90p910_nand->clk)) { 285 + retval = -ENOENT; 286 + goto fail1; 287 + } 288 + clk_enable(w90p910_nand->clk); 289 + 290 + chip->cmdfunc = w90p910_nand_command_lp; 291 + chip->dev_ready = w90p910_nand_devready; 292 + chip->read_byte = w90p910_nand_read_byte; 293 + chip->write_buf = w90p910_nand_write_buf; 294 + chip->read_buf = w90p910_nand_read_buf; 295 + chip->verify_buf = w90p910_verify_buf; 296 + chip->chip_delay = 50; 297 + chip->options = 0; 298 + chip->ecc.mode = NAND_ECC_SOFT; 299 + 300 + res = platform_get_resource(pdev, IORESOURCE_MEM, 
0); 301 + if (!res) { 302 + retval = -ENXIO; 303 + goto fail1; 304 + } 305 + 306 + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 307 + retval = -EBUSY; 308 + goto fail1; 309 + } 310 + 311 + w90p910_nand->reg = ioremap(res->start, resource_size(res)); 312 + if (!w90p910_nand->reg) { 313 + retval = -ENOMEM; 314 + goto fail2; 315 + } 316 + 317 + w90p910_nand_enable(w90p910_nand); 318 + 319 + if (nand_scan(&(w90p910_nand->mtd), 1)) { 320 + retval = -ENXIO; 321 + goto fail3; 322 + } 323 + 324 + add_mtd_partitions(&(w90p910_nand->mtd), partitions, 325 + ARRAY_SIZE(partitions)); 326 + 327 + platform_set_drvdata(pdev, w90p910_nand); 328 + 329 + return retval; 330 + 331 + fail3: iounmap(w90p910_nand->reg); 332 + fail2: release_mem_region(res->start, resource_size(res)); 333 + fail1: kfree(w90p910_nand); 334 + return retval; 335 + } 336 + 337 + static int __devexit w90p910_nand_remove(struct platform_device *pdev) 338 + { 339 + struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); 340 + struct resource *res; 341 + 342 + iounmap(w90p910_nand->reg); 343 + 344 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 345 + release_mem_region(res->start, resource_size(res)); 346 + 347 + clk_disable(w90p910_nand->clk); 348 + clk_put(w90p910_nand->clk); 349 + 350 + kfree(w90p910_nand); 351 + 352 + platform_set_drvdata(pdev, NULL); 353 + 354 + return 0; 355 + } 356 + 357 + static struct platform_driver w90p910_nand_driver = { 358 + .probe = w90p910_nand_probe, 359 + .remove = __devexit_p(w90p910_nand_remove), 360 + .driver = { 361 + .name = "w90p910-fmi", 362 + .owner = THIS_MODULE, 363 + }, 364 + }; 365 + 366 + static int __init w90p910_nand_init(void) 367 + { 368 + return platform_driver_register(&w90p910_nand_driver); 369 + } 370 + 371 + static void __exit w90p910_nand_exit(void) 372 + { 373 + platform_driver_unregister(&w90p910_nand_driver); 374 + } 375 + 376 + module_init(w90p910_nand_init); 377 + module_exit(w90p910_nand_exit); 378 + 379 + 
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 380 + MODULE_DESCRIPTION("w90p910 nand driver!"); 381 + MODULE_LICENSE("GPL"); 382 + MODULE_ALIAS("platform:w90p910-fmi");
+10 -11
drivers/mtd/ofpart.c
··· 46 46 const u32 *reg; 47 47 int len; 48 48 49 - /* check if this is a partition node */ 50 - partname = of_get_property(pp, "name", &len); 51 - if (strcmp(partname, "partition") != 0) { 49 + reg = of_get_property(pp, "reg", &len); 50 + if (!reg) { 52 51 nr_parts--; 53 52 continue; 54 53 } 55 54 56 - reg = of_get_property(pp, "reg", &len); 57 - if (!reg || (len != 2 * sizeof(u32))) { 58 - of_node_put(pp); 59 - dev_err(dev, "Invalid 'reg' on %s\n", node->full_name); 60 - kfree(*pparts); 61 - *pparts = NULL; 62 - return -EINVAL; 63 - } 64 55 (*pparts)[i].offset = reg[0]; 65 56 (*pparts)[i].size = reg[1]; 66 57 ··· 64 73 (*pparts)[i].mask_flags = MTD_WRITEABLE; 65 74 66 75 i++; 76 + } 77 + 78 + if (!i) { 79 + of_node_put(pp); 80 + dev_err(dev, "No valid partition found on %s\n", node->full_name); 81 + kfree(*pparts); 82 + *pparts = NULL; 83 + return -EINVAL; 67 84 } 68 85 69 86 return nr_parts;
+1 -2
drivers/mtd/onenand/Kconfig
··· 5 5 menuconfig MTD_ONENAND 6 6 tristate "OneNAND Device Support" 7 7 depends on MTD 8 + select MTD_PARTITIONS 8 9 help 9 10 This enables support for accessing all type of OneNAND flash 10 11 devices. For further information see ··· 24 23 25 24 config MTD_ONENAND_GENERIC 26 25 tristate "OneNAND Flash device via platform device driver" 27 - depends on ARM 28 26 help 29 27 Support for OneNAND flash via platform device driver. 30 28 ··· 66 66 67 67 config MTD_ONENAND_SIM 68 68 tristate "OneNAND simulator support" 69 - depends on MTD_PARTITIONS 70 69 help 71 70 The simulator may simulate various OneNAND flash chips for the 72 71 OneNAND MTD layer.
+14 -10
drivers/mtd/onenand/generic.c
··· 19 19 #include <linux/mtd/mtd.h> 20 20 #include <linux/mtd/onenand.h> 21 21 #include <linux/mtd/partitions.h> 22 - 23 22 #include <asm/io.h> 24 - #include <asm/mach/flash.h> 25 23 26 - #define DRIVER_NAME "onenand" 27 - 24 + /* 25 + * Note: Driver name and platform data format have been updated! 26 + * 27 + * This version of the driver is named "onenand-flash" and takes struct 28 + * onenand_platform_data as platform data. The old ARM-specific version 29 + * with the name "onenand" used to take struct flash_platform_data. 30 + */ 31 + #define DRIVER_NAME "onenand-flash" 28 32 29 33 #ifdef CONFIG_MTD_PARTITIONS 30 34 static const char *part_probes[] = { "cmdlinepart", NULL, }; ··· 43 39 static int __devinit generic_onenand_probe(struct platform_device *pdev) 44 40 { 45 41 struct onenand_info *info; 46 - struct flash_platform_data *pdata = pdev->dev.platform_data; 42 + struct onenand_platform_data *pdata = pdev->dev.platform_data; 47 43 struct resource *res = pdev->resource; 48 - unsigned long size = res->end - res->start + 1; 44 + unsigned long size = resource_size(res); 49 45 int err; 50 46 51 47 info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL); 52 48 if (!info) 53 49 return -ENOMEM; 54 50 55 - if (!request_mem_region(res->start, size, pdev->dev.driver->name)) { 51 + if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) { 56 52 err = -EBUSY; 57 53 goto out_free_info; 58 54 } ··· 63 59 goto out_release_mem_region; 64 60 } 65 61 66 - info->onenand.mmcontrol = pdata->mmcontrol; 62 + info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : 0; 67 63 info->onenand.irq = platform_get_irq(pdev, 0); 68 64 69 65 info->mtd.name = dev_name(&pdev->dev); ··· 79 75 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 80 76 if (err > 0) 81 77 add_mtd_partitions(&info->mtd, info->parts, err); 82 - else if (err <= 0 && pdata->parts) 78 + else if (err <= 0 && pdata && pdata->parts) 83 79 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); 84 80 else 85 81 #endif ··· 103 99 { 104 100 struct onenand_info *info = platform_get_drvdata(pdev); 105 101 struct resource *res = pdev->resource; 106 - unsigned long size = res->end - res->start + 1; 102 + unsigned long size = resource_size(res); 107 103 108 104 platform_set_drvdata(pdev, NULL); 109 105
+10 -10
drivers/mtd/onenand/onenand_base.c
··· 1191 1191 /* 1192 1192 * Chip boundary handling in DDP 1193 1193 * Now we issued chip 1 read and pointed chip 1 1194 - * bufferam so we have to point chip 0 bufferam. 1194 + * bufferram so we have to point chip 0 bufferram. 1195 1195 */ 1196 1196 if (ONENAND_IS_DDP(this) && 1197 1197 unlikely(from == (this->chipsize >> 1))) { ··· 1867 1867 ONENAND_SET_NEXT_BUFFERRAM(this); 1868 1868 1869 1869 /* 1870 - * 2 PLANE, MLC, and Flex-OneNAND doesn't support 1871 - * write-while-programe feature. 1870 + * 2 PLANE, MLC, and Flex-OneNAND do not support 1871 + * write-while-program feature. 1872 1872 */ 1873 1873 if (!ONENAND_IS_2PLANE(this) && !first) { 1874 1874 ONENAND_SET_PREV_BUFFERRAM(this); ··· 1879 1879 onenand_update_bufferram(mtd, prev, !ret && !prev_subpage); 1880 1880 if (ret) { 1881 1881 written -= prevlen; 1882 - printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); 1882 + printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1883 1883 break; 1884 1884 } 1885 1885 ··· 1905 1905 /* In partial page write we don't update bufferram */ 1906 1906 onenand_update_bufferram(mtd, to, !ret && !subpage); 1907 1907 if (ret) { 1908 - printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); 1908 + printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1909 1909 break; 1910 1910 } 1911 1911 ··· 2201 2201 /* Grab the lock and see if the device is available */ 2202 2202 onenand_get_device(mtd, FL_ERASING); 2203 2203 2204 - /* Loop throught the pages */ 2204 + /* Loop through the blocks */ 2205 2205 instr->state = MTD_ERASING; 2206 2206 2207 2207 while (len) { ··· 2328 2328 if (bbm->bbt) 2329 2329 bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 2330 2330 2331 - /* We write two bytes, so we dont have to mess with 16 bit access */ 2331 + /* We write two bytes, so we don't have to mess with 16-bit access */ 2332 2332 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); 2333 2333 /* FIXME : What to do when marking SLC 
block in partition 2334 2334 * with MLC erasesize? For now, it is not advisable to ··· 2557 2557 2558 2558 #ifdef CONFIG_MTD_ONENAND_OTP 2559 2559 2560 - /* Interal OTP operation */ 2560 + /* Internal OTP operation */ 2561 2561 typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, 2562 2562 size_t *retlen, u_char *buf); 2563 2563 ··· 2921 2921 this->options |= ONENAND_HAS_2PLANE; 2922 2922 2923 2923 case ONENAND_DEVICE_DENSITY_2Gb: 2924 - /* 2Gb DDP don't have 2 plane */ 2924 + /* 2Gb DDP does not have 2 plane */ 2925 2925 if (!ONENAND_IS_DDP(this)) 2926 2926 this->options |= ONENAND_HAS_2PLANE; 2927 2927 this->options |= ONENAND_HAS_UNLOCK_ALL; ··· 3364 3364 /* It's real page size */ 3365 3365 this->writesize = mtd->writesize; 3366 3366 3367 - /* REVIST: Multichip handling */ 3367 + /* REVISIT: Multichip handling */ 3368 3368 3369 3369 if (FLEXONENAND(this)) 3370 3370 flexonenand_get_size(mtd);
+1 -1
drivers/mtd/tests/mtd_oobtest.c
··· 512 512 goto out; 513 513 514 514 addr0 = 0; 515 - for (i = 0; bbt[i] && i < ebcnt; ++i) 515 + for (i = 0; i < ebcnt && bbt[i]; ++i) 516 516 addr0 += mtd->erasesize; 517 517 518 518 /* Attempt to write off end of OOB */
+6 -6
drivers/mtd/tests/mtd_pagetest.c
··· 116 116 loff_t addr = ebnum * mtd->erasesize; 117 117 118 118 addr0 = 0; 119 - for (i = 0; bbt[i] && i < ebcnt; ++i) 119 + for (i = 0; i < ebcnt && bbt[i]; ++i) 120 120 addr0 += mtd->erasesize; 121 121 122 122 addrn = mtd->size; 123 - for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) 123 + for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i) 124 124 addrn -= mtd->erasesize; 125 125 126 126 set_random_data(writebuf, mtd->erasesize); ··· 219 219 memset(pp1, 0, pgsize * 4); 220 220 221 221 addr0 = 0; 222 - for (i = 0; bbt[i] && i < ebcnt; ++i) 222 + for (i = 0; i < ebcnt && bbt[i]; ++i) 223 223 addr0 += mtd->erasesize; 224 224 225 225 addrn = mtd->size; 226 - for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) 226 + for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i) 227 227 addrn -= mtd->erasesize; 228 228 229 229 /* Read 2nd-to-last page to pp1 */ ··· 317 317 318 318 ebnum = 0; 319 319 addr0 = 0; 320 - for (i = 0; bbt[i] && i < ebcnt; ++i) { 320 + for (i = 0; i < ebcnt && bbt[i]; ++i) { 321 321 addr0 += mtd->erasesize; 322 322 ebnum += 1; 323 323 } ··· 413 413 414 414 ebnum = 0; 415 415 addr0 = 0; 416 - for (i = 0; bbt[i] && i < ebcnt; ++i) { 416 + for (i = 0; i < ebcnt && bbt[i]; ++i) { 417 417 addr0 += mtd->erasesize; 418 418 ebnum += 1; 419 419 }
+13 -7
fs/jffs2/background.c
··· 15 15 #include <linux/completion.h> 16 16 #include <linux/sched.h> 17 17 #include <linux/freezer.h> 18 + #include <linux/kthread.h> 18 19 #include "nodelist.h" 19 20 20 21 ··· 32 31 /* This must only ever be called when no GC thread is currently running */ 33 32 int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) 34 33 { 35 - pid_t pid; 34 + struct task_struct *tsk; 36 35 int ret = 0; 37 36 38 37 BUG_ON(c->gc_task); ··· 40 39 init_completion(&c->gc_thread_start); 41 40 init_completion(&c->gc_thread_exit); 42 41 43 - pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES); 44 - if (pid < 0) { 45 - printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid); 42 + tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); 43 + if (IS_ERR(tsk)) { 44 + printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); 46 45 complete(&c->gc_thread_exit); 47 - ret = pid; 46 + ret = PTR_ERR(tsk); 48 47 } else { 49 48 /* Wait for it... */ 50 - D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); 49 + D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); 51 50 wait_for_completion(&c->gc_thread_start); 51 + ret = tsk->pid; 52 52 } 53 53 54 54 return ret; ··· 73 71 { 74 72 struct jffs2_sb_info *c = _c; 75 73 76 - daemonize("jffs2_gcd_mtd%d", c->mtd->index); 77 74 allow_signal(SIGKILL); 78 75 allow_signal(SIGSTOP); 79 76 allow_signal(SIGCONT); ··· 107 106 * inode in with read_inode() is much preferable to having 108 107 * the GC thread get there first. */ 109 108 schedule_timeout_interruptible(msecs_to_jiffies(50)); 109 + 110 + if (kthread_should_stop()) { 111 + D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); 112 + goto die; 113 + } 110 114 111 115 /* Put_super will send a SIGKILL and then wait on the sem. 112 116 */
+2 -2
fs/jffs2/malloc.c
··· 39 39 40 40 raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", 41 41 sizeof(struct jffs2_raw_dirent), 42 - 0, 0, NULL); 42 + 0, SLAB_HWCACHE_ALIGN, NULL); 43 43 if (!raw_dirent_slab) 44 44 goto err; 45 45 46 46 raw_inode_slab = kmem_cache_create("jffs2_raw_inode", 47 47 sizeof(struct jffs2_raw_inode), 48 - 0, 0, NULL); 48 + 0, SLAB_HWCACHE_ALIGN, NULL); 49 49 if (!raw_inode_slab) 50 50 goto err; 51 51
+3 -2
include/linux/mtd/nand.h
··· 121 121 NAND_ECC_SOFT, 122 122 NAND_ECC_HW, 123 123 NAND_ECC_HW_SYNDROME, 124 + NAND_ECC_HW_OOB_FIRST, 124 125 } nand_ecc_modes_t; 125 126 126 127 /* ··· 272 271 uint8_t *calc_ecc); 273 272 int (*read_page_raw)(struct mtd_info *mtd, 274 273 struct nand_chip *chip, 275 - uint8_t *buf); 274 + uint8_t *buf, int page); 276 275 void (*write_page_raw)(struct mtd_info *mtd, 277 276 struct nand_chip *chip, 278 277 const uint8_t *buf); 279 278 int (*read_page)(struct mtd_info *mtd, 280 279 struct nand_chip *chip, 281 - uint8_t *buf); 280 + uint8_t *buf, int page); 282 281 int (*read_subpage)(struct mtd_info *mtd, 283 282 struct nand_chip *chip, 284 283 uint32_t offs, uint32_t len,
+6
include/linux/mtd/nand_ecc.h
··· 21 21 int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); 22 22 23 23 /* 24 + * Detect and correct a 1 bit error for eccsize byte block 25 + */ 26 + int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, 27 + unsigned int eccsize); 28 + 29 + /* 24 30 * Detect and correct a 1 bit error for 256 byte block 25 31 */ 26 32 int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+8
include/linux/mtd/onenand.h
··· 214 214 loff_t onenand_addr(struct onenand_chip *this, int block); 215 215 int flexonenand_region(struct mtd_info *mtd, loff_t addr); 216 216 217 + struct mtd_partition; 218 + 219 + struct onenand_platform_data { 220 + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); 221 + struct mtd_partition *parts; 222 + unsigned int nr_parts; 223 + }; 224 + 217 225 #endif /* __LINUX_MTD_ONENAND_H */
+3
include/linux/mtd/onenand_regs.h
··· 207 207 #define ONENAND_ECC_2BIT (1 << 1) 208 208 #define ONENAND_ECC_2BIT_ALL (0xAAAA) 209 209 #define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) 210 + #define ONENAND_ECC_3BIT (1 << 2) 211 + #define ONENAND_ECC_4BIT (1 << 3) 212 + #define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) 210 213 211 214 /* 212 215 * One-Time Programmable (OTP)