Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20121219' of git://git.infradead.org/linux-mtd

Pull MTD updates from David Woodhouse:
- Various cleanups especially in NAND tests
- Add support for NAND flash on BCMA bus
- DT support for sh_flctl and denali NAND drivers
- Kill obsolete/superseded drivers (fortunet, nomadik_nand)
- Fix JFFS2 locking bug in ENOMEM failure path
- New SPI flash chips, as usual
- Support writing in 'reliable mode' for DiskOnChip G4
- Debugfs support in nandsim

* tag 'for-linus-20121219' of git://git.infradead.org/linux-mtd: (96 commits)
mtd: nand: typo in nand_id_has_period() comments
mtd: nand/gpio: use io{read,write}*_rep accessors
mtd: block2mtd: throttle writes by calling balance_dirty_pages_ratelimited.
mtd: nand: gpmi: reset BCH earlier, too, to avoid NAND startup problems
mtd: nand/docg4: fix and improve read of factory bbt
mtd: nand/docg4: reserve bb marker area in ecclayout
mtd: nand/docg4: add support for writing in reliable mode
mtd: mxc_nand: reorder part_probes to let cmdline override other sources
mtd: mxc_nand: fix unbalanced clk_disable() in error path
mtd: nandsim: Introduce debugfs infrastructure
mtd: physmap_of: error checking to prevent a NULL pointer dereference
mtg: docg3: potential divide by zero in doc_write_oob()
mtd: bcm47xxnflash: writing support
mtd: tests/read: initialize buffer for whole next page
mtd: at91: atmel_nand: return bit flips for the PMECC read_page()
mtd: fix recovery after failed write-buffer operation in cfi_cmdset_0002.c
mtd: nand: onfi need to be probed in 8 bits mode
mtd: nand: add NAND_BUSWIDTH_AUTO to autodetect bus width
mtd: nand: print flash size during detection
mted: nand_wait_ready timeout fix
...

+2544 -1833
+8
Documentation/devicetree/bindings/arm/davinci/nand.txt
··· 23 23 - ti,davinci-nand-buswidth: buswidth 8 or 16 24 24 - ti,davinci-nand-use-bbt: use flash based bad block table support. 25 25 26 + nand device bindings may contain additional sub-nodes describing 27 + partitions of the address space. See partition.txt for more detail. 28 + 26 29 Example(da850 EVM ): 27 30 nand_cs3@62000000 { 28 31 compatible = "ti,davinci-nand"; ··· 38 35 ti,davinci-ecc-mode = "hw"; 39 36 ti,davinci-ecc-bits = <4>; 40 37 ti,davinci-nand-use-bbt; 38 + 39 + partition@180000 { 40 + label = "ubifs"; 41 + reg = <0x180000 0x7e80000>; 42 + }; 41 43 };
+23
Documentation/devicetree/bindings/mtd/denali-nand.txt
··· 1 + * Denali NAND controller 2 + 3 + Required properties: 4 + - compatible : should be "denali,denali-nand-dt" 5 + - reg : should contain registers location and length for data and reg. 6 + - reg-names: Should contain the reg names "nand_data" and "denali_reg" 7 + - interrupts : The interrupt number. 8 + - dma-mask : DMA bit mask 9 + 10 + The device tree may optionally contain sub-nodes describing partitions of the 11 + address space. See partition.txt for more detail. 12 + 13 + Examples: 14 + 15 + nand: nand@ff900000 { 16 + #address-cells = <1>; 17 + #size-cells = <1>; 18 + compatible = "denali,denali-nand-dt"; 19 + reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; 20 + reg-names = "nand_data", "denali_reg"; 21 + interrupts = <0 144 4>; 22 + dma-mask = <0xffffffff>; 23 + };
+49
Documentation/devicetree/bindings/mtd/flctl-nand.txt
··· 1 + FLCTL NAND controller 2 + 3 + Required properties: 4 + - compatible : "renesas,shmobile-flctl-sh7372" 5 + - reg : Address range of the FLCTL 6 + - interrupts : flste IRQ number 7 + - nand-bus-width : bus width to NAND chip 8 + 9 + Optional properties: 10 + - dmas: DMA specifier(s) 11 + - dma-names: name for each DMA specifier. Valid names are 12 + "data_tx", "data_rx", "ecc_tx", "ecc_rx" 13 + 14 + The DMA fields are not used yet in the driver but are listed here for 15 + completing the bindings. 16 + 17 + The device tree may optionally contain sub-nodes describing partitions of the 18 + address space. See partition.txt for more detail. 19 + 20 + Example: 21 + 22 + flctl@e6a30000 { 23 + #address-cells = <1>; 24 + #size-cells = <1>; 25 + compatible = "renesas,shmobile-flctl-sh7372"; 26 + reg = <0xe6a30000 0x100>; 27 + interrupts = <0x0d80>; 28 + 29 + nand-bus-width = <16>; 30 + 31 + dmas = <&dmac 1 /* data_tx */ 32 + &dmac 2>; /* data_rx */ 33 + dma-names = "data_tx", "data_rx"; 34 + 35 + system@0 { 36 + label = "system"; 37 + reg = <0x0 0x8000000>; 38 + }; 39 + 40 + userdata@8000000 { 41 + label = "userdata"; 42 + reg = <0x8000000 0x10000000>; 43 + }; 44 + 45 + cache@18000000 { 46 + label = "cache"; 47 + reg = <0x18000000 0x8000000>; 48 + }; 49 + };
+5 -7
Documentation/devicetree/bindings/mtd/fsmc-nand.txt
··· 3 3 Required properties: 4 4 - compatible : "st,spear600-fsmc-nand" 5 5 - reg : Address range of the mtd chip 6 - - reg-names: Should contain the reg names "fsmc_regs" and "nand_data" 7 - - st,ale-off : Chip specific offset to ALE 8 - - st,cle-off : Chip specific offset to CLE 6 + - reg-names: Should contain the reg names "fsmc_regs", "nand_data", "nand_addr" and "nand_cmd" 9 7 10 8 Optional properties: 11 9 - bank-width : Width (in bytes) of the device. If not present, the width ··· 17 19 #address-cells = <1>; 18 20 #size-cells = <1>; 19 21 reg = <0xd1800000 0x1000 /* FSMC Register */ 20 - 0xd2000000 0x4000>; /* NAND Base */ 21 - reg-names = "fsmc_regs", "nand_data"; 22 - st,ale-off = <0x20000>; 23 - st,cle-off = <0x10000>; 22 + 0xd2000000 0x0010 /* NAND Base DATA */ 23 + 0xd2020000 0x0010 /* NAND Base ADDR */ 24 + 0xd2010000 0x0010>; /* NAND Base CMD */ 25 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 24 26 25 27 bank-width = <1>; 26 28 nand-skip-bbtscan;
+29
Documentation/devicetree/bindings/mtd/m25p80.txt
··· 1 + * MTD SPI driver for ST M25Pxx (and similar) serial flash chips 2 + 3 + Required properties: 4 + - #address-cells, #size-cells : Must be present if the device has sub-nodes 5 + representing partitions. 6 + - compatible : Should be the manufacturer and the name of the chip. Bear in mind 7 + the DT binding is not Linux-only, but in case of Linux, see the 8 + "m25p_ids" table in drivers/mtd/devices/m25p80.c for the list of 9 + supported chips. 10 + - reg : Chip-Select number 11 + - spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at 12 + 13 + Optional properties: 14 + - m25p,fast-read : Use the "fast read" opcode to read data from the chip instead 15 + of the usual "read" opcode. This opcode is not supported by 16 + all chips and support for it can not be detected at runtime. 17 + Refer to your chips' datasheet to check if this is supported 18 + by your chip. 19 + 20 + Example: 21 + 22 + flash: m25p80@0 { 23 + #address-cells = <1>; 24 + #size-cells = <1>; 25 + compatible = "spansion,m25p80"; 26 + reg = <0>; 27 + spi-max-frequency = <40000000>; 28 + m25p,fast-read; 29 + };
+3
Documentation/devicetree/bindings/mtd/mtd-physmap.txt
··· 23 23 unaligned accesses as implemented in the JFFS2 code via memcpy(). 24 24 By defining "no-unaligned-direct-access", the flash will not be 25 25 exposed directly to the MTD users (e.g. JFFS2) any more. 26 + - linux,mtd-name: allow to specify the mtd name for retro capability with 27 + physmap-flash drivers as boot loader pass the mtd partition via the old 28 + device name physmap-flash. 26 29 27 30 For JEDEC compatible devices, the following additional properties 28 31 are defined:
+7 -7
arch/arm/boot/dts/spear13xx.dtsi
··· 73 73 400000 74 74 500000 75 75 600000 >; 76 - status = "disable"; 76 + status = "disabled"; 77 77 }; 78 78 79 79 ahb { ··· 118 118 compatible = "st,spear600-fsmc-nand"; 119 119 #address-cells = <1>; 120 120 #size-cells = <1>; 121 - reg = <0xb0000000 0x1000 /* FSMC Register */ 122 - 0xb0800000 0x0010>; /* NAND Base */ 123 - reg-names = "fsmc_regs", "nand_data"; 121 + reg = <0xb0000000 0x1000 /* FSMC Register*/ 122 + 0xb0800000 0x0010 /* NAND Base DATA */ 123 + 0xb0820000 0x0010 /* NAND Base ADDR */ 124 + 0xb0810000 0x0010>; /* NAND Base CMD */ 125 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 124 126 interrupts = <0 20 0x4 125 127 0 21 0x4 126 128 0 22 0x4 127 129 0 23 0x4>; 128 - st,ale-off = <0x20000>; 129 - st,cle-off = <0x10000>; 130 130 st,mode = <2>; 131 131 status = "disabled"; 132 132 }; ··· 144 144 compatible = "st,pcm-audio"; 145 145 #address-cells = <0>; 146 146 #size-cells = <0>; 147 - status = "disable"; 147 + status = "disabled"; 148 148 }; 149 149 150 150 smi: flash@ea000000 {
+4 -4
arch/arm/boot/dts/spear300.dtsi
··· 38 38 #address-cells = <1>; 39 39 #size-cells = <1>; 40 40 reg = <0x94000000 0x1000 /* FSMC Register */ 41 - 0x80000000 0x0010>; /* NAND Base */ 42 - reg-names = "fsmc_regs", "nand_data"; 43 - st,ale-off = <0x20000>; 44 - st,cle-off = <0x10000>; 41 + 0x80000000 0x0010 /* NAND Base DATA */ 42 + 0x80020000 0x0010 /* NAND Base ADDR */ 43 + 0x80010000 0x0010>; /* NAND Base CMD */ 44 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 45 45 status = "disabled"; 46 46 }; 47 47
+4 -4
arch/arm/boot/dts/spear310.dtsi
··· 33 33 #address-cells = <1>; 34 34 #size-cells = <1>; 35 35 reg = <0x44000000 0x1000 /* FSMC Register */ 36 - 0x40000000 0x0010>; /* NAND Base */ 37 - reg-names = "fsmc_regs", "nand_data"; 38 - st,ale-off = <0x10000>; 39 - st,cle-off = <0x20000>; 36 + 0x40000000 0x0010 /* NAND Base DATA */ 37 + 0x40020000 0x0010 /* NAND Base ADDR */ 38 + 0x40010000 0x0010>; /* NAND Base CMD */ 39 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 40 40 status = "disabled"; 41 41 }; 42 42
+4 -4
arch/arm/boot/dts/spear320.dtsi
··· 40 40 #address-cells = <1>; 41 41 #size-cells = <1>; 42 42 reg = <0x4c000000 0x1000 /* FSMC Register */ 43 - 0x50000000 0x0010>; /* NAND Base */ 44 - reg-names = "fsmc_regs", "nand_data"; 45 - st,ale-off = <0x20000>; 46 - st,cle-off = <0x10000>; 43 + 0x50000000 0x0010 /* NAND Base DATA */ 44 + 0x50020000 0x0010 /* NAND Base ADDR */ 45 + 0x50010000 0x0010>; /* NAND Base CMD */ 46 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 47 47 status = "disabled"; 48 48 }; 49 49
+4 -4
arch/arm/boot/dts/spear600.dtsi
··· 76 76 #address-cells = <1>; 77 77 #size-cells = <1>; 78 78 reg = <0xd1800000 0x1000 /* FSMC Register */ 79 - 0xd2000000 0x4000>; /* NAND Base */ 80 - reg-names = "fsmc_regs", "nand_data"; 81 - st,ale-off = <0x20000>; 82 - st,cle-off = <0x10000>; 79 + 0xd2000000 0x0010 /* NAND Base DATA */ 80 + 0xd2020000 0x0010 /* NAND Base ADDR */ 81 + 0xd2010000 0x0010>; /* NAND Base CMD */ 82 + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; 83 83 status = "disabled"; 84 84 }; 85 85
+1 -1
arch/arm/configs/nhk8815_defconfig
··· 57 57 CONFIG_MTD_BLOCK=y 58 58 CONFIG_MTD_NAND=y 59 59 CONFIG_MTD_NAND_ECC_SMC=y 60 - CONFIG_MTD_NAND_NOMADIK=y 60 + CONFIG_MTD_NAND_FSMC=y 61 61 CONFIG_MTD_ONENAND=y 62 62 CONFIG_MTD_ONENAND_VERIFY_WRITE=y 63 63 CONFIG_MTD_ONENAND_GENERIC=y
+39 -32
arch/arm/mach-nomadik/board-nhk8815.c
··· 19 19 #include <linux/gpio.h> 20 20 #include <linux/mtd/mtd.h> 21 21 #include <linux/mtd/nand.h> 22 + #include <linux/mtd/fsmc.h> 22 23 #include <linux/mtd/onenand.h> 23 24 #include <linux/mtd/partitions.h> 24 25 #include <linux/i2c.h> ··· 34 33 #include <asm/mach/arch.h> 35 34 #include <asm/mach/flash.h> 36 35 #include <asm/mach/time.h> 37 - #include <mach/fsmc.h> 38 36 #include <mach/irqs.h> 39 37 40 38 #include "cpu-8815.h" ··· 42 42 #define SRC_CR_INIT_MASK 0x00007fff 43 43 #define SRC_CR_INIT_VAL 0x2aaa8000 44 44 45 + #define ALE_OFF 0x1000000 46 + #define CLE_OFF 0x800000 47 + 45 48 /* These addresses span 16MB, so use three individual pages */ 46 49 static struct resource nhk8815_nand_resources[] = { 47 50 { 51 + .name = "nand_data", 52 + .start = 0x40000000, 53 + .end = 0x40000000 + SZ_16K - 1, 54 + .flags = IORESOURCE_MEM, 55 + }, { 48 56 .name = "nand_addr", 49 - .start = NAND_IO_ADDR, 50 - .end = NAND_IO_ADDR + 0xfff, 57 + .start = 0x40000000 + ALE_OFF, 58 + .end = 0x40000000 +ALE_OFF + SZ_16K - 1, 51 59 .flags = IORESOURCE_MEM, 52 60 }, { 53 61 .name = "nand_cmd", 54 - .start = NAND_IO_CMD, 55 - .end = NAND_IO_CMD + 0xfff, 62 + .start = 0x40000000 + CLE_OFF, 63 + .end = 0x40000000 + CLE_OFF + SZ_16K - 1, 56 64 .flags = IORESOURCE_MEM, 57 65 }, { 58 - .name = "nand_data", 59 - .start = NAND_IO_DATA, 60 - .end = NAND_IO_DATA + 0xfff, 66 + .name = "fsmc_regs", 67 + .start = NOMADIK_FSMC_BASE, 68 + .end = NOMADIK_FSMC_BASE + SZ_4K - 1, 61 69 .flags = IORESOURCE_MEM, 62 - } 70 + }, 63 71 }; 64 - 65 - static int nhk8815_nand_init(void) 66 - { 67 - /* FSMC setup for nand chip select (8-bit nand in 8815NHK) */ 68 - writel(0x0000000E, FSMC_PCR(0)); 69 - writel(0x000D0A00, FSMC_PMEM(0)); 70 - writel(0x00100A00, FSMC_PATT(0)); 71 - 72 - /* enable access to the chip select area */ 73 - writel(readl(FSMC_PCR(0)) | 0x04, FSMC_PCR(0)); 74 - 75 - return 0; 76 - } 77 72 78 73 /* 79 74 * These partitions are the same as those used in the 2.6.20 release ··· 103 108 } 
104 109 }; 105 110 106 - static struct nomadik_nand_platform_data nhk8815_nand_data = { 107 - .parts = nhk8815_partitions, 108 - .nparts = ARRAY_SIZE(nhk8815_partitions), 109 - .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING, 110 - .init = nhk8815_nand_init, 111 + static struct fsmc_nand_timings nhk8815_nand_timings = { 112 + .thiz = 0, 113 + .thold = 0x10, 114 + .twait = 0x0A, 115 + .tset = 0, 116 + }; 117 + 118 + static struct fsmc_nand_platform_data nhk8815_nand_platform_data = { 119 + .nand_timings = &nhk8815_nand_timings, 120 + .partitions = nhk8815_partitions, 121 + .nr_partitions = ARRAY_SIZE(nhk8815_partitions), 122 + .width = FSMC_NAND_BW8, 111 123 }; 112 124 113 125 static struct platform_device nhk8815_nand_device = { 114 - .name = "nomadik_nand", 115 - .dev = { 116 - .platform_data = &nhk8815_nand_data, 126 + .name = "fsmc-nand", 127 + .id = -1, 128 + .resource = nhk8815_nand_resources, 129 + .num_resources = ARRAY_SIZE(nhk8815_nand_resources), 130 + .dev = { 131 + .platform_data = &nhk8815_nand_platform_data, 117 132 }, 118 - .resource = nhk8815_nand_resources, 119 - .num_resources = ARRAY_SIZE(nhk8815_nand_resources), 120 133 }; 121 134 122 135 /* These are the partitions for the OneNand device, different from above */ ··· 178 175 .resource = nhk8815_onenand_resource, 179 176 .num_resources = ARRAY_SIZE(nhk8815_onenand_resource), 180 177 }; 178 + 179 + /* bus control reg. and bus timing reg. for CS0..CS3 */ 180 + #define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3)) 181 + #define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04) 181 182 182 183 static void __init nhk8815_onenand_init(void) 183 184 {
-29
arch/arm/mach-nomadik/include/mach/fsmc.h
··· 1 - 2 - /* Definitions for the Nomadik FSMC "Flexible Static Memory controller" */ 3 - 4 - #ifndef __ASM_ARCH_FSMC_H 5 - #define __ASM_ARCH_FSMC_H 6 - 7 - #include <mach/hardware.h> 8 - /* 9 - * Register list 10 - */ 11 - 12 - /* bus control reg. and bus timing reg. for CS0..CS3 */ 13 - #define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3)) 14 - #define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04) 15 - 16 - /* PC-card and NAND: 17 - * PCR = control register 18 - * PMEM = memory timing 19 - * PATT = attribute timing 20 - * PIO = I/O timing 21 - * PECCR = ECC result 22 - */ 23 - #define FSMC_PCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x00) 24 - #define FSMC_PMEM(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x08) 25 - #define FSMC_PATT(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x0c) 26 - #define FSMC_PIO(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x10) 27 - #define FSMC_PECCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x14) 28 - 29 - #endif /* __ASM_ARCH_FSMC_H */
+12 -2
arch/arm/mach-u300/core.c
··· 250 250 */ 251 251 static struct resource fsmc_resources[] = { 252 252 { 253 + .name = "nand_addr", 254 + .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE, 255 + .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE + SZ_16K - 1, 256 + .flags = IORESOURCE_MEM, 257 + }, 258 + { 259 + .name = "nand_cmd", 260 + .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE, 261 + .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE + SZ_16K - 1, 262 + .flags = IORESOURCE_MEM, 263 + }, 264 + { 253 265 .name = "nand_data", 254 266 .start = U300_NAND_CS0_PHYS_BASE, 255 267 .end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1, ··· 1504 1492 .nr_partitions = ARRAY_SIZE(u300_partitions), 1505 1493 .options = NAND_SKIP_BBTSCAN, 1506 1494 .width = FSMC_NAND_BW8, 1507 - .ale_off = PLAT_NAND_ALE, 1508 - .cle_off = PLAT_NAND_CLE, 1509 1495 }; 1510 1496 1511 1497 static struct platform_device nand_device = {
+2 -1
drivers/bcma/driver_chipcommon_pmu.c
··· 13 13 #include <linux/export.h> 14 14 #include <linux/bcma/bcma.h> 15 15 16 - static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) 16 + u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) 17 17 { 18 18 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 19 19 bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); 20 20 return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); 21 21 } 22 + EXPORT_SYMBOL_GPL(bcma_chipco_pll_read); 22 23 23 24 void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) 24 25 {
+1
drivers/clk/clk-nomadik.c
··· 20 20 clk_register_clkdev(clk, NULL, "gpio.2"); 21 21 clk_register_clkdev(clk, NULL, "gpio.3"); 22 22 clk_register_clkdev(clk, NULL, "rng"); 23 + clk_register_clkdev(clk, NULL, "fsmc-nand"); 23 24 24 25 /* 25 26 * The 2.4 MHz TIMCLK reference clock is active at boot time, this is
+2 -5
drivers/mtd/ar7part.c
··· 26 26 #include <linux/mtd/mtd.h> 27 27 #include <linux/mtd/partitions.h> 28 28 #include <linux/bootmem.h> 29 - #include <linux/magic.h> 30 29 #include <linux/module.h> 30 + 31 + #include <uapi/linux/magic.h> 31 32 32 33 #define AR7_PARTS 4 33 34 #define ROOT_OFFSET 0xe0000 34 35 35 36 #define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42) 36 37 #define LOADER_MAGIC2 le32_to_cpu(0xfeed1281) 37 - 38 - #ifndef SQUASHFS_MAGIC 39 - #define SQUASHFS_MAGIC 0x73717368 40 - #endif 41 38 42 39 struct ar7_bin_rec { 43 40 unsigned int checksum;
+14 -18
drivers/mtd/bcm63xxpart.c
··· 37 37 38 38 #define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ 39 39 40 - #define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */ 41 - #define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */ 40 + #define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */ 42 41 43 42 #define BCM63XX_CFE_MAGIC_OFFSET 0x4e0 44 43 ··· 78 79 unsigned int rootfsaddr, kerneladdr, spareaddr; 79 80 unsigned int rootfslen, kernellen, sparelen, totallen; 80 81 unsigned int cfelen, nvramlen; 81 - int namelen = 0; 82 + unsigned int cfe_erasesize; 82 83 int i; 83 84 u32 computed_crc; 84 85 bool rootfs_first = false; ··· 86 87 if (bcm63xx_detect_cfe(master)) 87 88 return -EINVAL; 88 89 89 - cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE); 90 - nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE); 90 + cfe_erasesize = max_t(uint32_t, master->erasesize, 91 + BCM63XX_CFE_BLOCK_SIZE); 92 + 93 + cfelen = cfe_erasesize; 94 + nvramlen = cfe_erasesize; 91 95 92 96 /* Allocate memory for buffer */ 93 97 buf = vmalloc(sizeof(struct bcm_tag)); ··· 123 121 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 124 122 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE; 125 123 spareaddr = roundup(totallen, master->erasesize) + cfelen; 126 - sparelen = master->size - spareaddr - nvramlen; 127 124 128 125 if (rootfsaddr < kerneladdr) { 129 126 /* default Broadcom layout */ ··· 140 139 rootfslen = 0; 141 140 rootfsaddr = 0; 142 141 spareaddr = cfelen; 143 - sparelen = master->size - cfelen - nvramlen; 144 142 } 143 + sparelen = master->size - spareaddr - nvramlen; 145 144 146 145 /* Determine number of partitions */ 147 - namelen = 8; 148 - if (rootfslen > 0) { 146 + if (rootfslen > 0) 149 147 nrparts++; 150 - namelen += 6; 151 - } 152 - if (kernellen > 0) { 148 + 149 + if (kernellen > 0) 153 150 nrparts++; 154 - namelen += 6; 155 - } 156 151 157 152 /* Ask kernel for more memory */ 158 153 parts = kzalloc(sizeof(*parts) * nrparts + 10 
* nrparts, GFP_KERNEL); ··· 190 193 parts[curpart].name = "nvram"; 191 194 parts[curpart].offset = master->size - nvramlen; 192 195 parts[curpart].size = nvramlen; 196 + curpart++; 193 197 194 198 /* Global partition "linux" to make easy firmware upgrade */ 195 - curpart++; 196 199 parts[curpart].name = "linux"; 197 200 parts[curpart].offset = cfelen; 198 201 parts[curpart].size = master->size - cfelen - nvramlen; 199 202 200 203 for (i = 0; i < nrparts; i++) 201 - pr_info("Partition %d is %s offset %lx and length %lx\n", i, 202 - parts[i].name, (long unsigned int)(parts[i].offset), 203 - (long unsigned int)(parts[i].size)); 204 + pr_info("Partition %d is %s offset %llx and length %llx\n", i, 205 + parts[i].name, parts[i].offset, parts[i].size); 204 206 205 207 pr_info("Spare partition is offset %x and length %x\n", spareaddr, 206 208 sparelen);
+14 -2
drivers/mtd/chips/cfi_cmdset_0002.c
··· 1536 1536 UDELAY(map, chip, adr, 1); 1537 1537 } 1538 1538 1539 - /* reset on all failures. */ 1540 - map_write( map, CMD(0xF0), chip->start ); 1539 + /* 1540 + * Recovery from write-buffer programming failures requires 1541 + * the write-to-buffer-reset sequence. Since the last part 1542 + * of the sequence also works as a normal reset, we can run 1543 + * the same commands regardless of why we are here. 1544 + * See e.g. 1545 + * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1546 + */ 1547 + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1548 + cfi->device_type, NULL); 1549 + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1550 + cfi->device_type, NULL); 1551 + cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 1552 + cfi->device_type, NULL); 1541 1553 xip_enable(map, chip, adr); 1542 1554 /* FIXME - should have reset delay before continuing */ 1543 1555
+49 -44
drivers/mtd/cmdlinepart.c
··· 56 56 57 57 58 58 /* special size referring to all the remaining space in a partition */ 59 - #define SIZE_REMAINING UINT_MAX 60 - #define OFFSET_CONTINUOUS UINT_MAX 59 + #define SIZE_REMAINING ULLONG_MAX 60 + #define OFFSET_CONTINUOUS ULLONG_MAX 61 61 62 62 struct cmdline_mtd_partition { 63 63 struct cmdline_mtd_partition *next; ··· 89 89 int extra_mem_size) 90 90 { 91 91 struct mtd_partition *parts; 92 - unsigned long size, offset = OFFSET_CONTINUOUS; 92 + unsigned long long size, offset = OFFSET_CONTINUOUS; 93 93 char *name; 94 94 int name_len; 95 95 unsigned char *extra_mem; ··· 104 104 } else { 105 105 size = memparse(s, &s); 106 106 if (size < PAGE_SIZE) { 107 - printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); 107 + printk(KERN_ERR ERRP "partition size too small (%llx)\n", 108 + size); 108 109 return ERR_PTR(-EINVAL); 109 110 } 110 111 } ··· 297 296 struct mtd_partition **pparts, 298 297 struct mtd_part_parser_data *data) 299 298 { 300 - unsigned long offset; 299 + unsigned long long offset; 301 300 int i, err; 302 301 struct cmdline_mtd_partition *part; 303 302 const char *mtd_id = master->name; ··· 309 308 return err; 310 309 } 311 310 311 + /* 312 + * Search for the partition definition matching master->name. 313 + * If master->name is not set, stop at first partition definition. 
314 + */ 312 315 for (part = partitions; part; part = part->next) { 313 - if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) { 314 - for (i = 0, offset = 0; i < part->num_parts; i++) { 315 - if (part->parts[i].offset == OFFSET_CONTINUOUS) 316 - part->parts[i].offset = offset; 317 - else 318 - offset = part->parts[i].offset; 319 - 320 - if (part->parts[i].size == SIZE_REMAINING) 321 - part->parts[i].size = master->size - offset; 322 - 323 - if (part->parts[i].size == 0) { 324 - printk(KERN_WARNING ERRP 325 - "%s: skipping zero sized partition\n", 326 - part->mtd_id); 327 - part->num_parts--; 328 - memmove(&part->parts[i], 329 - &part->parts[i + 1], 330 - sizeof(*part->parts) * (part->num_parts - i)); 331 - continue; 332 - } 333 - 334 - if (offset + part->parts[i].size > master->size) { 335 - printk(KERN_WARNING ERRP 336 - "%s: partitioning exceeds flash size, truncating\n", 337 - part->mtd_id); 338 - part->parts[i].size = master->size - offset; 339 - } 340 - offset += part->parts[i].size; 341 - } 342 - 343 - *pparts = kmemdup(part->parts, 344 - sizeof(*part->parts) * part->num_parts, 345 - GFP_KERNEL); 346 - if (!*pparts) 347 - return -ENOMEM; 348 - 349 - return part->num_parts; 350 - } 316 + if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) 317 + break; 351 318 } 352 319 353 - return 0; 320 + if (!part) 321 + return 0; 322 + 323 + for (i = 0, offset = 0; i < part->num_parts; i++) { 324 + if (part->parts[i].offset == OFFSET_CONTINUOUS) 325 + part->parts[i].offset = offset; 326 + else 327 + offset = part->parts[i].offset; 328 + 329 + if (part->parts[i].size == SIZE_REMAINING) 330 + part->parts[i].size = master->size - offset; 331 + 332 + if (part->parts[i].size == 0) { 333 + printk(KERN_WARNING ERRP 334 + "%s: skipping zero sized partition\n", 335 + part->mtd_id); 336 + part->num_parts--; 337 + memmove(&part->parts[i], &part->parts[i + 1], 338 + sizeof(*part->parts) * (part->num_parts - i)); 339 + continue; 340 + } 341 + 342 + if (offset + part->parts[i].size > 
master->size) { 343 + printk(KERN_WARNING ERRP 344 + "%s: partitioning exceeds flash size, truncating\n", 345 + part->mtd_id); 346 + part->parts[i].size = master->size - offset; 347 + } 348 + offset += part->parts[i].size; 349 + } 350 + 351 + *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts, 352 + GFP_KERNEL); 353 + if (!*pparts) 354 + return -ENOMEM; 355 + 356 + return part->num_parts; 354 357 } 355 358 356 359
+2 -2
drivers/mtd/devices/bcm47xxsflash.c
··· 66 66 return err; 67 67 } 68 68 69 - static int __devexit bcm47xxsflash_remove(struct platform_device *pdev) 69 + static int bcm47xxsflash_remove(struct platform_device *pdev) 70 70 { 71 71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); 72 72 ··· 77 77 } 78 78 79 79 static struct platform_driver bcma_sflash_driver = { 80 - .remove = __devexit_p(bcm47xxsflash_remove), 80 + .remove = bcm47xxsflash_remove, 81 81 .driver = { 82 82 .name = "bcma_sflash", 83 83 .owner = THIS_MODULE,
+3 -1
drivers/mtd/devices/block2mtd.c
··· 62 62 memset(page_address(page), 0xff, PAGE_SIZE); 63 63 set_page_dirty(page); 64 64 unlock_page(page); 65 + balance_dirty_pages_ratelimited(mapping); 65 66 break; 66 67 } 67 68 ··· 153 152 memcpy(page_address(page) + offset, buf, cpylen); 154 153 set_page_dirty(page); 155 154 unlock_page(page); 155 + balance_dirty_pages_ratelimited(mapping); 156 156 } 157 157 page_cache_release(page); 158 158 ··· 435 433 } 436 434 437 435 438 - static void __devexit block2mtd_exit(void) 436 + static void block2mtd_exit(void) 439 437 { 440 438 struct list_head *pos, *next; 441 439
+1 -1
drivers/mtd/devices/docg3.c
··· 1440 1440 oobdelta = mtd->ecclayout->oobavail; 1441 1441 break; 1442 1442 default: 1443 - oobdelta = 0; 1443 + return -EINVAL; 1444 1444 } 1445 1445 if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) || 1446 1446 (ofs % DOC_LAYOUT_PAGE_SIZE))
-2
drivers/mtd/devices/docprobe.c
··· 70 70 0xe0000, 0xe2000, 0xe4000, 0xe6000, 71 71 0xe8000, 0xea000, 0xec000, 0xee000, 72 72 #endif /* CONFIG_MTD_DOCPROBE_HIGH */ 73 - #else 74 - #warning Unknown architecture for DiskOnChip. No default probe locations defined 75 73 #endif 76 74 0xffffffff }; 77 75
+30 -18
drivers/mtd/devices/m25p80.c
··· 73 73 #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 74 74 #define MAX_CMD_SIZE 5 75 75 76 - #ifdef CONFIG_M25PXX_USE_FAST_READ 77 - #define OPCODE_READ OPCODE_FAST_READ 78 - #define FAST_READ_DUMMY_BYTE 1 79 - #else 80 - #define OPCODE_READ OPCODE_NORM_READ 81 - #define FAST_READ_DUMMY_BYTE 0 82 - #endif 83 - 84 76 #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) 85 77 86 78 /****************************************************************************/ ··· 85 93 u16 addr_width; 86 94 u8 erase_opcode; 87 95 u8 *command; 96 + bool fast_read; 88 97 }; 89 98 90 99 static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) ··· 161 168 { 162 169 switch (JEDEC_MFR(jedec_id)) { 163 170 case CFI_MFR_MACRONIX: 171 + case 0xEF /* winbond */: 164 172 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 165 173 return spi_write(flash->spi, flash->command, 1); 166 174 default: ··· 336 342 struct m25p *flash = mtd_to_m25p(mtd); 337 343 struct spi_transfer t[2]; 338 344 struct spi_message m; 345 + uint8_t opcode; 339 346 340 347 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 341 348 __func__, (u32)from, len); ··· 349 354 * Should add 1 byte DUMMY_BYTE. 350 355 */ 351 356 t[0].tx_buf = flash->command; 352 - t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE; 357 + t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0); 353 358 spi_message_add_tail(&t[0], &m); 354 359 355 360 t[1].rx_buf = buf; ··· 371 376 */ 372 377 373 378 /* Set up the write data buffer. */ 374 - flash->command[0] = OPCODE_READ; 379 + opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ; 380 + flash->command[0] = opcode; 375 381 m25p_addr2cmd(flash, from, flash->command); 376 382 377 383 spi_sync(flash->spi, &m); 378 384 379 - *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE; 385 + *retlen = m.actual_length - m25p_cmdsz(flash) - 386 + (flash->fast_read ? 
1 : 0); 380 387 381 388 mutex_unlock(&flash->lock); 382 389 ··· 661 664 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 662 665 663 666 /* Micron */ 664 - { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, 667 + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, 668 + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, 665 669 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, 666 670 667 671 /* Spansion -- single (large) sector size only, at least ··· 743 745 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 744 746 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 745 747 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, 748 + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, 749 + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, 746 750 747 751 /* Catalyst / On Semiconductor -- non-JEDEC */ 748 752 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, ··· 756 756 }; 757 757 MODULE_DEVICE_TABLE(spi, m25p_ids); 758 758 759 - static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) 759 + static const struct spi_device_id *jedec_probe(struct spi_device *spi) 760 760 { 761 761 int tmp; 762 762 u8 code = OPCODE_RDID; ··· 801 801 * matches what the READ command supports, at least until this driver 802 802 * understands FAST_READ (for clocks over 25 MHz). 
803 803 */ 804 - static int __devinit m25p_probe(struct spi_device *spi) 804 + static int m25p_probe(struct spi_device *spi) 805 805 { 806 806 const struct spi_device_id *id = spi_get_device_id(spi); 807 807 struct flash_platform_data *data; ··· 809 809 struct flash_info *info; 810 810 unsigned i; 811 811 struct mtd_part_parser_data ppdata; 812 + struct device_node __maybe_unused *np = spi->dev.of_node; 812 813 813 814 #ifdef CONFIG_MTD_OF_PARTS 814 - if (!of_device_is_available(spi->dev.of_node)) 815 + if (!of_device_is_available(np)) 815 816 return -ENODEV; 816 817 #endif 817 818 ··· 864 863 flash = kzalloc(sizeof *flash, GFP_KERNEL); 865 864 if (!flash) 866 865 return -ENOMEM; 867 - flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL); 866 + flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0), 867 + GFP_KERNEL); 868 868 if (!flash->command) { 869 869 kfree(flash); 870 870 return -ENOMEM; ··· 922 920 flash->page_size = info->page_size; 923 921 flash->mtd.writebufsize = flash->page_size; 924 922 923 + flash->fast_read = false; 924 + #ifdef CONFIG_OF 925 + if (np && of_property_read_bool(np, "m25p,fast-read")) 926 + flash->fast_read = true; 927 + #endif 928 + 929 + #ifdef CONFIG_M25PXX_USE_FAST_READ 930 + flash->fast_read = true; 931 + #endif 932 + 925 933 if (info->addr_width) 926 934 flash->addr_width = info->addr_width; 927 935 else { ··· 973 961 } 974 962 975 963 976 - static int __devexit m25p_remove(struct spi_device *spi) 964 + static int m25p_remove(struct spi_device *spi) 977 965 { 978 966 struct m25p *flash = dev_get_drvdata(&spi->dev); 979 967 int status; ··· 995 983 }, 996 984 .id_table = m25p_ids, 997 985 .probe = m25p_probe, 998 - .remove = __devexit_p(m25p_remove), 986 + .remove = m25p_remove, 999 987 1000 988 /* REVISIT: many of these chips have deep power-down modes, which 1001 989 * should clearly be entered on suspend() to minimize power use.
+7 -7
drivers/mtd/devices/mtd_dataflash.c
··· 618 618 /* 619 619 * Register DataFlash device with MTD subsystem. 620 620 */ 621 - static int __devinit 621 + static int 622 622 add_dataflash_otp(struct spi_device *spi, char *name, 623 623 int nr_pages, int pagesize, int pageoffset, char revision) 624 624 { ··· 679 679 return err; 680 680 } 681 681 682 - static inline int __devinit 682 + static inline int 683 683 add_dataflash(struct spi_device *spi, char *name, 684 684 int nr_pages, int pagesize, int pageoffset) 685 685 { ··· 705 705 #define IS_POW2PS 0x0001 /* uses 2^N byte pages */ 706 706 }; 707 707 708 - static struct flash_info __devinitdata dataflash_data [] = { 708 + static struct flash_info dataflash_data[] = { 709 709 710 710 /* 711 711 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries, ··· 740 740 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, 741 741 }; 742 742 743 - static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 743 + static struct flash_info *jedec_probe(struct spi_device *spi) 744 744 { 745 745 int tmp; 746 746 uint8_t code = OP_READ_ID; ··· 823 823 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11 824 824 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11 825 825 */ 826 - static int __devinit dataflash_probe(struct spi_device *spi) 826 + static int dataflash_probe(struct spi_device *spi) 827 827 { 828 828 int status; 829 829 struct flash_info *info; ··· 897 897 return status; 898 898 } 899 899 900 - static int __devexit dataflash_remove(struct spi_device *spi) 900 + static int dataflash_remove(struct spi_device *spi) 901 901 { 902 902 struct dataflash *flash = dev_get_drvdata(&spi->dev); 903 903 int status; ··· 920 920 }, 921 921 922 922 .probe = dataflash_probe, 923 - .remove = __devexit_p(dataflash_remove), 923 + .remove = dataflash_remove, 924 924 925 925 /* FIXME: investigate suspend and resume... */ 926 926 };
+6 -17
drivers/mtd/devices/spear_smi.c
··· 756 756 757 757 758 758 #ifdef CONFIG_OF 759 - static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, 759 + static int spear_smi_probe_config_dt(struct platform_device *pdev, 760 760 struct device_node *np) 761 761 { 762 762 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev); ··· 799 799 return 0; 800 800 } 801 801 #else 802 - static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, 802 + static int spear_smi_probe_config_dt(struct platform_device *pdev, 803 803 struct device_node *np) 804 804 { 805 805 return -ENOSYS; ··· 901 901 * and do proper init for any found one. 902 902 * Returns 0 on success, non zero otherwise 903 903 */ 904 - static int __devinit spear_smi_probe(struct platform_device *pdev) 904 + static int spear_smi_probe(struct platform_device *pdev) 905 905 { 906 906 struct device_node *np = pdev->dev.of_node; 907 907 struct spear_smi_plat_data *pdata = NULL; ··· 1016 1016 * 1017 1017 * free all allocations and delete the partitions. 1018 1018 */ 1019 - static int __devexit spear_smi_remove(struct platform_device *pdev) 1019 + static int spear_smi_remove(struct platform_device *pdev) 1020 1020 { 1021 1021 struct spear_smi *dev; 1022 1022 struct spear_snor_flash *flash; ··· 1092 1092 #endif 1093 1093 }, 1094 1094 .probe = spear_smi_probe, 1095 - .remove = __devexit_p(spear_smi_remove), 1095 + .remove = spear_smi_remove, 1096 1096 }; 1097 - 1098 - static int spear_smi_init(void) 1099 - { 1100 - return platform_driver_register(&spear_smi_driver); 1101 - } 1102 - module_init(spear_smi_init); 1103 - 1104 - static void spear_smi_exit(void) 1105 - { 1106 - platform_driver_unregister(&spear_smi_driver); 1107 - } 1108 - module_exit(spear_smi_exit); 1097 + module_platform_driver(spear_smi_driver); 1109 1098 1110 1099 MODULE_LICENSE("GPL"); 1111 1100 MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
+5 -5
drivers/mtd/devices/sst25l.c
··· 64 64 65 65 #define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 66 66 67 - static struct flash_info __devinitdata sst25l_flash_info[] = { 67 + static struct flash_info sst25l_flash_info[] = { 68 68 {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 69 69 {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 70 70 }; ··· 313 313 return ret; 314 314 } 315 315 316 - static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi) 316 + static struct flash_info *sst25l_match_device(struct spi_device *spi) 317 317 { 318 318 struct flash_info *flash_info = NULL; 319 319 struct spi_message m; ··· 353 353 return flash_info; 354 354 } 355 355 356 - static int __devinit sst25l_probe(struct spi_device *spi) 356 + static int sst25l_probe(struct spi_device *spi) 357 357 { 358 358 struct flash_info *flash_info; 359 359 struct sst25l_flash *flash; ··· 411 411 return 0; 412 412 } 413 413 414 - static int __devexit sst25l_remove(struct spi_device *spi) 414 + static int sst25l_remove(struct spi_device *spi) 415 415 { 416 416 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 417 417 int ret; ··· 428 428 .owner = THIS_MODULE, 429 429 }, 430 430 .probe = sst25l_probe, 431 - .remove = __devexit_p(sst25l_remove), 431 + .remove = sst25l_remove, 432 432 }; 433 433 434 434 module_spi_driver(sst25l_driver);
-7
drivers/mtd/maps/Kconfig
··· 358 358 IXP2000 based board and would like to use the flash chips on it, 359 359 say 'Y'. 360 360 361 - config MTD_FORTUNET 362 - tristate "CFI Flash device mapped on the FortuNet board" 363 - depends on MTD_CFI && SA1100_FORTUNET 364 - help 365 - This enables access to the Flash on the FortuNet board. If you 366 - have such a board, say 'Y'. 367 - 368 361 config MTD_AUTCPU12 369 362 bool "NV-RAM mapping AUTCPU12 board" 370 363 depends on ARCH_AUTCPU12
-1
drivers/mtd/maps/Makefile
··· 39 39 obj-$(CONFIG_MTD_PCI) += pci.o 40 40 obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 41 41 obj-$(CONFIG_MTD_IMPA7) += impa7.o 42 - obj-$(CONFIG_MTD_FORTUNET) += fortunet.o 43 42 obj-$(CONFIG_MTD_UCLINUX) += uclinux.o 44 43 obj-$(CONFIG_MTD_NETtel) += nettel.o 45 44 obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
+3 -4
drivers/mtd/maps/amd76xrom.c
··· 100 100 } 101 101 102 102 103 - static int __devinit amd76xrom_init_one (struct pci_dev *pdev, 104 - const struct pci_device_id *ent) 103 + static int amd76xrom_init_one(struct pci_dev *pdev, 104 + const struct pci_device_id *ent) 105 105 { 106 106 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 107 107 u8 byte; ··· 289 289 } 290 290 291 291 292 - static void __devexit amd76xrom_remove_one (struct pci_dev *pdev) 292 + static void amd76xrom_remove_one(struct pci_dev *pdev) 293 293 { 294 294 struct amd76xrom_window *window = &amd76xrom_window; 295 295 ··· 347 347 MODULE_LICENSE("GPL"); 348 348 MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>"); 349 349 MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge"); 350 -
+3 -3
drivers/mtd/maps/autcpu12-nvram.c
··· 33 33 struct map_info map; 34 34 }; 35 35 36 - static int __devinit autcpu12_nvram_probe(struct platform_device *pdev) 36 + static int autcpu12_nvram_probe(struct platform_device *pdev) 37 37 { 38 38 map_word tmp, save0, save1; 39 39 struct resource *res; ··· 105 105 return -ENOMEM; 106 106 } 107 107 108 - static int __devexit autcpu12_nvram_remove(struct platform_device *pdev) 108 + static int autcpu12_nvram_remove(struct platform_device *pdev) 109 109 { 110 110 struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev); 111 111 ··· 121 121 .owner = THIS_MODULE, 122 122 }, 123 123 .probe = autcpu12_nvram_probe, 124 - .remove = __devexit_p(autcpu12_nvram_remove), 124 + .remove = autcpu12_nvram_remove, 125 125 }; 126 126 module_platform_driver(autcpu12_nvram_driver); 127 127
+5 -4
drivers/mtd/maps/bfin-async-flash.c
··· 30 30 #include <linux/io.h> 31 31 #include <asm/unaligned.h> 32 32 33 - #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 33 + #define pr_devinit(fmt, args...) \ 34 + ({ static const char __fmt[] = fmt; printk(__fmt, ## args); }) 34 35 35 36 #define DRIVER_NAME "bfin-async-flash" 36 37 ··· 124 123 125 124 static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 126 125 127 - static int __devinit bfin_flash_probe(struct platform_device *pdev) 126 + static int bfin_flash_probe(struct platform_device *pdev) 128 127 { 129 128 int ret; 130 129 struct physmap_flash_data *pdata = pdev->dev.platform_data; ··· 173 172 return 0; 174 173 } 175 174 176 - static int __devexit bfin_flash_remove(struct platform_device *pdev) 175 + static int bfin_flash_remove(struct platform_device *pdev) 177 176 { 178 177 struct async_state *state = platform_get_drvdata(pdev); 179 178 gpio_free(state->enet_flash_pin); ··· 185 184 186 185 static struct platform_driver bfin_flash_driver = { 187 186 .probe = bfin_flash_probe, 188 - .remove = __devexit_p(bfin_flash_remove), 187 + .remove = bfin_flash_remove, 189 188 .driver = { 190 189 .name = DRIVER_NAME, 191 190 },
+3 -3
drivers/mtd/maps/ck804xrom.c
··· 112 112 } 113 113 114 114 115 - static int __devinit ck804xrom_init_one (struct pci_dev *pdev, 116 - const struct pci_device_id *ent) 115 + static int ck804xrom_init_one(struct pci_dev *pdev, 116 + const struct pci_device_id *ent) 117 117 { 118 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 119 119 u8 byte; ··· 320 320 } 321 321 322 322 323 - static void __devexit ck804xrom_remove_one (struct pci_dev *pdev) 323 + static void ck804xrom_remove_one(struct pci_dev *pdev) 324 324 { 325 325 struct ck804xrom_window *window = &ck804xrom_window; 326 326
+3 -3
drivers/mtd/maps/esb2rom.c
··· 144 144 pci_dev_put(window->pdev); 145 145 } 146 146 147 - static int __devinit esb2rom_init_one(struct pci_dev *pdev, 147 + static int esb2rom_init_one(struct pci_dev *pdev, 148 148 const struct pci_device_id *ent) 149 149 { 150 150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; ··· 378 378 return 0; 379 379 } 380 380 381 - static void __devexit esb2rom_remove_one (struct pci_dev *pdev) 381 + static void esb2rom_remove_one(struct pci_dev *pdev) 382 382 { 383 383 struct esb2rom_window *window = &esb2rom_window; 384 384 esb2rom_cleanup(window); 385 385 } 386 386 387 - static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = { 387 + static struct pci_device_id esb2rom_pci_tbl[] = { 388 388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 389 389 PCI_ANY_ID, PCI_ANY_ID, }, 390 390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
-277
drivers/mtd/maps/fortunet.c
··· 1 - /* fortunet.c memory map 2 - * 3 - */ 4 - 5 - #include <linux/module.h> 6 - #include <linux/types.h> 7 - #include <linux/kernel.h> 8 - #include <linux/init.h> 9 - #include <linux/string.h> 10 - 11 - #include <linux/mtd/mtd.h> 12 - #include <linux/mtd/map.h> 13 - #include <linux/mtd/partitions.h> 14 - 15 - #include <asm/io.h> 16 - 17 - #define MAX_NUM_REGIONS 4 18 - #define MAX_NUM_PARTITIONS 8 19 - 20 - #define DEF_WINDOW_ADDR_PHY 0x00000000 21 - #define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes 22 - 23 - #define MTD_FORTUNET_PK "MTD FortuNet: " 24 - 25 - #define MAX_NAME_SIZE 128 26 - 27 - struct map_region 28 - { 29 - int window_addr_physical; 30 - int altbankwidth; 31 - struct map_info map_info; 32 - struct mtd_info *mymtd; 33 - struct mtd_partition parts[MAX_NUM_PARTITIONS]; 34 - char map_name[MAX_NAME_SIZE]; 35 - char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE]; 36 - }; 37 - 38 - static struct map_region map_regions[MAX_NUM_REGIONS]; 39 - static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0}; 40 - static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0}; 41 - 42 - 43 - 44 - struct map_info default_map = { 45 - .size = DEF_WINDOW_SIZE, 46 - .bankwidth = 4, 47 - }; 48 - 49 - static char * __init get_string_option(char *dest,int dest_size,char *sor) 50 - { 51 - if(!dest_size) 52 - return sor; 53 - dest_size--; 54 - while(*sor) 55 - { 56 - if(*sor==',') 57 - { 58 - sor++; 59 - break; 60 - } 61 - else if(*sor=='\"') 62 - { 63 - sor++; 64 - while(*sor) 65 - { 66 - if(*sor=='\"') 67 - { 68 - sor++; 69 - break; 70 - } 71 - *dest = *sor; 72 - dest++; 73 - sor++; 74 - dest_size--; 75 - if(!dest_size) 76 - { 77 - *dest = 0; 78 - return sor; 79 - } 80 - } 81 - } 82 - else 83 - { 84 - *dest = *sor; 85 - dest++; 86 - sor++; 87 - dest_size--; 88 - if(!dest_size) 89 - { 90 - *dest = 0; 91 - return sor; 92 - } 93 - } 94 - } 95 - *dest = 0; 96 - return sor; 97 - } 98 - 99 - static int __init MTD_New_Region(char *line) 100 - { 101 - char string[MAX_NAME_SIZE]; 102 
- int params[6]; 103 - get_options (get_string_option(string,sizeof(string),line),6,params); 104 - if(params[0]<1) 105 - { 106 - printk(MTD_FORTUNET_PK "Bad parameters for MTD Region " 107 - " name,region-number[,base,size,bankwidth,altbankwidth]\n"); 108 - return 1; 109 - } 110 - if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS)) 111 - { 112 - printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n", 113 - params[1],MAX_NUM_REGIONS-1); 114 - return 1; 115 - } 116 - memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]])); 117 - memcpy(&map_regions[params[1]].map_info, 118 - &default_map,sizeof(map_regions[params[1]].map_info)); 119 - map_regions_set[params[1]] = 1; 120 - map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY; 121 - map_regions[params[1]].altbankwidth = 2; 122 - map_regions[params[1]].mymtd = NULL; 123 - map_regions[params[1]].map_info.name = map_regions[params[1]].map_name; 124 - strcpy(map_regions[params[1]].map_info.name,string); 125 - if(params[0]>1) 126 - { 127 - map_regions[params[1]].window_addr_physical = params[2]; 128 - } 129 - if(params[0]>2) 130 - { 131 - map_regions[params[1]].map_info.size = params[3]; 132 - } 133 - if(params[0]>3) 134 - { 135 - map_regions[params[1]].map_info.bankwidth = params[4]; 136 - } 137 - if(params[0]>4) 138 - { 139 - map_regions[params[1]].altbankwidth = params[5]; 140 - } 141 - return 1; 142 - } 143 - 144 - static int __init MTD_New_Partition(char *line) 145 - { 146 - char string[MAX_NAME_SIZE]; 147 - int params[4]; 148 - get_options (get_string_option(string,sizeof(string),line),4,params); 149 - if(params[0]<3) 150 - { 151 - printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition " 152 - " name,region-number,size,offset\n"); 153 - return 1; 154 - } 155 - if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS)) 156 - { 157 - printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n", 158 - params[1],MAX_NUM_REGIONS-1); 159 - return 1; 160 - } 161 - 
if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS) 162 - { 163 - printk(MTD_FORTUNET_PK "Out of space for partition in this region\n"); 164 - return 1; 165 - } 166 - map_regions[params[1]].parts[map_regions_parts[params[1]]].name = 167 - map_regions[params[1]]. parts_name[map_regions_parts[params[1]]]; 168 - strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string); 169 - map_regions[params[1]].parts[map_regions_parts[params[1]]].size = 170 - params[2]; 171 - map_regions[params[1]].parts[map_regions_parts[params[1]]].offset = 172 - params[3]; 173 - map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0; 174 - map_regions_parts[params[1]]++; 175 - return 1; 176 - } 177 - 178 - __setup("MTD_Region=", MTD_New_Region); 179 - __setup("MTD_Partition=", MTD_New_Partition); 180 - 181 - /* Backwards-spelling-compatibility */ 182 - __setup("MTD_Partion=", MTD_New_Partition); 183 - 184 - static int __init init_fortunet(void) 185 - { 186 - int ix,iy; 187 - for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++) 188 - { 189 - if(map_regions_parts[ix]&&(!map_regions_set[ix])) 190 - { 191 - printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n", 192 - ix); 193 - memset(&map_regions[ix],0,sizeof(map_regions[ix])); 194 - memcpy(&map_regions[ix].map_info,&default_map, 195 - sizeof(map_regions[ix].map_info)); 196 - map_regions_set[ix] = 1; 197 - map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY; 198 - map_regions[ix].altbankwidth = 2; 199 - map_regions[ix].mymtd = NULL; 200 - map_regions[ix].map_info.name = map_regions[ix].map_name; 201 - strcpy(map_regions[ix].map_info.name,"FORTUNET"); 202 - } 203 - if(map_regions_set[ix]) 204 - { 205 - iy++; 206 - printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically " 207 - " address %x size %x\n", 208 - map_regions[ix].map_info.name, 209 - map_regions[ix].window_addr_physical, 210 - map_regions[ix].map_info.size); 211 - 212 - map_regions[ix].map_info.phys = 
map_regions[ix].window_addr_physical, 213 - 214 - map_regions[ix].map_info.virt = 215 - ioremap_nocache( 216 - map_regions[ix].window_addr_physical, 217 - map_regions[ix].map_info.size); 218 - if(!map_regions[ix].map_info.virt) 219 - { 220 - int j = 0; 221 - printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n", 222 - map_regions[ix].map_info.name); 223 - for (j = 0 ; j < ix; j++) 224 - iounmap(map_regions[j].map_info.virt); 225 - return -ENXIO; 226 - } 227 - simple_map_init(&map_regions[ix].map_info); 228 - 229 - printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n", 230 - map_regions[ix].map_info.name, 231 - map_regions[ix].map_info.virt); 232 - map_regions[ix].mymtd = do_map_probe("cfi_probe", 233 - &map_regions[ix].map_info); 234 - if((!map_regions[ix].mymtd)&&( 235 - map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth)) 236 - { 237 - printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth " 238 - "for %s flash.\n", 239 - map_regions[ix].map_info.name); 240 - map_regions[ix].map_info.bankwidth = 241 - map_regions[ix].altbankwidth; 242 - map_regions[ix].mymtd = do_map_probe("cfi_probe", 243 - &map_regions[ix].map_info); 244 - } 245 - map_regions[ix].mymtd->owner = THIS_MODULE; 246 - mtd_device_register(map_regions[ix].mymtd, 247 - map_regions[ix].parts, 248 - map_regions_parts[ix]); 249 - } 250 - } 251 - if(iy) 252 - return 0; 253 - return -ENXIO; 254 - } 255 - 256 - static void __exit cleanup_fortunet(void) 257 - { 258 - int ix; 259 - for(ix=0;ix<MAX_NUM_REGIONS;ix++) 260 - { 261 - if(map_regions_set[ix]) 262 - { 263 - if( map_regions[ix].mymtd ) 264 - { 265 - mtd_device_unregister(map_regions[ix].mymtd); 266 - map_destroy( map_regions[ix].mymtd ); 267 - } 268 - iounmap((void *)map_regions[ix].map_info.virt); 269 - } 270 - } 271 - } 272 - 273 - module_init(init_fortunet); 274 - module_exit(cleanup_fortunet); 275 - 276 - MODULE_AUTHOR("FortuNet, Inc."); 277 - MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
+7 -5
drivers/mtd/maps/gpio-addr-flash.c
··· 26 26 #include <linux/slab.h> 27 27 #include <linux/types.h> 28 28 29 - #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 29 + #define pr_devinit(fmt, args...) \ 30 + ({ static const char __fmt[] = fmt; printk(__fmt, ## args); }) 30 31 31 32 #define DRIVER_NAME "gpio-addr-flash" 32 33 #define PFX DRIVER_NAME ": " ··· 143 142 * 144 143 * See gf_copy_from() caveat. 145 144 */ 146 - static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 145 + static void gf_copy_to(struct map_info *map, unsigned long to, 146 + const void *from, ssize_t len) 147 147 { 148 148 struct async_state *state = gf_map_info_to_state(map); 149 149 ··· 187 185 * ... 188 186 * }; 189 187 */ 190 - static int __devinit gpio_flash_probe(struct platform_device *pdev) 188 + static int gpio_flash_probe(struct platform_device *pdev) 191 189 { 192 190 size_t i, arr_size; 193 191 struct physmap_flash_data *pdata; ··· 260 258 return 0; 261 259 } 262 260 263 - static int __devexit gpio_flash_remove(struct platform_device *pdev) 261 + static int gpio_flash_remove(struct platform_device *pdev) 264 262 { 265 263 struct async_state *state = platform_get_drvdata(pdev); 266 264 size_t i = 0; ··· 275 273 276 274 static struct platform_driver gpio_flash_driver = { 277 275 .probe = gpio_flash_probe, 278 - .remove = __devexit_p(gpio_flash_remove), 276 + .remove = gpio_flash_remove, 279 277 .driver = { 280 278 .name = DRIVER_NAME, 281 279 },
+4 -4
drivers/mtd/maps/ichxrom.c
··· 84 84 } 85 85 86 86 87 - static int __devinit ichxrom_init_one (struct pci_dev *pdev, 88 - const struct pci_device_id *ent) 87 + static int ichxrom_init_one(struct pci_dev *pdev, 88 + const struct pci_device_id *ent) 89 89 { 90 90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 91 91 struct ichxrom_window *window = &ichxrom_window; ··· 315 315 } 316 316 317 317 318 - static void __devexit ichxrom_remove_one (struct pci_dev *pdev) 318 + static void ichxrom_remove_one(struct pci_dev *pdev) 319 319 { 320 320 struct ichxrom_window *window = &ichxrom_window; 321 321 ichxrom_cleanup(window); 322 322 } 323 323 324 - static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = { 324 + static struct pci_device_id ichxrom_pci_tbl[] = { 325 325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 326 326 PCI_ANY_ID, PCI_ANY_ID, }, 327 327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+9 -9
drivers/mtd/maps/intel_vr_nor.c
··· 63 63 #define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */ 64 64 #define TIMING_MASK 0x3FFF0000 65 65 66 - static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) 66 + static void vr_nor_destroy_partitions(struct vr_nor_mtd *p) 67 67 { 68 68 mtd_device_unregister(p->info); 69 69 } 70 70 71 - static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 71 + static int vr_nor_init_partitions(struct vr_nor_mtd *p) 72 72 { 73 73 /* register the flash bank */ 74 74 /* partition the flash bank */ 75 75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); 76 76 } 77 77 78 - static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 78 + static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 79 79 { 80 80 map_destroy(p->info); 81 81 } 82 82 83 - static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p) 83 + static int vr_nor_mtd_setup(struct vr_nor_mtd *p) 84 84 { 85 85 static const char *probe_types[] = 86 86 { "cfi_probe", "jedec_probe", NULL }; ··· 96 96 return 0; 97 97 } 98 98 99 - static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p) 99 + static void vr_nor_destroy_maps(struct vr_nor_mtd *p) 100 100 { 101 101 unsigned int exp_timing_cs0; 102 102 ··· 116 116 * Initialize the map_info structure and map the flash. 117 117 * Returns 0 on success, nonzero otherwise. 
118 118 */ 119 - static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p) 119 + static int vr_nor_init_maps(struct vr_nor_mtd *p) 120 120 { 121 121 unsigned long csr_phys, csr_len; 122 122 unsigned long win_phys, win_len; ··· 176 176 {0,} 177 177 }; 178 178 179 - static void __devexit vr_nor_pci_remove(struct pci_dev *dev) 179 + static void vr_nor_pci_remove(struct pci_dev *dev) 180 180 { 181 181 struct vr_nor_mtd *p = pci_get_drvdata(dev); 182 182 ··· 189 189 pci_disable_device(dev); 190 190 } 191 191 192 - static int __devinit 192 + static int 193 193 vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 194 194 { 195 195 struct vr_nor_mtd *p = NULL; ··· 256 256 static struct pci_driver vr_nor_pci_driver = { 257 257 .name = DRV_NAME, 258 258 .probe = vr_nor_pci_probe, 259 - .remove = __devexit_p(vr_nor_pci_remove), 259 + .remove = vr_nor_pci_remove, 260 260 .id_table = vr_nor_pci_ids, 261 261 }; 262 262
+4 -4
drivers/mtd/maps/lantiq-flash.c
··· 45 45 }; 46 46 47 47 static const char ltq_map_name[] = "ltq_nor"; 48 - static const char *ltq_probe_types[] __devinitconst = { 48 + static const char *ltq_probe_types[] = { 49 49 "cmdlinepart", "ofpart", NULL }; 50 50 51 51 static map_word ··· 109 109 spin_unlock_irqrestore(&ebu_lock, flags); 110 110 } 111 111 112 - static int __devinit 112 + static int 113 113 ltq_mtd_probe(struct platform_device *pdev) 114 114 { 115 115 struct mtd_part_parser_data ppdata; ··· 185 185 return err; 186 186 } 187 187 188 - static int __devexit 188 + static int 189 189 ltq_mtd_remove(struct platform_device *pdev) 190 190 { 191 191 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); ··· 209 209 210 210 static struct platform_driver ltq_mtd_driver = { 211 211 .probe = ltq_mtd_probe, 212 - .remove = __devexit_p(ltq_mtd_remove), 212 + .remove = ltq_mtd_remove, 213 213 .driver = { 214 214 .name = "ltq-nor", 215 215 .owner = THIS_MODULE,
+2 -2
drivers/mtd/maps/latch-addr-flash.c
··· 125 125 return 0; 126 126 } 127 127 128 - static int __devinit latch_addr_flash_probe(struct platform_device *dev) 128 + static int latch_addr_flash_probe(struct platform_device *dev) 129 129 { 130 130 struct latch_addr_flash_data *latch_addr_data; 131 131 struct latch_addr_flash_info *info; ··· 218 218 219 219 static struct platform_driver latch_addr_flash_driver = { 220 220 .probe = latch_addr_flash_probe, 221 - .remove = __devexit_p(latch_addr_flash_remove), 221 + .remove = latch_addr_flash_remove, 222 222 .driver = { 223 223 .name = DRIVER_NAME, 224 224 },
+3 -3
drivers/mtd/maps/pci.c
··· 253 253 * Generic code follows. 254 254 */ 255 255 256 - static int __devinit 256 + static int 257 257 mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 258 258 { 259 259 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data; ··· 308 308 return err; 309 309 } 310 310 311 - static void __devexit 311 + static void 312 312 mtd_pci_remove(struct pci_dev *dev) 313 313 { 314 314 struct mtd_info *mtd = pci_get_drvdata(dev); ··· 326 326 static struct pci_driver mtd_pci_driver = { 327 327 .name = "MTD PCI", 328 328 .probe = mtd_pci_probe, 329 - .remove = __devexit_p(mtd_pci_remove), 329 + .remove = mtd_pci_remove, 330 330 .id_table = mtd_pci_ids, 331 331 }; 332 332
+12 -7
drivers/mtd/maps/physmap_of.c
··· 77 77 /* Helper function to handle probing of the obsolete "direct-mapped" 78 78 * compatible binding, which has an extra "probe-type" property 79 79 * describing the type of flash probe necessary. */ 80 - static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev, 80 + static struct mtd_info *obsolete_probe(struct platform_device *dev, 81 81 struct map_info *map) 82 82 { 83 83 struct device_node *dp = dev->dev.of_node; ··· 116 116 information. */ 117 117 static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", 118 118 "ofpart", "ofoldpart", NULL }; 119 - static const char ** __devinit of_get_probes(struct device_node *dp) 119 + static const char **of_get_probes(struct device_node *dp) 120 120 { 121 121 const char *cp; 122 122 int cplen; ··· 145 145 return res; 146 146 } 147 147 148 - static void __devinit of_free_probes(const char **probes) 148 + static void of_free_probes(const char **probes) 149 149 { 150 150 if (probes != part_probe_types_def) 151 151 kfree(probes); 152 152 } 153 153 154 154 static struct of_device_id of_flash_match[]; 155 - static int __devinit of_flash_probe(struct platform_device *dev) 155 + static int of_flash_probe(struct platform_device *dev) 156 156 { 157 157 const char **part_probe_types; 158 158 const struct of_device_id *match; ··· 170 170 resource_size_t res_size; 171 171 struct mtd_part_parser_data ppdata; 172 172 bool map_indirect; 173 + const char *mtd_name; 173 174 174 175 match = of_match_device(of_flash_match, &dev->dev); 175 176 if (!match) ··· 178 177 probe_type = match->data; 179 178 180 179 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 180 + 181 + of_property_read_string(dp, "linux,mtd-name", &mtd_name); 181 182 182 183 /* 183 184 * Get number of "reg" tuples. 
Scan for MTD devices on area's ··· 237 234 goto err_out; 238 235 } 239 236 240 - info->list[i].map.name = dev_name(&dev->dev); 237 + info->list[i].map.name = mtd_name ?: dev_name(&dev->dev); 241 238 info->list[i].map.phys = res.start; 242 239 info->list[i].map.size = res_size; 243 240 info->list[i].map.bankwidth = be32_to_cpup(width); ··· 285 282 } 286 283 287 284 err = 0; 285 + info->cmtd = NULL; 288 286 if (info->list_size == 1) { 289 287 info->cmtd = info->list[0].mtd; 290 288 } else if (info->list_size > 1) { ··· 294 290 */ 295 291 info->cmtd = mtd_concat_create(mtd_list, info->list_size, 296 292 dev_name(&dev->dev)); 297 - if (info->cmtd == NULL) 298 - err = -ENXIO; 299 293 } 294 + if (info->cmtd == NULL) 295 + err = -ENXIO; 296 + 300 297 if (err) 301 298 goto err_out; 302 299
+9 -9
drivers/mtd/maps/pismo.c
··· 58 58 pismo->vpp(pismo->vpp_data, on); 59 59 } 60 60 61 - static unsigned int __devinit pismo_width_to_bytes(unsigned int width) 61 + static unsigned int pismo_width_to_bytes(unsigned int width) 62 62 { 63 63 width &= 15; 64 64 if (width > 2) ··· 66 66 return 1 << width; 67 67 } 68 68 69 - static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf, 69 + static int pismo_eeprom_read(struct i2c_client *client, void *buf, 70 70 u8 addr, size_t size) 71 71 { 72 72 int ret; ··· 88 88 return ret == ARRAY_SIZE(msg) ? size : -EIO; 89 89 } 90 90 91 - static int __devinit pismo_add_device(struct pismo_data *pismo, int i, 91 + static int pismo_add_device(struct pismo_data *pismo, int i, 92 92 struct pismo_mem *region, const char *name, void *pdata, size_t psize) 93 93 { 94 94 struct platform_device *dev; ··· 129 129 return ret; 130 130 } 131 131 132 - static int __devinit pismo_add_nor(struct pismo_data *pismo, int i, 132 + static int pismo_add_nor(struct pismo_data *pismo, int i, 133 133 struct pismo_mem *region) 134 134 { 135 135 struct physmap_flash_data data = { ··· 143 143 &data, sizeof(data)); 144 144 } 145 145 146 - static int __devinit pismo_add_sram(struct pismo_data *pismo, int i, 146 + static int pismo_add_sram(struct pismo_data *pismo, int i, 147 147 struct pismo_mem *region) 148 148 { 149 149 struct platdata_mtd_ram data = { ··· 154 154 &data, sizeof(data)); 155 155 } 156 156 157 - static void __devinit pismo_add_one(struct pismo_data *pismo, int i, 157 + static void pismo_add_one(struct pismo_data *pismo, int i, 158 158 const struct pismo_cs_block *cs, phys_addr_t base) 159 159 { 160 160 struct device *dev = &pismo->client->dev; ··· 197 197 } 198 198 } 199 199 200 - static int __devexit pismo_remove(struct i2c_client *client) 200 + static int pismo_remove(struct i2c_client *client) 201 201 { 202 202 struct pismo_data *pismo = i2c_get_clientdata(client); 203 203 int i; ··· 210 210 return 0; 211 211 } 212 212 213 - static int __devinit 
pismo_probe(struct i2c_client *client, 213 + static int pismo_probe(struct i2c_client *client, 214 214 const struct i2c_device_id *id) 215 215 { 216 216 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); ··· 267 267 .owner = THIS_MODULE, 268 268 }, 269 269 .probe = pismo_probe, 270 - .remove = __devexit_p(pismo_remove), 270 + .remove = pismo_remove, 271 271 .id_table = pismo_id, 272 272 }; 273 273
+3 -3
drivers/mtd/maps/pxa2xx-flash.c
··· 49 49 static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 50 50 51 51 52 - static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) 52 + static int pxa2xx_flash_probe(struct platform_device *pdev) 53 53 { 54 54 struct flash_platform_data *flash = pdev->dev.platform_data; 55 55 struct pxa2xx_flash_info *info; ··· 105 105 return 0; 106 106 } 107 107 108 - static int __devexit pxa2xx_flash_remove(struct platform_device *dev) 108 + static int pxa2xx_flash_remove(struct platform_device *dev) 109 109 { 110 110 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 111 111 ··· 139 139 .owner = THIS_MODULE, 140 140 }, 141 141 .probe = pxa2xx_flash_probe, 142 - .remove = __devexit_p(pxa2xx_flash_remove), 142 + .remove = pxa2xx_flash_remove, 143 143 .shutdown = pxa2xx_flash_shutdown, 144 144 }; 145 145
+3 -3
drivers/mtd/maps/sa1100-flash.c
··· 149 149 plat->exit(); 150 150 } 151 151 152 - static struct sa_info *__devinit 153 - sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) 152 + static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, 153 + struct flash_platform_data *plat) 154 154 { 155 155 struct sa_info *info; 156 156 int nr, size, i, ret = 0; ··· 246 246 247 247 static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 248 248 249 - static int __devinit sa1100_mtd_probe(struct platform_device *pdev) 249 + static int sa1100_mtd_probe(struct platform_device *pdev) 250 250 { 251 251 struct flash_platform_data *plat = pdev->dev.platform_data; 252 252 struct sa_info *info;
+4 -4
drivers/mtd/maps/scb2_flash.c
··· 69 69 }; 70 70 static int region_fail; 71 71 72 - static int __devinit 72 + static int 73 73 scb2_fixup_mtd(struct mtd_info *mtd) 74 74 { 75 75 int i; ··· 133 133 /* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */ 134 134 #define CSB5_FCR 0x41 135 135 #define CSB5_FCR_DECODE_ALL 0x0e 136 - static int __devinit 136 + static int 137 137 scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent) 138 138 { 139 139 u8 reg; ··· 197 197 return 0; 198 198 } 199 199 200 - static void __devexit 200 + static void 201 201 scb2_flash_remove(struct pci_dev *dev) 202 202 { 203 203 if (!scb2_mtd) ··· 231 231 .name = "Intel SCB2 BIOS Flash", 232 232 .id_table = scb2_flash_pci_ids, 233 233 .probe = scb2_flash_probe, 234 - .remove = __devexit_p(scb2_flash_remove), 234 + .remove = scb2_flash_remove, 235 235 }; 236 236 237 237 module_pci_driver(scb2_flash_driver);
+3 -3
drivers/mtd/maps/sun_uflash.c
··· 108 108 return 0; 109 109 } 110 110 111 - static int __devinit uflash_probe(struct platform_device *op) 111 + static int uflash_probe(struct platform_device *op) 112 112 { 113 113 struct device_node *dp = op->dev.of_node; 114 114 ··· 121 121 return uflash_devinit(op, dp); 122 122 } 123 123 124 - static int __devexit uflash_remove(struct platform_device *op) 124 + static int uflash_remove(struct platform_device *op) 125 125 { 126 126 struct uflash_dev *up = dev_get_drvdata(&op->dev); 127 127 ··· 155 155 .of_match_table = uflash_match, 156 156 }, 157 157 .probe = uflash_probe, 158 - .remove = __devexit_p(uflash_remove), 158 + .remove = uflash_remove, 159 159 }; 160 160 161 161 module_platform_driver(uflash_driver);
+5 -5
drivers/mtd/maps/vmu-flash.c
··· 596 596 } 597 597 598 598 /* Handles very basic info about the flash, queries for details */ 599 - static int __devinit vmu_connect(struct maple_device *mdev) 599 + static int vmu_connect(struct maple_device *mdev) 600 600 { 601 601 unsigned long test_flash_data, basic_flash_data; 602 602 int c, error; ··· 690 690 return error; 691 691 } 692 692 693 - static void __devexit vmu_disconnect(struct maple_device *mdev) 693 + static void vmu_disconnect(struct maple_device *mdev) 694 694 { 695 695 struct memcard *card; 696 696 struct mdev_part *mpart; ··· 772 772 } 773 773 774 774 775 - static int __devinit probe_maple_vmu(struct device *dev) 775 + static int probe_maple_vmu(struct device *dev) 776 776 { 777 777 int error; 778 778 struct maple_device *mdev = to_maple_dev(dev); ··· 789 789 return 0; 790 790 } 791 791 792 - static int __devexit remove_maple_vmu(struct device *dev) 792 + static int remove_maple_vmu(struct device *dev) 793 793 { 794 794 struct maple_device *mdev = to_maple_dev(dev); 795 795 ··· 802 802 .drv = { 803 803 .name = "Dreamcast_visual_memory", 804 804 .probe = probe_maple_vmu, 805 - .remove = __devexit_p(remove_maple_vmu), 805 + .remove = remove_maple_vmu, 806 806 }, 807 807 }; 808 808
+17 -34
drivers/mtd/mtd_blkdevs.c
··· 32 32 #include <linux/hdreg.h> 33 33 #include <linux/init.h> 34 34 #include <linux/mutex.h> 35 - #include <linux/kthread.h> 36 35 #include <asm/uaccess.h> 37 36 38 37 #include "mtdcore.h" ··· 120 121 121 122 int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) 122 123 { 123 - if (kthread_should_stop()) 124 - return 1; 125 - 126 124 return dev->bg_stop; 127 125 } 128 126 EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); 129 127 130 - static int mtd_blktrans_thread(void *arg) 128 + static void mtd_blktrans_work(struct work_struct *work) 131 129 { 132 - struct mtd_blktrans_dev *dev = arg; 130 + struct mtd_blktrans_dev *dev = 131 + container_of(work, struct mtd_blktrans_dev, work); 133 132 struct mtd_blktrans_ops *tr = dev->tr; 134 133 struct request_queue *rq = dev->rq; 135 134 struct request *req = NULL; ··· 135 138 136 139 spin_lock_irq(rq->queue_lock); 137 140 138 - while (!kthread_should_stop()) { 141 + while (1) { 139 142 int res; 140 143 141 144 dev->bg_stop = false; ··· 153 156 background_done = !dev->bg_stop; 154 157 continue; 155 158 } 156 - set_current_state(TASK_INTERRUPTIBLE); 157 - 158 - if (kthread_should_stop()) 159 - set_current_state(TASK_RUNNING); 160 - 161 - spin_unlock_irq(rq->queue_lock); 162 - schedule(); 163 - spin_lock_irq(rq->queue_lock); 164 - continue; 159 + break; 165 160 } 166 161 167 162 spin_unlock_irq(rq->queue_lock); ··· 174 185 __blk_end_request_all(req, -EIO); 175 186 176 187 spin_unlock_irq(rq->queue_lock); 177 - 178 - return 0; 179 188 } 180 189 181 190 static void mtd_blktrans_request(struct request_queue *rq) ··· 186 199 if (!dev) 187 200 while ((req = blk_fetch_request(rq)) != NULL) 188 201 __blk_end_request_all(req, -ENODEV); 189 - else { 190 - dev->bg_stop = true; 191 - wake_up_process(dev->thread); 192 - } 202 + else 203 + queue_work(dev->wq, &dev->work); 193 204 } 194 205 195 206 static int blktrans_open(struct block_device *bdev, fmode_t mode) ··· 310 325 return ret; 311 326 } 312 327 313 - static const 
struct block_device_operations mtd_blktrans_ops = { 328 + static const struct block_device_operations mtd_block_ops = { 314 329 .owner = THIS_MODULE, 315 330 .open = blktrans_open, 316 331 .release = blktrans_release, ··· 386 401 gd->private_data = new; 387 402 gd->major = tr->major; 388 403 gd->first_minor = (new->devnum) << tr->part_bits; 389 - gd->fops = &mtd_blktrans_ops; 404 + gd->fops = &mtd_block_ops; 390 405 391 406 if (tr->part_bits) 392 407 if (new->devnum < 26) ··· 422 437 423 438 gd->queue = new->rq; 424 439 425 - /* Create processing thread */ 426 - /* TODO: workqueue ? */ 427 - new->thread = kthread_run(mtd_blktrans_thread, new, 428 - "%s%d", tr->name, new->mtd->index); 429 - if (IS_ERR(new->thread)) { 430 - ret = PTR_ERR(new->thread); 440 + /* Create processing workqueue */ 441 + new->wq = alloc_workqueue("%s%d", 0, 0, 442 + tr->name, new->mtd->index); 443 + if (!new->wq) 431 444 goto error4; 432 - } 445 + INIT_WORK(&new->work, mtd_blktrans_work); 446 + 433 447 gd->driverfs_dev = &new->mtd->dev; 434 448 435 449 if (new->readonly) ··· 468 484 /* Stop new requests to arrive */ 469 485 del_gendisk(old->disk); 470 486 471 - 472 - /* Stop the thread */ 473 - kthread_stop(old->thread); 487 + /* Stop workqueue. This will perform any pending request. */ 488 + destroy_workqueue(old->wq); 474 489 475 490 /* Kill current requests */ 476 491 spin_lock_irqsave(&old->queue_lock, flags);
+7 -8
drivers/mtd/mtdoops.c
··· 271 271 272 272 if (count[0] == 0xffffffff && count[1] == 0xffffffff) 273 273 mark_page_unused(cxt, page); 274 - if (count[0] == 0xffffffff) 274 + if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC) 275 275 continue; 276 276 if (maxcount == 0xffffffff) { 277 277 maxcount = count[0]; ··· 289 289 } 290 290 } 291 291 if (maxcount == 0xffffffff) { 292 - cxt->nextpage = 0; 293 - cxt->nextcount = 1; 294 - schedule_work(&cxt->work_erase); 295 - return; 292 + cxt->nextpage = cxt->oops_pages - 1; 293 + cxt->nextcount = 0; 296 294 } 297 - 298 - cxt->nextpage = maxpos; 299 - cxt->nextcount = maxcount; 295 + else { 296 + cxt->nextpage = maxpos; 297 + cxt->nextcount = maxcount; 298 + } 300 299 301 300 mtdoops_inc_counter(cxt); 302 301 }
+25 -9
drivers/mtd/nand/Kconfig
··· 50 50 of these chips were reused by later, larger chips. 51 51 52 52 config MTD_NAND_DENALI 53 - depends on PCI 53 + tristate "Support Denali NAND controller" 54 + help 55 + Enable support for the Denali NAND controller. This should be 56 + combined with either the PCI or platform drivers to provide device 57 + registration. 58 + 59 + config MTD_NAND_DENALI_PCI 54 60 tristate "Support Denali NAND controller on Intel Moorestown" 61 + depends on PCI && MTD_NAND_DENALI 55 62 help 56 63 Enable the driver for NAND flash on Intel Moorestown, using the 57 64 Denali NAND controller core. 58 - 65 + 66 + config MTD_NAND_DENALI_DT 67 + tristate "Support Denali NAND controller as a DT device" 68 + depends on HAVE_CLK && MTD_NAND_DENALI 69 + help 70 + Enable the driver for NAND flash on platforms using a Denali NAND 71 + controller as a DT device. 72 + 59 73 config MTD_NAND_DENALI_SCRATCH_REG_ADDR 60 74 hex "Denali NAND size scratch register address" 61 75 default "0xFF108018" 62 - depends on MTD_NAND_DENALI 76 + depends on MTD_NAND_DENALI_PCI 63 77 help 64 78 Some platforms place the NAND chip size in a scratch register 65 79 because (some versions of) the driver aren't able to automatically ··· 447 433 block, such as SD card. So pay attention to it when you enable 448 434 the GPMI. 449 435 436 + config MTD_NAND_BCM47XXNFLASH 437 + tristate "Support for NAND flash on BCM4706 BCMA bus" 438 + depends on BCMA_NFLASH 439 + help 440 + BCMA bus can have various flash memories attached, they are 441 + registered by bcma as platform devices. This enables driver for 442 + NAND flash memories. For now only BCM4706 is supported. 443 + 450 444 config MTD_NAND_PLATFORM 451 445 tristate "Support for generic platform NAND driver" 452 446 depends on HAS_IOMEM ··· 520 498 help 521 499 This enables the driver for the NAND flash controller on the 522 500 MXC processors. 
523 - 524 - config MTD_NAND_NOMADIK 525 - tristate "ST Nomadik 8815 NAND support" 526 - depends on ARCH_NOMADIK 527 - help 528 - Driver for the NAND flash controller on the Nomadik, with ECC. 529 501 530 502 config MTD_NAND_SH_FLCTL 531 503 tristate "Support for NAND on Renesas SuperH FLCTL"
+3 -1
drivers/mtd/nand/Makefile
··· 11 11 obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 12 12 obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 13 13 obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 14 + obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o 15 + obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o 14 16 obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 15 17 obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 16 18 obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o ··· 47 45 obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 48 46 obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 49 47 obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o 50 - obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 51 48 obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 52 49 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 53 50 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 54 51 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ 55 52 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o 53 + obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ 56 54 57 55 nand-objs := nand_base.o nand_bbt.o
+3 -3
drivers/mtd/nand/ams-delta.c
··· 173 173 /* 174 174 * Main initialization routine 175 175 */ 176 - static int __devinit ams_delta_init(struct platform_device *pdev) 176 + static int ams_delta_init(struct platform_device *pdev) 177 177 { 178 178 struct nand_chip *this; 179 179 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 270 270 /* 271 271 * Clean up routine 272 272 */ 273 - static int __devexit ams_delta_cleanup(struct platform_device *pdev) 273 + static int ams_delta_cleanup(struct platform_device *pdev) 274 274 { 275 275 void __iomem *io_base = platform_get_drvdata(pdev); 276 276 ··· 289 289 290 290 static struct platform_driver ams_delta_nand_driver = { 291 291 .probe = ams_delta_init, 292 - .remove = __devexit_p(ams_delta_cleanup), 292 + .remove = ams_delta_cleanup, 293 293 .driver = { 294 294 .name = "ams-delta-nand", 295 295 .owner = THIS_MODULE,
+17 -11
drivers/mtd/nand/atmel_nand.c
··· 331 331 * 12-bits 20-bytes 21-bytes 332 332 * 24-bits 39-bytes 42-bytes 333 333 */ 334 - static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size) 334 + static int pmecc_get_ecc_bytes(int cap, int sector_size) 335 335 { 336 336 int m = 12 + sector_size / 512; 337 337 return (m * cap + 7) / 8; 338 338 } 339 339 340 - static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout, 340 + static void pmecc_config_ecc_layout(struct nand_ecclayout *layout, 341 341 int oobsize, int ecc_len) 342 342 { 343 343 int i; ··· 353 353 oobsize - ecc_len - layout->oobfree[0].offset; 354 354 } 355 355 356 - static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) 356 + static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) 357 357 { 358 358 int table_size; 359 359 ··· 375 375 kfree(host->pmecc_delta); 376 376 } 377 377 378 - static int __devinit pmecc_data_alloc(struct atmel_nand_host *host) 378 + static int pmecc_data_alloc(struct atmel_nand_host *host) 379 379 { 380 380 const int cap = host->pmecc_corr_cap; 381 381 ··· 724 724 struct atmel_nand_host *host = nand_chip->priv; 725 725 int i, err_nbr, eccbytes; 726 726 uint8_t *buf_pos; 727 + int total_err = 0; 727 728 728 729 eccbytes = nand_chip->ecc.bytes; 729 730 for (i = 0; i < eccbytes; i++) ··· 752 751 pmecc_correct_data(mtd, buf_pos, ecc, i, 753 752 host->pmecc_bytes_per_sector, err_nbr); 754 753 mtd->ecc_stats.corrected += err_nbr; 754 + total_err += err_nbr; 755 755 } 756 756 } 757 757 pmecc_stat >>= 1; 758 758 } 759 759 760 - return 0; 760 + return total_err; 761 761 } 762 762 763 763 static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, ··· 770 768 uint32_t *eccpos = chip->ecc.layout->eccpos; 771 769 uint32_t stat; 772 770 unsigned long end_time; 771 + int bitflips = 0; 773 772 774 773 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); 775 774 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); ··· 793 790 } 794 791 795 792 stat = 
pmecc_readl_relaxed(host->ecc, ISR); 796 - if (stat != 0) 797 - if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0) 798 - return -EIO; 793 + if (stat != 0) { 794 + bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]); 795 + if (bitflips < 0) 796 + /* uncorrectable errors */ 797 + return 0; 798 + } 799 799 800 - return 0; 800 + return bitflips; 801 801 } 802 802 803 803 static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, ··· 1212 1206 } 1213 1207 1214 1208 #if defined(CONFIG_OF) 1215 - static int __devinit atmel_of_init_port(struct atmel_nand_host *host, 1209 + static int atmel_of_init_port(struct atmel_nand_host *host, 1216 1210 struct device_node *np) 1217 1211 { 1218 1212 u32 val, table_offset; ··· 1299 1293 return 0; 1300 1294 } 1301 1295 #else 1302 - static int __devinit atmel_of_init_port(struct atmel_nand_host *host, 1296 + static int atmel_of_init_port(struct atmel_nand_host *host, 1303 1297 struct device_node *np) 1304 1298 { 1305 1299 return -EINVAL;
+4 -4
drivers/mtd/nand/au1550nd.c
··· 382 382 while(!this->dev_ready(mtd)); 383 383 } 384 384 385 - static int __devinit find_nand_cs(unsigned long nand_base) 385 + static int find_nand_cs(unsigned long nand_base) 386 386 { 387 387 void __iomem *base = 388 388 (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR); ··· 403 403 return -ENODEV; 404 404 } 405 405 406 - static int __devinit au1550nd_probe(struct platform_device *pdev) 406 + static int au1550nd_probe(struct platform_device *pdev) 407 407 { 408 408 struct au1550nd_platdata *pd; 409 409 struct au1550nd_ctx *ctx; ··· 491 491 return ret; 492 492 } 493 493 494 - static int __devexit au1550nd_remove(struct platform_device *pdev) 494 + static int au1550nd_remove(struct platform_device *pdev) 495 495 { 496 496 struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); 497 497 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 509 509 .owner = THIS_MODULE, 510 510 }, 511 511 .probe = au1550nd_probe, 512 - .remove = __devexit_p(au1550nd_remove), 512 + .remove = au1550nd_remove, 513 513 }; 514 514 515 515 module_platform_driver(au1550nd_driver);
+4
drivers/mtd/nand/bcm47xxnflash/Makefile
# Build the composite bcm47xxnflash.o module from the bus-agnostic core
# (main.o) and the BCM4706-specific controller ops (ops_bcm4706.o).
bcm47xxnflash-y				:= main.o ops_bcm4706.o

obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash.o
+22
drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
··· 1 + #ifndef __BCM47XXNFLASH_H 2 + #define __BCM47XXNFLASH_H 3 + 4 + #include <linux/mtd/mtd.h> 5 + #include <linux/mtd/nand.h> 6 + 7 + struct bcm47xxnflash { 8 + struct bcma_drv_cc *cc; 9 + 10 + struct nand_chip nand_chip; 11 + struct mtd_info mtd; 12 + 13 + unsigned curr_command; 14 + int curr_page_addr; 15 + int curr_column; 16 + 17 + u8 id_data[8]; 18 + }; 19 + 20 + int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n); 21 + 22 + #endif /* BCM47XXNFLASH */
+108
drivers/mtd/nand/bcm47xxnflash/main.c
··· 1 + /* 2 + * BCM47XX NAND flash driver 3 + * 4 + * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + */ 11 + 12 + #include <linux/module.h> 13 + #include <linux/kernel.h> 14 + #include <linux/slab.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/bcma/bcma.h> 17 + 18 + #include "bcm47xxnflash.h" 19 + 20 + MODULE_DESCRIPTION("NAND flash driver for BCMA bus"); 21 + MODULE_LICENSE("GPL"); 22 + MODULE_AUTHOR("Rafał Miłecki"); 23 + 24 + static const char *probes[] = { "bcm47xxpart", NULL }; 25 + 26 + static int bcm47xxnflash_probe(struct platform_device *pdev) 27 + { 28 + struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev); 29 + struct bcm47xxnflash *b47n; 30 + int err = 0; 31 + 32 + b47n = kzalloc(sizeof(*b47n), GFP_KERNEL); 33 + if (!b47n) { 34 + err = -ENOMEM; 35 + goto out; 36 + } 37 + 38 + b47n->nand_chip.priv = b47n; 39 + b47n->mtd.owner = THIS_MODULE; 40 + b47n->mtd.priv = &b47n->nand_chip; /* Required */ 41 + b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash); 42 + 43 + if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) { 44 + err = bcm47xxnflash_ops_bcm4706_init(b47n); 45 + } else { 46 + pr_err("Device not supported\n"); 47 + err = -ENOTSUPP; 48 + } 49 + if (err) { 50 + pr_err("Initialization failed: %d\n", err); 51 + goto err_init; 52 + } 53 + 54 + err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0); 55 + if (err) { 56 + pr_err("Failed to register MTD device: %d\n", err); 57 + goto err_dev_reg; 58 + } 59 + 60 + return 0; 61 + 62 + err_dev_reg: 63 + err_init: 64 + kfree(b47n); 65 + out: 66 + return err; 67 + } 68 + 69 + static int __devexit bcm47xxnflash_remove(struct platform_device *pdev) 70 + { 71 + struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev); 72 + 73 + if (nflash->mtd) 74 + 
mtd_device_unregister(nflash->mtd); 75 + 76 + return 0; 77 + } 78 + 79 + static struct platform_driver bcm47xxnflash_driver = { 80 + .remove = __devexit_p(bcm47xxnflash_remove), 81 + .driver = { 82 + .name = "bcma_nflash", 83 + .owner = THIS_MODULE, 84 + }, 85 + }; 86 + 87 + static int __init bcm47xxnflash_init(void) 88 + { 89 + int err; 90 + 91 + /* 92 + * Platform device "bcma_nflash" exists on SoCs and is registered very 93 + * early, it won't be added during runtime (use platform_driver_probe). 94 + */ 95 + err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe); 96 + if (err) 97 + pr_err("Failed to register serial flash driver: %d\n", err); 98 + 99 + return err; 100 + } 101 + 102 + static void __exit bcm47xxnflash_exit(void) 103 + { 104 + platform_driver_unregister(&bcm47xxnflash_driver); 105 + } 106 + 107 + module_init(bcm47xxnflash_init); 108 + module_exit(bcm47xxnflash_exit);
+413
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
··· 1 + /* 2 + * BCM47XX NAND flash driver 3 + * 4 + * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + */ 11 + 12 + #include <linux/module.h> 13 + #include <linux/kernel.h> 14 + #include <linux/slab.h> 15 + #include <linux/bcma/bcma.h> 16 + 17 + #include "bcm47xxnflash.h" 18 + 19 + /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has 20 + * shown 164 retries as maxiumum. */ 21 + #define NFLASH_READY_RETRIES 1000 22 + 23 + #define NFLASH_SECTOR_SIZE 512 24 + 25 + #define NCTL_CMD0 0x00010000 26 + #define NCTL_CMD1W 0x00080000 27 + #define NCTL_READ 0x00100000 28 + #define NCTL_WRITE 0x00200000 29 + #define NCTL_SPECADDR 0x01000000 30 + #define NCTL_READY 0x04000000 31 + #define NCTL_ERR 0x08000000 32 + #define NCTL_CSA 0x40000000 33 + #define NCTL_START 0x80000000 34 + 35 + /************************************************** 36 + * Various helpers 37 + **************************************************/ 38 + 39 + static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock) 40 + { 41 + return ((ns * 1000 * clock) / 1000000) + 1; 42 + } 43 + 44 + static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code) 45 + { 46 + int i = 0; 47 + 48 + bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code); 49 + for (i = 0; i < NFLASH_READY_RETRIES; i++) { 50 + if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) { 51 + i = 0; 52 + break; 53 + } 54 + } 55 + if (i) { 56 + pr_err("NFLASH control command not ready!\n"); 57 + return -EBUSY; 58 + } 59 + return 0; 60 + } 61 + 62 + static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc) 63 + { 64 + int i; 65 + 66 + for (i = 0; i < NFLASH_READY_RETRIES; i++) { 67 + if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) { 68 + if (bcma_cc_read32(cc, 
BCMA_CC_NFLASH_CTL) & 69 + BCMA_CC_NFLASH_CTL_ERR) { 70 + pr_err("Error on polling\n"); 71 + return -EBUSY; 72 + } else { 73 + return 0; 74 + } 75 + } 76 + } 77 + 78 + pr_err("Polling timeout!\n"); 79 + return -EBUSY; 80 + } 81 + 82 + /************************************************** 83 + * R/W 84 + **************************************************/ 85 + 86 + static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf, 87 + int len) 88 + { 89 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 90 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 91 + 92 + u32 ctlcode; 93 + u32 *dest = (u32 *)buf; 94 + int i; 95 + int toread; 96 + 97 + BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask); 98 + /* Don't validate column using nand_chip->page_shift, it may be bigger 99 + * when accessing OOB */ 100 + 101 + while (len) { 102 + /* We can read maximum of 0x200 bytes at once */ 103 + toread = min(len, 0x200); 104 + 105 + /* Set page and column */ 106 + bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR, 107 + b47n->curr_column); 108 + bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR, 109 + b47n->curr_page_addr); 110 + 111 + /* Prepare to read */ 112 + ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 | 113 + NCTL_CMD0; 114 + ctlcode |= NAND_CMD_READSTART << 8; 115 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) 116 + return; 117 + if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc)) 118 + return; 119 + 120 + /* Eventually read some data :) */ 121 + for (i = 0; i < toread; i += 4, dest++) { 122 + ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ; 123 + if (i == toread - 4) /* Last read goes without that */ 124 + ctlcode &= ~NCTL_CSA; 125 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, 126 + ctlcode)) 127 + return; 128 + *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA); 129 + } 130 + 131 + b47n->curr_column += toread; 132 + len -= toread; 133 + } 134 + } 135 + 136 + static void 
bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd, 137 + const uint8_t *buf, int len) 138 + { 139 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 140 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 141 + struct bcma_drv_cc *cc = b47n->cc; 142 + 143 + u32 ctlcode; 144 + const u32 *data = (u32 *)buf; 145 + int i; 146 + 147 + BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask); 148 + /* Don't validate column using nand_chip->page_shift, it may be bigger 149 + * when accessing OOB */ 150 + 151 + for (i = 0; i < len; i += 4, data++) { 152 + bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data); 153 + 154 + ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE; 155 + if (i == len - 4) /* Last read goes without that */ 156 + ctlcode &= ~NCTL_CSA; 157 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) { 158 + pr_err("%s ctl_cmd didn't work!\n", __func__); 159 + return; 160 + } 161 + } 162 + 163 + b47n->curr_column += len; 164 + } 165 + 166 + /************************************************** 167 + * NAND chip ops 168 + **************************************************/ 169 + 170 + /* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */ 171 + static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd, 172 + int chip) 173 + { 174 + return; 175 + } 176 + 177 + /* 178 + * Default nand_command and nand_command_lp don't match BCM4706 hardware layout. 179 + * For example, reading chip id is performed in a non-standard way. 180 + * Setting column and page is also handled differently, we use a special 181 + * registers of ChipCommon core. Hacking cmd_ctrl to understand and convert 182 + * standard commands would be much more complicated. 
183 + */ 184 + static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd, 185 + unsigned command, int column, 186 + int page_addr) 187 + { 188 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 189 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 190 + struct bcma_drv_cc *cc = b47n->cc; 191 + u32 ctlcode; 192 + int i; 193 + 194 + if (column != -1) 195 + b47n->curr_column = column; 196 + if (page_addr != -1) 197 + b47n->curr_page_addr = page_addr; 198 + 199 + switch (command) { 200 + case NAND_CMD_RESET: 201 + pr_warn("Chip reset not implemented yet\n"); 202 + break; 203 + case NAND_CMD_READID: 204 + ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0; 205 + ctlcode |= NAND_CMD_READID; 206 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) { 207 + pr_err("READID error\n"); 208 + break; 209 + } 210 + 211 + /* 212 + * Reading is specific, last one has to go without NCTL_CSA 213 + * bit. We don't know how many reads NAND subsystem is going 214 + * to perform, so cache everything. 
215 + */ 216 + for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) { 217 + ctlcode = NCTL_CSA | NCTL_READ; 218 + if (i == ARRAY_SIZE(b47n->id_data) - 1) 219 + ctlcode &= ~NCTL_CSA; 220 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, 221 + ctlcode)) { 222 + pr_err("READID error\n"); 223 + break; 224 + } 225 + b47n->id_data[i] = 226 + bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA) 227 + & 0xFF; 228 + } 229 + 230 + break; 231 + case NAND_CMD_STATUS: 232 + ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS; 233 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) 234 + pr_err("STATUS command error\n"); 235 + break; 236 + case NAND_CMD_READ0: 237 + break; 238 + case NAND_CMD_READOOB: 239 + if (page_addr != -1) 240 + b47n->curr_column += mtd->writesize; 241 + break; 242 + case NAND_CMD_ERASE1: 243 + bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR, 244 + b47n->curr_page_addr); 245 + ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 | 246 + NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8); 247 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) 248 + pr_err("ERASE1 failed\n"); 249 + break; 250 + case NAND_CMD_ERASE2: 251 + break; 252 + case NAND_CMD_SEQIN: 253 + /* Set page and column */ 254 + bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR, 255 + b47n->curr_column); 256 + bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR, 257 + b47n->curr_page_addr); 258 + 259 + /* Prepare to write */ 260 + ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000; 261 + ctlcode |= NAND_CMD_SEQIN; 262 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) 263 + pr_err("SEQIN failed\n"); 264 + break; 265 + case NAND_CMD_PAGEPROG: 266 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 | 267 + NAND_CMD_PAGEPROG)) 268 + pr_err("PAGEPROG failed\n"); 269 + if (bcm47xxnflash_ops_bcm4706_poll(cc)) 270 + pr_err("PAGEPROG not ready\n"); 271 + break; 272 + default: 273 + pr_err("Command 0x%X unsupported\n", command); 274 + break; 275 + } 276 + b47n->curr_command = command; 277 + } 278 + 279 + static u8 
bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd) 280 + { 281 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 282 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 283 + struct bcma_drv_cc *cc = b47n->cc; 284 + u32 tmp = 0; 285 + 286 + switch (b47n->curr_command) { 287 + case NAND_CMD_READID: 288 + if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) { 289 + pr_err("Requested invalid id_data: %d\n", 290 + b47n->curr_column); 291 + return 0; 292 + } 293 + return b47n->id_data[b47n->curr_column++]; 294 + case NAND_CMD_STATUS: 295 + if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ)) 296 + return 0; 297 + return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff; 298 + case NAND_CMD_READOOB: 299 + bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4); 300 + return tmp & 0xFF; 301 + } 302 + 303 + pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command); 304 + return 0; 305 + } 306 + 307 + static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd, 308 + uint8_t *buf, int len) 309 + { 310 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 311 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 312 + 313 + switch (b47n->curr_command) { 314 + case NAND_CMD_READ0: 315 + case NAND_CMD_READOOB: 316 + bcm47xxnflash_ops_bcm4706_read(mtd, buf, len); 317 + return; 318 + } 319 + 320 + pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command); 321 + } 322 + 323 + static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd, 324 + const uint8_t *buf, int len) 325 + { 326 + struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv; 327 + struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv; 328 + 329 + switch (b47n->curr_command) { 330 + case NAND_CMD_SEQIN: 331 + bcm47xxnflash_ops_bcm4706_write(mtd, buf, len); 332 + return; 333 + } 334 + 335 + pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command); 336 + } 337 + 338 + 
/************************************************** 339 + * Init 340 + **************************************************/ 341 + 342 + int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n) 343 + { 344 + int err; 345 + u32 freq; 346 + u16 clock; 347 + u8 w0, w1, w2, w3, w4; 348 + 349 + unsigned long chipsize; /* MiB */ 350 + u8 tbits, col_bits, col_size, row_bits, row_bsize; 351 + u32 val; 352 + 353 + b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip; 354 + b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc; 355 + b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte; 356 + b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf; 357 + b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf; 358 + b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH; 359 + b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */ 360 + 361 + /* Enable NAND flash access */ 362 + bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG, 363 + BCMA_CC_4706_FLASHSCFG_NF1); 364 + 365 + /* Configure wait counters */ 366 + if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) { 367 + freq = 100000000; 368 + } else { 369 + freq = bcma_chipco_pll_read(b47n->cc, 4); 370 + freq = (freq * 0xFFF) >> 3; 371 + freq = (freq * 25000000) >> 3; 372 + } 373 + clock = freq / 1000000; 374 + w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock); 375 + w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock); 376 + w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock); 377 + w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock); 378 + w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock); 379 + bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0, 380 + (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0)); 381 + 382 + /* Scan NAND */ 383 + err = nand_scan(&b47n->mtd, 1); 384 + if (err) { 385 + pr_err("Could not scan NAND flash: %d\n", err); 386 + goto exit; 387 + } 388 + 389 + /* Configure FLASH */ 390 + chipsize = b47n->nand_chip.chipsize >> 20; 391 + tbits 
= ffs(chipsize); /* find first bit set */ 392 + if (!tbits || tbits != fls(chipsize)) { 393 + pr_err("Invalid flash size: 0x%lX\n", chipsize); 394 + err = -ENOTSUPP; 395 + goto exit; 396 + } 397 + tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */ 398 + 399 + col_bits = b47n->nand_chip.page_shift + 1; 400 + col_size = (col_bits + 7) / 8; 401 + 402 + row_bits = tbits - col_bits + 1; 403 + row_bsize = (row_bits + 7) / 8; 404 + 405 + val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2; 406 + bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val); 407 + 408 + exit: 409 + if (err) 410 + bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG, 411 + ~BCMA_CC_4706_FLASHSCFG_NF1); 412 + return err; 413 + }
+4 -4
drivers/mtd/nand/bf5xx_nand.c
··· 658 658 /* 659 659 * Device management interface 660 660 */ 661 - static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 661 + static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 662 662 { 663 663 struct mtd_info *mtd = &info->mtd; 664 664 struct mtd_partition *parts = info->platform->partitions; ··· 667 667 return mtd_device_register(mtd, parts, nr); 668 668 } 669 669 670 - static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 670 + static int bf5xx_nand_remove(struct platform_device *pdev) 671 671 { 672 672 struct bf5xx_nand_info *info = to_nand_info(pdev); 673 673 ··· 725 725 * it can allocate all necessary resources then calls the 726 726 * nand layer to look for devices 727 727 */ 728 - static int __devinit bf5xx_nand_probe(struct platform_device *pdev) 728 + static int bf5xx_nand_probe(struct platform_device *pdev) 729 729 { 730 730 struct bf5xx_nand_platform *plat = to_nand_plat(pdev); 731 731 struct bf5xx_nand_info *info = NULL; ··· 865 865 /* driver device registration */ 866 866 static struct platform_driver bf5xx_nand_driver = { 867 867 .probe = bf5xx_nand_probe, 868 - .remove = __devexit_p(bf5xx_nand_remove), 868 + .remove = bf5xx_nand_remove, 869 869 .suspend = bf5xx_nand_suspend, 870 870 .resume = bf5xx_nand_resume, 871 871 .driver = {
+6 -6
drivers/mtd/nand/cafe_nand.c
··· 585 585 } 586 586 587 587 /* F_2[X]/(X**6+X+1) */ 588 - static unsigned short __devinit gf64_mul(u8 a, u8 b) 588 + static unsigned short gf64_mul(u8 a, u8 b) 589 589 { 590 590 u8 c; 591 591 unsigned int i; ··· 604 604 } 605 605 606 606 /* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */ 607 - static u16 __devinit gf4096_mul(u16 a, u16 b) 607 + static u16 gf4096_mul(u16 a, u16 b) 608 608 { 609 609 u8 ah, al, bh, bl, ch, cl; 610 610 ··· 619 619 return (ch << 6) ^ cl; 620 620 } 621 621 622 - static int __devinit cafe_mul(int x) 622 + static int cafe_mul(int x) 623 623 { 624 624 if (x == 0) 625 625 return 1; 626 626 return gf4096_mul(x, 0xe01); 627 627 } 628 628 629 - static int __devinit cafe_nand_probe(struct pci_dev *pdev, 629 + static int cafe_nand_probe(struct pci_dev *pdev, 630 630 const struct pci_device_id *ent) 631 631 { 632 632 struct mtd_info *mtd; ··· 821 821 return err; 822 822 } 823 823 824 - static void __devexit cafe_nand_remove(struct pci_dev *pdev) 824 + static void cafe_nand_remove(struct pci_dev *pdev) 825 825 { 826 826 struct mtd_info *mtd = pci_get_drvdata(pdev); 827 827 struct cafe_priv *cafe = mtd->priv; ··· 887 887 .name = "CAFÉ NAND", 888 888 .id_table = cafe_nand_tbl, 889 889 .probe = cafe_nand_probe, 890 - .remove = __devexit_p(cafe_nand_remove), 890 + .remove = cafe_nand_remove, 891 891 .resume = cafe_nand_resume, 892 892 }; 893 893
+1 -2
drivers/mtd/nand/cs553x_nand.c
··· 237 237 this->ecc.hwctl = cs_enable_hwecc; 238 238 this->ecc.calculate = cs_calculate_ecc; 239 239 this->ecc.correct = nand_correct_data; 240 + this->ecc.strength = 1; 240 241 241 242 /* Enable the following for a flash based bad block table */ 242 243 this->bbt_options = NAND_BBT_USE_FLASH; ··· 247 246 err = -ENXIO; 248 247 goto out_ior; 249 248 } 250 - 251 - this->ecc.strength = 1; 252 249 253 250 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); 254 251
+9 -2
drivers/mtd/nand/davinci_nand.c
··· 821 821 if (ret < 0) 822 822 goto err_scan; 823 823 824 - ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts, 825 - pdata->nr_parts); 824 + if (pdata->parts) 825 + ret = mtd_device_parse_register(&info->mtd, NULL, NULL, 826 + pdata->parts, pdata->nr_parts); 827 + else { 828 + struct mtd_part_parser_data ppdata; 826 829 830 + ppdata.of_node = pdev->dev.of_node; 831 + ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata, 832 + NULL, 0); 833 + } 827 834 if (ret < 0) 828 835 goto err_scan; 829 836
+30 -132
drivers/mtd/nand/denali.c
··· 16 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 17 * 18 18 */ 19 - 20 19 #include <linux/interrupt.h> 21 20 #include <linux/delay.h> 22 21 #include <linux/dma-mapping.h> 23 22 #include <linux/wait.h> 24 23 #include <linux/mutex.h> 25 24 #include <linux/slab.h> 26 - #include <linux/pci.h> 27 25 #include <linux/mtd/mtd.h> 28 26 #include <linux/module.h> 29 27 ··· 86 88 /* this is a helper macro that allows us to 87 89 * format the bank into the proper bits for the controller */ 88 90 #define BANK(x) ((x) << 24) 89 - 90 - /* List of platforms this NAND controller has be integrated into */ 91 - static const struct pci_device_id denali_pci_ids[] = { 92 - { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, 93 - { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST }, 94 - { /* end: all zeroes */ } 95 - }; 96 91 97 92 /* forward declarations */ 98 93 static void clear_interrupts(struct denali_nand_info *denali); ··· 690 699 691 700 if (comp_res == 0) { 692 701 /* timeout */ 693 - printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", 702 + pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n", 694 703 intr_status, irq_mask); 695 704 696 705 intr_status = 0; ··· 1296 1305 /* TODO: Read OOB data */ 1297 1306 break; 1298 1307 default: 1299 - printk(KERN_ERR ": unsupported command" 1300 - " received 0x%x\n", cmd); 1308 + pr_err(": unsupported command received 0x%x\n", cmd); 1301 1309 break; 1302 1310 } 1303 1311 } ··· 1415 1425 denali->irq_status = 0; 1416 1426 } 1417 1427 1418 - /* driver entry point */ 1419 - static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 1428 + int denali_init(struct denali_nand_info *denali) 1420 1429 { 1421 - int ret = -ENODEV; 1422 - resource_size_t csr_base, mem_base; 1423 - unsigned long csr_len, mem_len; 1424 - struct denali_nand_info *denali; 1430 + int ret; 1425 1431 1426 - denali = kzalloc(sizeof(*denali), GFP_KERNEL); 1427 - if (!denali) 1428 - return -ENOMEM; 1429 - 1430 - ret = 
pci_enable_device(dev); 1431 - if (ret) { 1432 - printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); 1433 - goto failed_alloc_memery; 1434 - } 1435 - 1436 - if (id->driver_data == INTEL_CE4100) { 1432 + if (denali->platform == INTEL_CE4100) { 1437 1433 /* Due to a silicon limitation, we can only support 1438 1434 * ONFI timing mode 1 and below. 1439 1435 */ 1440 1436 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { 1441 - printk(KERN_ERR "Intel CE4100 only supports" 1442 - " ONFI timing mode 1 or below\n"); 1443 - ret = -EINVAL; 1444 - goto failed_enable_dev; 1445 - } 1446 - denali->platform = INTEL_CE4100; 1447 - mem_base = pci_resource_start(dev, 0); 1448 - mem_len = pci_resource_len(dev, 1); 1449 - csr_base = pci_resource_start(dev, 1); 1450 - csr_len = pci_resource_len(dev, 1); 1451 - } else { 1452 - denali->platform = INTEL_MRST; 1453 - csr_base = pci_resource_start(dev, 0); 1454 - csr_len = pci_resource_len(dev, 0); 1455 - mem_base = pci_resource_start(dev, 1); 1456 - mem_len = pci_resource_len(dev, 1); 1457 - if (!mem_len) { 1458 - mem_base = csr_base + csr_len; 1459 - mem_len = csr_len; 1437 + pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n"); 1438 + return -EINVAL; 1460 1439 } 1461 1440 } 1462 1441 1463 1442 /* Is 32-bit DMA supported? 
*/ 1464 - ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); 1443 + ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); 1465 1444 if (ret) { 1466 - printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1467 - goto failed_enable_dev; 1445 + pr_err("Spectra: no usable DMA configuration\n"); 1446 + return ret; 1468 1447 } 1469 - denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf, 1448 + denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, 1470 1449 DENALI_BUF_SIZE, 1471 1450 DMA_BIDIRECTIONAL); 1472 1451 1473 - if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) { 1474 - dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); 1475 - goto failed_enable_dev; 1452 + if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { 1453 + dev_err(denali->dev, "Spectra: failed to map DMA buffer\n"); 1454 + return -EIO; 1476 1455 } 1477 - 1478 - pci_set_master(dev); 1479 - denali->dev = &dev->dev; 1480 - denali->mtd.dev.parent = &dev->dev; 1481 - 1482 - ret = pci_request_regions(dev, DENALI_NAND_NAME); 1483 - if (ret) { 1484 - printk(KERN_ERR "Spectra: Unable to request memory regions\n"); 1485 - goto failed_dma_map; 1486 - } 1487 - 1488 - denali->flash_reg = ioremap_nocache(csr_base, csr_len); 1489 - if (!denali->flash_reg) { 1490 - printk(KERN_ERR "Spectra: Unable to remap memory region\n"); 1491 - ret = -ENOMEM; 1492 - goto failed_req_regions; 1493 - } 1494 - 1495 - denali->flash_mem = ioremap_nocache(mem_base, mem_len); 1496 - if (!denali->flash_mem) { 1497 - printk(KERN_ERR "Spectra: ioremap_nocache failed!"); 1498 - ret = -ENOMEM; 1499 - goto failed_remap_reg; 1500 - } 1501 - 1456 + denali->mtd.dev.parent = denali->dev; 1502 1457 denali_hw_init(denali); 1503 1458 denali_drv_init(denali); 1504 1459 1505 1460 /* denali_isr register is done after all the hardware 1506 1461 * initilization is finished*/ 1507 - if (request_irq(dev->irq, denali_isr, IRQF_SHARED, 1462 + if (request_irq(denali->irq, denali_isr, IRQF_SHARED, 1508 1463 
DENALI_NAND_NAME, denali)) { 1509 - printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); 1510 - ret = -ENODEV; 1511 - goto failed_remap_mem; 1464 + pr_err("Spectra: Unable to allocate IRQ\n"); 1465 + return -ENODEV; 1512 1466 } 1513 1467 1514 1468 /* now that our ISR is registered, we can enable interrupts */ 1515 1469 denali_set_intr_modes(denali, true); 1516 - 1517 - pci_set_drvdata(dev, denali); 1518 - 1519 1470 denali->mtd.name = "denali-nand"; 1520 1471 denali->mtd.owner = THIS_MODULE; 1521 1472 denali->mtd.priv = &denali->nand; ··· 1480 1549 */ 1481 1550 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { 1482 1551 ret = -ENODEV; 1483 - printk(KERN_ERR "Spectra: device size not supported by this " 1484 - "version of MTD."); 1552 + pr_err("Spectra: device size not supported by this version of MTD."); 1485 1553 goto failed_req_irq; 1486 1554 } 1487 1555 ··· 1532 1602 } else if (denali->mtd.oobsize < (denali->bbtskipbytes + 1533 1603 ECC_8BITS * (denali->mtd.writesize / 1534 1604 ECC_SECTOR_SIZE))) { 1535 - printk(KERN_ERR "Your NAND chip OOB is not large enough to" 1536 - " contain 8bit ECC correction codes"); 1605 + pr_err("Your NAND chip OOB is not large enough to \ 1606 + contain 8bit ECC correction codes"); 1537 1607 goto failed_req_irq; 1538 1608 } else { 1539 1609 denali->nand.ecc.strength = 8; ··· 1585 1655 1586 1656 ret = mtd_device_register(&denali->mtd, NULL, 0); 1587 1657 if (ret) { 1588 - dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", 1658 + dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n", 1589 1659 ret); 1590 1660 goto failed_req_irq; 1591 1661 } 1592 1662 return 0; 1593 1663 1594 1664 failed_req_irq: 1595 - denali_irq_cleanup(dev->irq, denali); 1596 - failed_remap_mem: 1597 - iounmap(denali->flash_mem); 1598 - failed_remap_reg: 1599 - iounmap(denali->flash_reg); 1600 - failed_req_regions: 1601 - pci_release_regions(dev); 1602 - failed_dma_map: 1603 - dma_unmap_single(&dev->dev, denali->buf.dma_buf, 
DENALI_BUF_SIZE, 1604 - DMA_BIDIRECTIONAL); 1605 - failed_enable_dev: 1606 - pci_disable_device(dev); 1607 - failed_alloc_memery: 1608 - kfree(denali); 1665 + denali_irq_cleanup(denali->irq, denali); 1666 + 1609 1667 return ret; 1610 1668 } 1669 + EXPORT_SYMBOL(denali_init); 1611 1670 1612 1671 /* driver exit point */ 1613 - static void denali_pci_remove(struct pci_dev *dev) 1672 + void denali_remove(struct denali_nand_info *denali) 1614 1673 { 1615 - struct denali_nand_info *denali = pci_get_drvdata(dev); 1616 - 1617 - nand_release(&denali->mtd); 1618 - 1619 - denali_irq_cleanup(dev->irq, denali); 1620 - 1621 - iounmap(denali->flash_reg); 1622 - iounmap(denali->flash_mem); 1623 - pci_release_regions(dev); 1624 - pci_disable_device(dev); 1625 - dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1626 - DMA_BIDIRECTIONAL); 1627 - pci_set_drvdata(dev, NULL); 1628 - kfree(denali); 1674 + denali_irq_cleanup(denali->irq, denali); 1675 + dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1676 + DMA_BIDIRECTIONAL); 1629 1677 } 1630 - 1631 - MODULE_DEVICE_TABLE(pci, denali_pci_ids); 1632 - 1633 - static struct pci_driver denali_pci_driver = { 1634 - .name = DENALI_NAND_NAME, 1635 - .id_table = denali_pci_ids, 1636 - .probe = denali_pci_probe, 1637 - .remove = denali_pci_remove, 1638 - }; 1639 - 1640 - module_pci_driver(denali_pci_driver); 1678 + EXPORT_SYMBOL(denali_remove);
+5
drivers/mtd/nand/denali.h
··· 466 466 467 467 #define INTEL_CE4100 1 468 468 #define INTEL_MRST 2 469 + #define DT 3 469 470 470 471 struct denali_nand_info { 471 472 struct mtd_info mtd; ··· 488 487 uint32_t irq_status; 489 488 int irq_debug_array[32]; 490 489 int idx; 490 + int irq; 491 491 492 492 uint32_t devnum; /* represent how many nands connected */ 493 493 uint32_t fwblks; /* represent how many blocks FW used */ ··· 497 495 uint32_t bbtskipbytes; 498 496 uint32_t max_banks; 499 497 }; 498 + 499 + extern int denali_init(struct denali_nand_info *denali); 500 + extern void denali_remove(struct denali_nand_info *denali); 500 501 501 502 #endif /*_LLD_NAND_*/
+167
drivers/mtd/nand/denali_dt.c
··· 1 + /* 2 + * NAND Flash Controller Device Driver for DT 3 + * 4 + * Copyright © 2011, Picochip. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + */ 15 + #include <linux/clk.h> 16 + #include <linux/err.h> 17 + #include <linux/io.h> 18 + #include <linux/ioport.h> 19 + #include <linux/kernel.h> 20 + #include <linux/module.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/of.h> 23 + #include <linux/of_device.h> 24 + #include <linux/slab.h> 25 + 26 + #include "denali.h" 27 + 28 + struct denali_dt { 29 + struct denali_nand_info denali; 30 + struct clk *clk; 31 + }; 32 + 33 + static void __iomem *request_and_map(struct device *dev, 34 + const struct resource *res) 35 + { 36 + void __iomem *ptr; 37 + 38 + if (!devm_request_mem_region(dev, res->start, resource_size(res), 39 + "denali-dt")) { 40 + dev_err(dev, "unable to request %s\n", res->name); 41 + return NULL; 42 + } 43 + 44 + ptr = devm_ioremap_nocache(dev, res->start, resource_size(res)); 45 + if (!res) 46 + dev_err(dev, "ioremap_nocache of %s failed!", res->name); 47 + 48 + return ptr; 49 + } 50 + 51 + static const struct of_device_id denali_nand_dt_ids[] = { 52 + { .compatible = "denali,denali-nand-dt" }, 53 + { /* sentinel */ } 54 + }; 55 + 56 + MODULE_DEVICE_TABLE(of, denali_nand_dt_ids); 57 + 58 + static u64 denali_dma_mask; 59 + 60 + static int denali_dt_probe(struct platform_device *ofdev) 61 + { 62 + struct resource *denali_reg, *nand_data; 63 + struct denali_dt *dt; 64 + struct denali_nand_info *denali; 65 + int ret; 66 + const struct of_device_id *of_id; 67 + 68 
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev); 69 + if (of_id) { 70 + ofdev->id_entry = of_id->data; 71 + } else { 72 + pr_err("Failed to find the right device id.\n"); 73 + return -ENOMEM; 74 + } 75 + 76 + dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); 77 + if (!dt) 78 + return -ENOMEM; 79 + denali = &dt->denali; 80 + 81 + denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); 82 + nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); 83 + if (!denali_reg || !nand_data) { 84 + dev_err(&ofdev->dev, "resources not completely defined\n"); 85 + return -EINVAL; 86 + } 87 + 88 + denali->platform = DT; 89 + denali->dev = &ofdev->dev; 90 + denali->irq = platform_get_irq(ofdev, 0); 91 + if (denali->irq < 0) { 92 + dev_err(&ofdev->dev, "no irq defined\n"); 93 + return -ENXIO; 94 + } 95 + 96 + denali->flash_reg = request_and_map(&ofdev->dev, denali_reg); 97 + if (!denali->flash_reg) 98 + return -ENOMEM; 99 + 100 + denali->flash_mem = request_and_map(&ofdev->dev, nand_data); 101 + if (!denali->flash_mem) 102 + return -ENOMEM; 103 + 104 + if (!of_property_read_u32(ofdev->dev.of_node, 105 + "dma-mask", (u32 *)&denali_dma_mask)) { 106 + denali->dev->dma_mask = &denali_dma_mask; 107 + } else { 108 + denali->dev->dma_mask = NULL; 109 + } 110 + 111 + dt->clk = clk_get(&ofdev->dev, NULL); 112 + if (IS_ERR(dt->clk)) { 113 + dev_err(&ofdev->dev, "no clk available\n"); 114 + return PTR_ERR(dt->clk); 115 + } 116 + clk_prepare_enable(dt->clk); 117 + 118 + ret = denali_init(denali); 119 + if (ret) 120 + goto out_disable_clk; 121 + 122 + platform_set_drvdata(ofdev, dt); 123 + return 0; 124 + 125 + out_disable_clk: 126 + clk_disable_unprepare(dt->clk); 127 + clk_put(dt->clk); 128 + 129 + return ret; 130 + } 131 + 132 + static int denali_dt_remove(struct platform_device *ofdev) 133 + { 134 + struct denali_dt *dt = platform_get_drvdata(ofdev); 135 + 136 + denali_remove(&dt->denali); 137 + clk_disable(dt->clk); 138 + 
clk_put(dt->clk); 139 + 140 + return 0; 141 + } 142 + 143 + static struct platform_driver denali_dt_driver = { 144 + .probe = denali_dt_probe, 145 + .remove = denali_dt_remove, 146 + .driver = { 147 + .name = "denali-nand-dt", 148 + .owner = THIS_MODULE, 149 + .of_match_table = of_match_ptr(denali_nand_dt_ids), 150 + }, 151 + }; 152 + 153 + static int __init denali_init_dt(void) 154 + { 155 + return platform_driver_register(&denali_dt_driver); 156 + } 157 + module_init(denali_init_dt); 158 + 159 + static void __exit denali_exit_dt(void) 160 + { 161 + platform_driver_unregister(&denali_dt_driver); 162 + } 163 + module_exit(denali_exit_dt); 164 + 165 + MODULE_LICENSE("GPL"); 166 + MODULE_AUTHOR("Jamie Iles"); 167 + MODULE_DESCRIPTION("DT driver for Denali NAND controller");
+144
drivers/mtd/nand/denali_pci.c
··· 1 + /* 2 + * NAND Flash Controller Device Driver 3 + * Copyright © 2009-2010, Intel Corporation and its suppliers. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + */ 14 + #include <linux/kernel.h> 15 + #include <linux/module.h> 16 + #include <linux/pci.h> 17 + #include <linux/slab.h> 18 + 19 + #include "denali.h" 20 + 21 + #define DENALI_NAND_NAME "denali-nand-pci" 22 + 23 + /* List of platforms this NAND controller has be integrated into */ 24 + static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = { 25 + { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, 26 + { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST }, 27 + { /* end: all zeroes */ } 28 + }; 29 + MODULE_DEVICE_TABLE(pci, denali_pci_ids); 30 + 31 + static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 32 + { 33 + int ret = -ENODEV; 34 + resource_size_t csr_base, mem_base; 35 + unsigned long csr_len, mem_len; 36 + struct denali_nand_info *denali; 37 + 38 + denali = kzalloc(sizeof(*denali), GFP_KERNEL); 39 + if (!denali) 40 + return -ENOMEM; 41 + 42 + ret = pci_enable_device(dev); 43 + if (ret) { 44 + pr_err("Spectra: pci_enable_device failed.\n"); 45 + goto failed_alloc_memery; 46 + } 47 + 48 + if (id->driver_data == INTEL_CE4100) { 49 + denali->platform = INTEL_CE4100; 50 + mem_base = pci_resource_start(dev, 0); 51 + mem_len = pci_resource_len(dev, 1); 52 + csr_base = pci_resource_start(dev, 1); 53 + csr_len = pci_resource_len(dev, 1); 54 + } else { 55 + denali->platform = INTEL_MRST; 56 + csr_base = pci_resource_start(dev, 0); 57 + csr_len = pci_resource_len(dev, 
0); 58 + mem_base = pci_resource_start(dev, 1); 59 + mem_len = pci_resource_len(dev, 1); 60 + if (!mem_len) { 61 + mem_base = csr_base + csr_len; 62 + mem_len = csr_len; 63 + } 64 + } 65 + 66 + pci_set_master(dev); 67 + denali->dev = &dev->dev; 68 + denali->irq = dev->irq; 69 + 70 + ret = pci_request_regions(dev, DENALI_NAND_NAME); 71 + if (ret) { 72 + pr_err("Spectra: Unable to request memory regions\n"); 73 + goto failed_enable_dev; 74 + } 75 + 76 + denali->flash_reg = ioremap_nocache(csr_base, csr_len); 77 + if (!denali->flash_reg) { 78 + pr_err("Spectra: Unable to remap memory region\n"); 79 + ret = -ENOMEM; 80 + goto failed_req_regions; 81 + } 82 + 83 + denali->flash_mem = ioremap_nocache(mem_base, mem_len); 84 + if (!denali->flash_mem) { 85 + pr_err("Spectra: ioremap_nocache failed!"); 86 + ret = -ENOMEM; 87 + goto failed_remap_reg; 88 + } 89 + 90 + ret = denali_init(denali); 91 + if (ret) 92 + goto failed_remap_mem; 93 + 94 + pci_set_drvdata(dev, denali); 95 + 96 + return 0; 97 + 98 + failed_remap_mem: 99 + iounmap(denali->flash_mem); 100 + failed_remap_reg: 101 + iounmap(denali->flash_reg); 102 + failed_req_regions: 103 + pci_release_regions(dev); 104 + failed_enable_dev: 105 + pci_disable_device(dev); 106 + failed_alloc_memery: 107 + kfree(denali); 108 + 109 + return ret; 110 + } 111 + 112 + /* driver exit point */ 113 + static void denali_pci_remove(struct pci_dev *dev) 114 + { 115 + struct denali_nand_info *denali = pci_get_drvdata(dev); 116 + 117 + denali_remove(denali); 118 + iounmap(denali->flash_reg); 119 + iounmap(denali->flash_mem); 120 + pci_release_regions(dev); 121 + pci_disable_device(dev); 122 + pci_set_drvdata(dev, NULL); 123 + kfree(denali); 124 + } 125 + 126 + static struct pci_driver denali_pci_driver = { 127 + .name = DENALI_NAND_NAME, 128 + .id_table = denali_pci_ids, 129 + .probe = denali_pci_probe, 130 + .remove = denali_pci_remove, 131 + }; 132 + 133 + static int denali_init_pci(void) 134 + { 135 + pr_info("Spectra MTD driver built on 
%s @ %s\n", __DATE__, __TIME__); 136 + return pci_register_driver(&denali_pci_driver); 137 + } 138 + module_init(denali_init_pci); 139 + 140 + static void denali_exit_pci(void) 141 + { 142 + pci_unregister_driver(&denali_pci_driver); 143 + } 144 + module_exit(denali_exit_pci);
-2
drivers/mtd/nand/diskonchip.c
··· 53 53 0xe0000, 0xe2000, 0xe4000, 0xe6000, 54 54 0xe8000, 0xea000, 0xec000, 0xee000, 55 55 #endif /* CONFIG_MTD_DOCPROBE_HIGH */ 56 - #else 57 - #warning Unknown architecture for DiskOnChip. No default probe locations defined 58 56 #endif 59 57 0xffffffff }; 60 58
+64 -9
drivers/mtd/nand/docg4.c
··· 46 46 #include <linux/bitrev.h> 47 47 48 48 /* 49 + * In "reliable mode" consecutive 2k pages are used in parallel (in some 50 + * fashion) to store the same data. The data can be read back from the 51 + * even-numbered pages in the normal manner; odd-numbered pages will appear to 52 + * contain junk. Systems that boot from the docg4 typically write the secondary 53 + * program loader (SPL) code in this mode. The SPL is loaded by the initial 54 + * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped 55 + * to the reset vector address). This module parameter enables you to use this 56 + * driver to write the SPL. When in this mode, no more than 2k of data can be 57 + * written at a time, because the addresses do not increment in the normal 58 + * manner, and the starting offset must be within an even-numbered 2k region; 59 + * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800, 60 + * 0x1a00, ... Reliable mode is a special case and should not be used unless 61 + * you know what you're doing. 62 + */ 63 + static bool reliable_mode; 64 + module_param(reliable_mode, bool, 0); 65 + MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode"); 66 + 67 + /* 49 68 * You'll want to ignore badblocks if you're reading a partition that contains 50 69 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since 51 70 * it does not use mtd nand's method for marking bad blocks (using oob area). 
··· 132 113 #define DOCG4_SEQ_PAGEWRITE 0x16 133 114 #define DOCG4_SEQ_PAGEPROG 0x1e 134 115 #define DOCG4_SEQ_BLOCKERASE 0x24 116 + #define DOCG4_SEQ_SETMODE 0x45 135 117 136 118 /* DOC_FLASHCOMMAND register commands */ 137 119 #define DOCG4_CMD_PAGE_READ 0x00 ··· 142 122 #define DOC_CMD_PROG_BLOCK_ADDR 0x60 143 123 #define DOCG4_CMD_PAGEWRITE 0x80 144 124 #define DOC_CMD_PROG_CYCLE2 0x10 125 + #define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */ 126 + #define DOC_CMD_RELIABLE_MODE 0x22 145 127 #define DOC_CMD_RESET 0xff 146 128 147 129 /* DOC_POWERMODE register bits */ ··· 212 190 #define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */ 213 191 214 192 #define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */ 193 + #define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */ 215 194 216 195 /* 217 - * Oob bytes 0 - 6 are available to the user. 218 - * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc. 196 + * Bytes 0, 1 are used as badblock marker. 197 + * Bytes 2 - 6 are available to the user. 198 + * Byte 7 is hamming ecc for first 7 oob bytes only. 199 + * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14. 219 200 * Byte 15 (the last) is used by the driver as a "page written" flag. 
220 201 */ 221 202 static struct nand_ecclayout docg4_oobinfo = { 222 203 .eccbytes = 9, 223 204 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, 224 - .oobavail = 7, 225 - .oobfree = { {0, 7} } 205 + .oobavail = 5, 206 + .oobfree = { {.offset = 2, .length = 5} } 226 207 }; 227 208 228 209 /* ··· 636 611 dev_dbg(doc->dev, 637 612 "docg4: %s: g4 addr: %x\n", __func__, docg4_addr); 638 613 sequence_reset(mtd); 614 + 615 + if (unlikely(reliable_mode)) { 616 + writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE); 617 + writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND); 618 + writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND); 619 + write_nop(docptr); 620 + } 621 + 639 622 writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE); 640 623 writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND); 641 624 write_nop(docptr); ··· 724 691 break; 725 692 726 693 case NAND_CMD_SEQIN: 694 + if (unlikely(reliable_mode)) { 695 + uint16_t g4_page = g4_addr >> 16; 696 + 697 + /* writes to odd-numbered 2k pages are invalid */ 698 + if (g4_page & 0x01) 699 + dev_warn(doc->dev, 700 + "invalid reliable mode address\n"); 701 + } 702 + 727 703 write_page_prologue(mtd, g4_addr); 728 704 729 705 /* hack for deferred write of oob bytes */ ··· 1021 979 struct docg4_priv *doc = nand->priv; 1022 980 uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0); 1023 981 uint8_t *buf; 1024 - int i, block, status; 982 + int i, block; 983 + __u32 eccfailed_stats = mtd->ecc_stats.failed; 1025 984 1026 985 buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); 1027 986 if (buf == NULL) 1028 987 return -ENOMEM; 1029 988 1030 989 read_page_prologue(mtd, g4_addr); 1031 - status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE); 1032 - if (status) 1033 - goto exit; 990 + docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE); 1034 991 1035 992 /* 1036 993 * If no memory-based bbt was created, exit. 
This will happen if module ··· 1040 999 */ 1041 1000 if (nand->bbt == NULL) /* no memory-based bbt */ 1042 1001 goto exit; 1002 + 1003 + if (mtd->ecc_stats.failed > eccfailed_stats) { 1004 + /* 1005 + * Whoops, an ecc failure ocurred reading the factory bbt. 1006 + * It is stored redundantly, so we get another chance. 1007 + */ 1008 + eccfailed_stats = mtd->ecc_stats.failed; 1009 + docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE); 1010 + if (mtd->ecc_stats.failed > eccfailed_stats) { 1011 + dev_warn(doc->dev, 1012 + "The factory bbt could not be read!\n"); 1013 + goto exit; 1014 + } 1015 + } 1043 1016 1044 1017 /* 1045 1018 * Parse factory bbt and update memory-based bbt. Factory bbt format is ··· 1074 1019 } 1075 1020 exit: 1076 1021 kfree(buf); 1077 - return status; 1022 + return 0; 1078 1023 } 1079 1024 1080 1025 static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+1 -16
drivers/mtd/nand/fsl_elbc_nand.c
··· 109 109 }; 110 110 111 111 /* 112 - * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset 113 - * 1, so we have to adjust bad block pattern. This pattern should be used for 114 - * x8 chips only. So far hardware does not support x16 chips anyway. 115 - */ 116 - static u8 scan_ff_pattern[] = { 0xff, }; 117 - 118 - static struct nand_bbt_descr largepage_memorybased = { 119 - .options = 0, 120 - .offs = 0, 121 - .len = 1, 122 - .pattern = scan_ff_pattern, 123 - }; 124 - 125 - /* 126 112 * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt, 127 113 * interfere with ECC positions, that's why we implement our own descriptors. 128 114 * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0. ··· 685 699 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 686 700 &fsl_elbc_oob_lp_eccm1 : 687 701 &fsl_elbc_oob_lp_eccm0; 688 - chip->badblock_pattern = &largepage_memorybased; 689 702 } 690 703 } else { 691 704 dev_err(priv->dev, ··· 799 814 800 815 static DEFINE_MUTEX(fsl_elbc_nand_mutex); 801 816 802 - static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev) 817 + static int fsl_elbc_nand_probe(struct platform_device *pdev) 803 818 { 804 819 struct fsl_lbc_regs __iomem *lbc; 805 820 struct fsl_elbc_mtd *priv;
+3 -3
drivers/mtd/nand/fsl_ifc_nand.c
··· 389 389 timing = IFC_FIR_OP_RBCD; 390 390 391 391 out_be32(&ifc->ifc_nand.nand_fir0, 392 - (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 392 + (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 393 393 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 394 394 (timing << IFC_NAND_FIR0_OP2_SHIFT)); 395 395 out_be32(&ifc->ifc_nand.nand_fcr0, ··· 754 754 755 755 /* READID */ 756 756 out_be32(&ifc->ifc_nand.nand_fir0, 757 - (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 757 + (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 758 758 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 759 759 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 760 760 out_be32(&ifc->ifc_nand.nand_fcr0, ··· 922 922 923 923 static DEFINE_MUTEX(fsl_ifc_nand_mutex); 924 924 925 - static int __devinit fsl_ifc_nand_probe(struct platform_device *dev) 925 + static int fsl_ifc_nand_probe(struct platform_device *dev) 926 926 { 927 927 struct fsl_ifc_regs __iomem *ifc; 928 928 struct fsl_ifc_mtd *priv;
+4 -4
drivers/mtd/nand/fsl_upm.c
··· 152 152 fun_wait_rnb(fun); 153 153 } 154 154 155 - static int __devinit fun_chip_init(struct fsl_upm_nand *fun, 155 + static int fun_chip_init(struct fsl_upm_nand *fun, 156 156 const struct device_node *upm_np, 157 157 const struct resource *io_res) 158 158 { ··· 201 201 return ret; 202 202 } 203 203 204 - static int __devinit fun_probe(struct platform_device *ofdev) 204 + static int fun_probe(struct platform_device *ofdev) 205 205 { 206 206 struct fsl_upm_nand *fun; 207 207 struct resource io_res; ··· 318 318 return ret; 319 319 } 320 320 321 - static int __devexit fun_remove(struct platform_device *ofdev) 321 + static int fun_remove(struct platform_device *ofdev) 322 322 { 323 323 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); 324 324 int i; ··· 350 350 .of_match_table = of_fun_match, 351 351 }, 352 352 .probe = fun_probe, 353 - .remove = __devexit_p(fun_remove), 353 + .remove = fun_remove, 354 354 }; 355 355 356 356 module_platform_driver(of_fun_driver);
+44 -62
drivers/mtd/nand/fsmc_nand.c
··· 361 361 struct nand_chip *this = mtd->priv; 362 362 struct fsmc_nand_data *host = container_of(mtd, 363 363 struct fsmc_nand_data, mtd); 364 - void *__iomem *regs = host->regs_va; 364 + void __iomem *regs = host->regs_va; 365 365 unsigned int bank = host->bank; 366 366 367 367 if (ctrl & NAND_CTRL_CHANGE) { ··· 383 383 pc |= FSMC_ENABLE; 384 384 else 385 385 pc &= ~FSMC_ENABLE; 386 - writel(pc, FSMC_NAND_REG(regs, bank, PC)); 386 + writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC)); 387 387 } 388 388 389 389 mb(); 390 390 391 391 if (cmd != NAND_CMD_NONE) 392 - writeb(cmd, this->IO_ADDR_W); 392 + writeb_relaxed(cmd, this->IO_ADDR_W); 393 393 } 394 394 395 395 /* ··· 426 426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT; 427 427 428 428 if (busw) 429 - writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC)); 429 + writel_relaxed(value | FSMC_DEVWID_16, 430 + FSMC_NAND_REG(regs, bank, PC)); 430 431 else 431 - writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC)); 432 + writel_relaxed(value | FSMC_DEVWID_8, 433 + FSMC_NAND_REG(regs, bank, PC)); 432 434 433 - writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar, 435 + writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar, 434 436 FSMC_NAND_REG(regs, bank, PC)); 435 - writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM)); 436 - writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB)); 437 + writel_relaxed(thiz | thold | twait | tset, 438 + FSMC_NAND_REG(regs, bank, COMM)); 439 + writel_relaxed(thiz | thold | twait | tset, 440 + FSMC_NAND_REG(regs, bank, ATTRIB)); 437 441 } 438 442 439 443 /* ··· 450 446 void __iomem *regs = host->regs_va; 451 447 uint32_t bank = host->bank; 452 448 453 - writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256, 449 + writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256, 454 450 FSMC_NAND_REG(regs, bank, PC)); 455 - writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN, 451 + 
writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN, 456 452 FSMC_NAND_REG(regs, bank, PC)); 457 - writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN, 453 + writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN, 458 454 FSMC_NAND_REG(regs, bank, PC)); 459 455 } 460 456 ··· 474 470 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; 475 471 476 472 do { 477 - if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY) 473 + if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY) 478 474 break; 479 475 else 480 476 cond_resched(); ··· 485 481 return -ETIMEDOUT; 486 482 } 487 483 488 - ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1)); 484 + ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); 489 485 ecc[0] = (uint8_t) (ecc_tmp >> 0); 490 486 ecc[1] = (uint8_t) (ecc_tmp >> 8); 491 487 ecc[2] = (uint8_t) (ecc_tmp >> 16); 492 488 ecc[3] = (uint8_t) (ecc_tmp >> 24); 493 489 494 - ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2)); 490 + ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); 495 491 ecc[4] = (uint8_t) (ecc_tmp >> 0); 496 492 ecc[5] = (uint8_t) (ecc_tmp >> 8); 497 493 ecc[6] = (uint8_t) (ecc_tmp >> 16); 498 494 ecc[7] = (uint8_t) (ecc_tmp >> 24); 499 495 500 - ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3)); 496 + ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); 501 497 ecc[8] = (uint8_t) (ecc_tmp >> 0); 502 498 ecc[9] = (uint8_t) (ecc_tmp >> 8); 503 499 ecc[10] = (uint8_t) (ecc_tmp >> 16); 504 500 ecc[11] = (uint8_t) (ecc_tmp >> 24); 505 501 506 - ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS)); 502 + ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); 507 503 ecc[12] = (uint8_t) (ecc_tmp >> 16); 508 504 509 505 return 0; ··· 523 519 uint32_t bank = host->bank; 524 520 uint32_t ecc_tmp; 525 521 526 - ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1)); 522 + ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); 527 523 ecc[0] = (uint8_t) (ecc_tmp >> 0); 528 524 ecc[1] = (uint8_t) 
(ecc_tmp >> 8); 529 525 ecc[2] = (uint8_t) (ecc_tmp >> 16); ··· 605 601 dma_async_issue_pending(chan); 606 602 607 603 ret = 608 - wait_for_completion_interruptible_timeout(&host->dma_access_complete, 604 + wait_for_completion_timeout(&host->dma_access_complete, 609 605 msecs_to_jiffies(3000)); 610 606 if (ret <= 0) { 611 607 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); ··· 632 628 uint32_t *p = (uint32_t *)buf; 633 629 len = len >> 2; 634 630 for (i = 0; i < len; i++) 635 - writel(p[i], chip->IO_ADDR_W); 631 + writel_relaxed(p[i], chip->IO_ADDR_W); 636 632 } else { 637 633 for (i = 0; i < len; i++) 638 - writeb(buf[i], chip->IO_ADDR_W); 634 + writeb_relaxed(buf[i], chip->IO_ADDR_W); 639 635 } 640 636 } 641 637 ··· 655 651 uint32_t *p = (uint32_t *)buf; 656 652 len = len >> 2; 657 653 for (i = 0; i < len; i++) 658 - p[i] = readl(chip->IO_ADDR_R); 654 + p[i] = readl_relaxed(chip->IO_ADDR_R); 659 655 } else { 660 656 for (i = 0; i < len; i++) 661 - buf[i] = readb(chip->IO_ADDR_R); 657 + buf[i] = readb_relaxed(chip->IO_ADDR_R); 662 658 } 663 659 } 664 660 ··· 787 783 uint32_t num_err, i; 788 784 uint32_t ecc1, ecc2, ecc3, ecc4; 789 785 790 - num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF; 786 + num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF; 791 787 792 788 /* no bit flipping */ 793 789 if (likely(num_err == 0)) ··· 830 826 * uint64_t array and error offset indexes are populated in err_idx 831 827 * array 832 828 */ 833 - ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1)); 834 - ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2)); 835 - ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3)); 836 - ecc4 = readl(FSMC_NAND_REG(regs, bank, STS)); 829 + ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); 830 + ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); 831 + ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); 832 + ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); 837 833 838 834 err_idx[0] = (ecc1 >> 0) & 0x1FFF; 839 
835 err_idx[1] = (ecc1 >> 13) & 0x1FFF; ··· 864 860 } 865 861 866 862 #ifdef CONFIG_OF 867 - static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, 863 + static int fsmc_nand_probe_config_dt(struct platform_device *pdev, 868 864 struct device_node *np) 869 865 { 870 866 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); ··· 880 876 return -EINVAL; 881 877 } 882 878 } 883 - of_property_read_u32(np, "st,ale-off", &pdata->ale_off); 884 - of_property_read_u32(np, "st,cle-off", &pdata->cle_off); 885 879 if (of_get_property(np, "nand-skip-bbtscan", NULL)) 886 880 pdata->options = NAND_SKIP_BBTSCAN; 887 881 888 882 return 0; 889 883 } 890 884 #else 891 - static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, 885 + static int fsmc_nand_probe_config_dt(struct platform_device *pdev, 892 886 struct device_node *np) 893 887 { 894 888 return -ENOSYS; ··· 937 935 if (!res) 938 936 return -EINVAL; 939 937 940 - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), 941 - pdev->name)) { 942 - dev_err(&pdev->dev, "Failed to get memory data resourse\n"); 943 - return -ENOENT; 944 - } 945 - 946 - host->data_pa = (dma_addr_t)res->start; 947 - host->data_va = devm_ioremap(&pdev->dev, res->start, 948 - resource_size(res)); 938 + host->data_va = devm_request_and_ioremap(&pdev->dev, res); 949 939 if (!host->data_va) { 950 940 dev_err(&pdev->dev, "data ioremap failed\n"); 951 941 return -ENOMEM; 952 942 } 943 + host->data_pa = (dma_addr_t)res->start; 953 944 954 - if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off, 955 - resource_size(res), pdev->name)) { 956 - dev_err(&pdev->dev, "Failed to get memory ale resourse\n"); 957 - return -ENOENT; 958 - } 945 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); 946 + if (!res) 947 + return -EINVAL; 959 948 960 - host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off, 961 - resource_size(res)); 949 + host->addr_va = 
devm_request_and_ioremap(&pdev->dev, res); 962 950 if (!host->addr_va) { 963 951 dev_err(&pdev->dev, "ale ioremap failed\n"); 964 952 return -ENOMEM; 965 953 } 966 954 967 - if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off, 968 - resource_size(res), pdev->name)) { 969 - dev_err(&pdev->dev, "Failed to get memory cle resourse\n"); 970 - return -ENOENT; 971 - } 955 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 956 + if (!res) 957 + return -EINVAL; 972 958 973 - host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off, 974 - resource_size(res)); 959 + host->cmd_va = devm_request_and_ioremap(&pdev->dev, res); 975 960 if (!host->cmd_va) { 976 961 dev_err(&pdev->dev, "ale ioremap failed\n"); 977 962 return -ENOMEM; ··· 968 979 if (!res) 969 980 return -EINVAL; 970 981 971 - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), 972 - pdev->name)) { 973 - dev_err(&pdev->dev, "Failed to get memory regs resourse\n"); 974 - return -ENOENT; 975 - } 976 - 977 - host->regs_va = devm_ioremap(&pdev->dev, res->start, 978 - resource_size(res)); 982 + host->regs_va = devm_request_and_ioremap(&pdev->dev, res); 979 983 if (!host->regs_va) { 980 984 dev_err(&pdev->dev, "regs ioremap failed\n"); 981 985 return -ENOMEM;
+21 -13
drivers/mtd/nand/gpio.c
··· 90 90 { 91 91 struct nand_chip *this = mtd->priv; 92 92 93 - writesb(this->IO_ADDR_W, buf, len); 93 + iowrite8_rep(this->IO_ADDR_W, buf, len); 94 94 } 95 95 96 96 static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len) 97 97 { 98 98 struct nand_chip *this = mtd->priv; 99 99 100 - readsb(this->IO_ADDR_R, buf, len); 100 + ioread8_rep(this->IO_ADDR_R, buf, len); 101 101 } 102 102 103 103 static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, ··· 106 106 struct nand_chip *this = mtd->priv; 107 107 108 108 if (IS_ALIGNED((unsigned long)buf, 2)) { 109 - writesw(this->IO_ADDR_W, buf, len>>1); 109 + iowrite16_rep(this->IO_ADDR_W, buf, len>>1); 110 110 } else { 111 111 int i; 112 112 unsigned short *ptr = (unsigned short *)buf; ··· 121 121 struct nand_chip *this = mtd->priv; 122 122 123 123 if (IS_ALIGNED((unsigned long)buf, 2)) { 124 - readsw(this->IO_ADDR_R, buf, len>>1); 124 + ioread16_rep(this->IO_ADDR_R, buf, len>>1); 125 125 } else { 126 126 int i; 127 127 unsigned short *ptr = (unsigned short *)buf; ··· 134 134 static int gpio_nand_devready(struct mtd_info *mtd) 135 135 { 136 136 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); 137 - return gpio_get_value(gpiomtd->plat.gpio_rdy); 137 + 138 + if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) 139 + return gpio_get_value(gpiomtd->plat.gpio_rdy); 140 + 141 + return 1; 138 142 } 139 143 140 144 #ifdef CONFIG_OF ··· 231 227 return platform_get_resource(pdev, IORESOURCE_MEM, 1); 232 228 } 233 229 234 - static int __devexit gpio_nand_remove(struct platform_device *dev) 230 + static int gpio_nand_remove(struct platform_device *dev) 235 231 { 236 232 struct gpiomtd *gpiomtd = platform_get_drvdata(dev); 237 233 struct resource *res; ··· 256 252 gpio_free(gpiomtd->plat.gpio_nce); 257 253 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 258 254 gpio_free(gpiomtd->plat.gpio_nwp); 259 - gpio_free(gpiomtd->plat.gpio_rdy); 255 + if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) 256 + 
gpio_free(gpiomtd->plat.gpio_rdy); 260 257 261 258 kfree(gpiomtd); 262 259 ··· 282 277 return ptr; 283 278 } 284 279 285 - static int __devinit gpio_nand_probe(struct platform_device *dev) 280 + static int gpio_nand_probe(struct platform_device *dev) 286 281 { 287 282 struct gpiomtd *gpiomtd; 288 283 struct nand_chip *this; ··· 341 336 if (ret) 342 337 goto err_cle; 343 338 gpio_direction_output(gpiomtd->plat.gpio_cle, 0); 344 - ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY"); 345 - if (ret) 346 - goto err_rdy; 347 - gpio_direction_input(gpiomtd->plat.gpio_rdy); 339 + if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) { 340 + ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY"); 341 + if (ret) 342 + goto err_rdy; 343 + gpio_direction_input(gpiomtd->plat.gpio_rdy); 344 + } 348 345 349 346 350 347 this->IO_ADDR_W = this->IO_ADDR_R; ··· 393 386 err_wp: 394 387 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 395 388 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 396 - gpio_free(gpiomtd->plat.gpio_rdy); 389 + if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) 390 + gpio_free(gpiomtd->plat.gpio_rdy); 397 391 err_rdy: 398 392 gpio_free(gpiomtd->plat.gpio_cle); 399 393 err_cle:
+9 -1
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
··· 18 18 * with this program; if not, write to the Free Software Foundation, Inc., 19 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 20 */ 21 - #include <linux/mtd/gpmi-nand.h> 22 21 #include <linux/delay.h> 23 22 #include <linux/clk.h> 24 23 ··· 164 165 ret = gpmi_reset_block(r->gpmi_regs, false); 165 166 if (ret) 166 167 goto err_out; 168 + 169 + /* 170 + * Reset BCH here, too. We got failures otherwise :( 171 + * See later BCH reset for explanation of MX23 handling 172 + */ 173 + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 174 + if (ret) 175 + goto err_out; 176 + 167 177 168 178 /* Choose NAND mode. */ 169 179 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+24 -17
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/interrupt.h> 27 27 #include <linux/module.h> 28 - #include <linux/mtd/gpmi-nand.h> 29 28 #include <linux/mtd/partitions.h> 30 29 #include <linux/pinctrl/consumer.h> 31 30 #include <linux/of.h> 32 31 #include <linux/of_device.h> 33 32 #include <linux/of_mtd.h> 34 33 #include "gpmi-nand.h" 34 + 35 + /* Resource names for the GPMI NAND driver. */ 36 + #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" 37 + #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" 38 + #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" 39 + #define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma" 35 40 36 41 /* add our owner bbt descriptor */ 37 42 static uint8_t scan_ff_pattern[] = { 0xff }; ··· 227 222 228 223 ret = dma_map_sg(this->dev, sgl, 1, dr); 229 224 if (ret == 0) 230 - pr_err("map failed.\n"); 225 + pr_err("DMA mapping failed.\n"); 231 226 232 227 this->direct_dma_map_ok = false; 233 228 } ··· 319 314 return 0; 320 315 } 321 316 322 - static int __devinit 317 + static int 323 318 acquire_register_block(struct gpmi_nand_data *this, const char *res_name) 324 319 { 325 320 struct platform_device *pdev = this->pdev; ··· 360 355 res->bch_regs = NULL; 361 356 } 362 357 363 - static int __devinit 358 + static int 364 359 acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) 365 360 { 366 361 struct platform_device *pdev = this->pdev; ··· 427 422 } 428 423 } 429 424 430 - static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) 425 + static int acquire_dma_channels(struct gpmi_nand_data *this) 431 426 { 432 427 struct platform_device *pdev = this->pdev; 433 428 struct resource *r_dma; ··· 461 456 462 457 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); 463 458 if (!dma_chan) { 464 - pr_err("dma_request_channel failed.\n"); 459 + pr_err("Failed to request DMA channel.\n"); 465 460 goto acquire_err; 466 461 } 467 462 ··· 492 487 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", 493 488 }; 494 489 495 - 
static int __devinit gpmi_get_clks(struct gpmi_nand_data *this) 490 + static int gpmi_get_clks(struct gpmi_nand_data *this) 496 491 { 497 492 struct resources *r = &this->resources; 498 493 char **extra_clks = NULL; ··· 538 533 return -ENOMEM; 539 534 } 540 535 541 - static int __devinit acquire_resources(struct gpmi_nand_data *this) 536 + static int acquire_resources(struct gpmi_nand_data *this) 542 537 { 543 538 struct pinctrl *pinctrl; 544 539 int ret; ··· 588 583 release_dma_channels(this); 589 584 } 590 585 591 - static int __devinit init_hardware(struct gpmi_nand_data *this) 586 + static int init_hardware(struct gpmi_nand_data *this) 592 587 { 593 588 int ret; 594 589 ··· 630 625 length, DMA_FROM_DEVICE); 631 626 if (dma_mapping_error(dev, dest_phys)) { 632 627 if (alt_size < length) { 633 - pr_err("Alternate buffer is too small\n"); 628 + pr_err("%s, Alternate buffer is too small\n", 629 + __func__); 634 630 return -ENOMEM; 635 631 } 636 632 goto map_failed; ··· 681 675 DMA_TO_DEVICE); 682 676 if (dma_mapping_error(dev, source_phys)) { 683 677 if (alt_size < length) { 684 - pr_err("Alternate buffer is too small\n"); 678 + pr_err("%s, Alternate buffer is too small\n", 679 + __func__); 685 680 return -ENOMEM; 686 681 } 687 682 goto map_failed; ··· 770 763 771 764 error_alloc: 772 765 gpmi_free_dma_buffer(this); 773 - pr_err("allocate DMA buffer ret!!\n"); 766 + pr_err("Error allocating DMA buffers!\n"); 774 767 return -ENOMEM; 775 768 } 776 769 ··· 1481 1474 /* Set up the NFC geometry which is used by BCH. 
*/ 1482 1475 ret = bch_set_geometry(this); 1483 1476 if (ret) { 1484 - pr_err("set geometry ret : %d\n", ret); 1477 + pr_err("Error setting BCH geometry : %d\n", ret); 1485 1478 return ret; 1486 1479 } 1487 1480 ··· 1542 1535 gpmi_free_dma_buffer(this); 1543 1536 } 1544 1537 1545 - static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) 1538 + static int gpmi_nfc_init(struct gpmi_nand_data *this) 1546 1539 { 1547 1540 struct mtd_info *mtd = &this->mtd; 1548 1541 struct nand_chip *chip = &this->nand; ··· 1625 1618 }; 1626 1619 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); 1627 1620 1628 - static int __devinit gpmi_nand_probe(struct platform_device *pdev) 1621 + static int gpmi_nand_probe(struct platform_device *pdev) 1629 1622 { 1630 1623 struct gpmi_nand_data *this; 1631 1624 const struct of_device_id *of_id; ··· 1675 1668 return ret; 1676 1669 } 1677 1670 1678 - static int __devexit gpmi_nand_remove(struct platform_device *pdev) 1671 + static int gpmi_nand_remove(struct platform_device *pdev) 1679 1672 { 1680 1673 struct gpmi_nand_data *this = platform_get_drvdata(pdev); 1681 1674 ··· 1692 1685 .of_match_table = gpmi_nand_id_table, 1693 1686 }, 1694 1687 .probe = gpmi_nand_probe, 1695 - .remove = __devexit_p(gpmi_nand_remove), 1688 + .remove = gpmi_nand_remove, 1696 1689 .id_table = gpmi_ids, 1697 1690 }; 1698 1691 module_platform_driver(gpmi_nand_driver);
-1
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
··· 130 130 /* System Interface */ 131 131 struct device *dev; 132 132 struct platform_device *pdev; 133 - struct gpmi_nand_platform_data *pdata; 134 133 135 134 /* Resources */ 136 135 struct resources resources;
+9 -5
drivers/mtd/nand/jz4740_nand.c
··· 316 316 return ret; 317 317 } 318 318 319 - static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base) 319 + static inline void jz_nand_iounmap_resource(struct resource *res, 320 + void __iomem *base) 320 321 { 321 322 iounmap(base); 322 323 release_mem_region(res->start, resource_size(res)); 323 324 } 324 325 325 - static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) { 326 + static int jz_nand_detect_bank(struct platform_device *pdev, 327 + struct jz_nand *nand, unsigned char bank, 328 + size_t chipnr, uint8_t *nand_maf_id, 329 + uint8_t *nand_dev_id) { 326 330 int ret; 327 331 int gpio; 328 332 char gpio_name[9]; ··· 404 400 return ret; 405 401 } 406 402 407 - static int __devinit jz_nand_probe(struct platform_device *pdev) 403 + static int jz_nand_probe(struct platform_device *pdev) 408 404 { 409 405 int ret; 410 406 struct jz_nand *nand; ··· 545 541 return ret; 546 542 } 547 543 548 - static int __devexit jz_nand_remove(struct platform_device *pdev) 544 + static int jz_nand_remove(struct platform_device *pdev) 549 545 { 550 546 struct jz_nand *nand = platform_get_drvdata(pdev); 551 547 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; ··· 577 573 578 574 static struct platform_driver jz_nand_driver = { 579 575 .probe = jz_nand_probe, 580 - .remove = __devexit_p(jz_nand_remove), 576 + .remove = jz_nand_remove, 581 577 .driver = { 582 578 .name = "jz4740-nand", 583 579 .owner = THIS_MODULE,
+3 -3
drivers/mtd/nand/lpc32xx_mlc.c
··· 655 655 /* 656 656 * Probe for NAND controller 657 657 */ 658 - static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) 658 + static int lpc32xx_nand_probe(struct platform_device *pdev) 659 659 { 660 660 struct lpc32xx_nand_host *host; 661 661 struct mtd_info *mtd; ··· 845 845 /* 846 846 * Remove NAND device 847 847 */ 848 - static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) 848 + static int lpc32xx_nand_remove(struct platform_device *pdev) 849 849 { 850 850 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 851 851 struct mtd_info *mtd = &host->mtd; ··· 907 907 908 908 static struct platform_driver lpc32xx_nand_driver = { 909 909 .probe = lpc32xx_nand_probe, 910 - .remove = __devexit_p(lpc32xx_nand_remove), 910 + .remove = lpc32xx_nand_remove, 911 911 .resume = lpc32xx_nand_resume, 912 912 .suspend = lpc32xx_nand_suspend, 913 913 .driver = {
+3 -3
drivers/mtd/nand/lpc32xx_slc.c
··· 755 755 /* 756 756 * Probe for NAND controller 757 757 */ 758 - static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) 758 + static int lpc32xx_nand_probe(struct platform_device *pdev) 759 759 { 760 760 struct lpc32xx_nand_host *host; 761 761 struct mtd_info *mtd; ··· 949 949 /* 950 950 * Remove NAND device. 951 951 */ 952 - static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) 952 + static int lpc32xx_nand_remove(struct platform_device *pdev) 953 953 { 954 954 uint32_t tmp; 955 955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); ··· 1021 1021 1022 1022 static struct platform_driver lpc32xx_nand_driver = { 1023 1023 .probe = lpc32xx_nand_probe, 1024 - .remove = __devexit_p(lpc32xx_nand_remove), 1024 + .remove = lpc32xx_nand_remove, 1025 1025 .resume = lpc32xx_nand_resume, 1026 1026 .suspend = lpc32xx_nand_suspend, 1027 1027 .driver = {
+4 -4
drivers/mtd/nand/mpc5121_nfc.c
··· 626 626 iounmap(prv->csreg); 627 627 } 628 628 629 - static int __devinit mpc5121_nfc_probe(struct platform_device *op) 629 + static int mpc5121_nfc_probe(struct platform_device *op) 630 630 { 631 631 struct device_node *rootnode, *dn = op->dev.of_node; 632 632 struct device *dev = &op->dev; ··· 827 827 return retval; 828 828 } 829 829 830 - static int __devexit mpc5121_nfc_remove(struct platform_device *op) 830 + static int mpc5121_nfc_remove(struct platform_device *op) 831 831 { 832 832 struct device *dev = &op->dev; 833 833 struct mtd_info *mtd = dev_get_drvdata(dev); ··· 841 841 return 0; 842 842 } 843 843 844 - static struct of_device_id mpc5121_nfc_match[] __devinitdata = { 844 + static struct of_device_id mpc5121_nfc_match[] = { 845 845 { .compatible = "fsl,mpc5121-nfc", }, 846 846 {}, 847 847 }; 848 848 849 849 static struct platform_driver mpc5121_nfc_driver = { 850 850 .probe = mpc5121_nfc_probe, 851 - .remove = __devexit_p(mpc5121_nfc_remove), 851 + .remove = mpc5121_nfc_remove, 852 852 .driver = { 853 853 .name = DRV_NAME, 854 854 .owner = THIS_MODULE,
+7 -5
drivers/mtd/nand/mxc_nand.c
··· 266 266 } 267 267 }; 268 268 269 - static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL }; 269 + static const char const *part_probes[] = { 270 + "cmdlinepart", "RedBoot", "ofpart", NULL }; 270 271 271 272 static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size) 272 273 { ··· 1379 1378 } 1380 1379 #endif 1381 1380 1382 - static int __devinit mxcnd_probe(struct platform_device *pdev) 1381 + static int mxcnd_probe(struct platform_device *pdev) 1383 1382 { 1384 1383 struct nand_chip *this; 1385 1384 struct mtd_info *mtd; ··· 1557 1556 return 0; 1558 1557 1559 1558 escan: 1560 - clk_disable_unprepare(host->clk); 1559 + if (host->clk_act) 1560 + clk_disable_unprepare(host->clk); 1561 1561 1562 1562 return err; 1563 1563 } 1564 1564 1565 - static int __devexit mxcnd_remove(struct platform_device *pdev) 1565 + static int mxcnd_remove(struct platform_device *pdev) 1566 1566 { 1567 1567 struct mxc_nand_host *host = platform_get_drvdata(pdev); 1568 1568 ··· 1582 1580 }, 1583 1581 .id_table = mxcnd_devtype, 1584 1582 .probe = mxcnd_probe, 1585 - .remove = __devexit_p(mxcnd_remove), 1583 + .remove = mxcnd_remove, 1586 1584 }; 1587 1585 module_platform_driver(mxcnd_driver); 1588 1586
+65 -49
drivers/mtd/nand/nand_base.c
··· 93 93 .length = 78} } 94 94 }; 95 95 96 - static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, 97 - int new_state); 96 + static int nand_get_device(struct mtd_info *mtd, int new_state); 98 97 99 98 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 100 99 struct mtd_oob_ops *ops); ··· 129 130 * nand_release_device - [GENERIC] release chip 130 131 * @mtd: MTD device structure 131 132 * 132 - * Deselect, release chip lock and wake up anyone waiting on the device. 133 + * Release chip lock and wake up anyone waiting on the device. 133 134 */ 134 135 static void nand_release_device(struct mtd_info *mtd) 135 136 { 136 137 struct nand_chip *chip = mtd->priv; 137 - 138 - /* De-select the NAND device */ 139 - chip->select_chip(mtd, -1); 140 138 141 139 /* Release the controller and the chip */ 142 140 spin_lock(&chip->controller->lock); ··· 156 160 } 157 161 158 162 /** 159 - * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip 163 + * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 160 164 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 161 165 * @mtd: MTD device structure 162 166 * ··· 299 303 if (getchip) { 300 304 chipnr = (int)(ofs >> chip->chip_shift); 301 305 302 - nand_get_device(chip, mtd, FL_READING); 306 + nand_get_device(mtd, FL_READING); 303 307 304 308 /* Select the NAND device */ 305 309 chip->select_chip(mtd, chipnr); ··· 329 333 i++; 330 334 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); 331 335 332 - if (getchip) 336 + if (getchip) { 337 + chip->select_chip(mtd, -1); 333 338 nand_release_device(mtd); 339 + } 334 340 335 341 return res; 336 342 } ··· 381 383 struct mtd_oob_ops ops; 382 384 loff_t wr_ofs = ofs; 383 385 384 - nand_get_device(chip, mtd, FL_WRITING); 386 + nand_get_device(mtd, FL_WRITING); 385 387 386 388 ops.datbuf = NULL; 387 389 ops.oobbuf = buf; ··· 490 492 void nand_wait_ready(struct mtd_info *mtd) 491 493 
{ 492 494 struct nand_chip *chip = mtd->priv; 493 - unsigned long timeo = jiffies + 2; 495 + unsigned long timeo = jiffies + msecs_to_jiffies(20); 494 496 495 497 /* 400ms timeout */ 496 498 if (in_interrupt() || oops_in_progress) ··· 748 750 749 751 /** 750 752 * nand_get_device - [GENERIC] Get chip for selected access 751 - * @chip: the nand chip descriptor 752 753 * @mtd: MTD device structure 753 754 * @new_state: the state which is requested 754 755 * 755 756 * Get the device and lock it for exclusive access 756 757 */ 757 758 static int 758 - nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) 759 + nand_get_device(struct mtd_info *mtd, int new_state) 759 760 { 761 + struct nand_chip *chip = mtd->priv; 760 762 spinlock_t *lock = &chip->controller->lock; 761 763 wait_queue_head_t *wq = &chip->controller->wq; 762 764 DECLARE_WAITQUEUE(wait, current); ··· 863 865 led_trigger_event(nand_led_trigger, LED_OFF); 864 866 865 867 status = (int)chip->read_byte(mtd); 868 + /* This can happen if in case of timeout or buggy dev_ready */ 869 + WARN_ON(!(status & NAND_STATUS_READY)); 866 870 return status; 867 871 } 868 872 ··· 899 899 /* Call wait ready function */ 900 900 status = chip->waitfunc(mtd, chip); 901 901 /* See if device thinks it succeeded */ 902 - if (status & 0x01) { 902 + if (status & NAND_STATUS_FAIL) { 903 903 pr_debug("%s: error status = 0x%08x\n", 904 904 __func__, status); 905 905 ret = -EIO; ··· 932 932 if (ofs + len == mtd->size) 933 933 len -= mtd->erasesize; 934 934 935 - nand_get_device(chip, mtd, FL_UNLOCKING); 935 + nand_get_device(mtd, FL_UNLOCKING); 936 936 937 937 /* Shift to get chip number */ 938 938 chipnr = ofs >> chip->chip_shift; ··· 950 950 ret = __nand_unlock(mtd, ofs, len, 0); 951 951 952 952 out: 953 + chip->select_chip(mtd, -1); 953 954 nand_release_device(mtd); 954 955 955 956 return ret; ··· 982 981 if (check_offs_len(mtd, ofs, len)) 983 982 ret = -EINVAL; 984 983 985 - nand_get_device(chip, mtd, 
FL_LOCKING); 984 + nand_get_device(mtd, FL_LOCKING); 986 985 987 986 /* Shift to get chip number */ 988 987 chipnr = ofs >> chip->chip_shift; ··· 1005 1004 /* Call wait ready function */ 1006 1005 status = chip->waitfunc(mtd, chip); 1007 1006 /* See if device thinks it succeeded */ 1008 - if (status & 0x01) { 1007 + if (status & NAND_STATUS_FAIL) { 1009 1008 pr_debug("%s: error status = 0x%08x\n", 1010 1009 __func__, status); 1011 1010 ret = -EIO; ··· 1015 1014 ret = __nand_unlock(mtd, ofs, len, 0x1); 1016 1015 1017 1016 out: 1017 + chip->select_chip(mtd, -1); 1018 1018 nand_release_device(mtd); 1019 1019 1020 1020 return ret; ··· 1552 1550 chip->select_chip(mtd, chipnr); 1553 1551 } 1554 1552 } 1553 + chip->select_chip(mtd, -1); 1555 1554 1556 1555 ops->retlen = ops->len - (size_t) readlen; 1557 1556 if (oob) ··· 1580 1577 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1581 1578 size_t *retlen, uint8_t *buf) 1582 1579 { 1583 - struct nand_chip *chip = mtd->priv; 1584 1580 struct mtd_oob_ops ops; 1585 1581 int ret; 1586 1582 1587 - nand_get_device(chip, mtd, FL_READING); 1583 + nand_get_device(mtd, FL_READING); 1588 1584 ops.len = len; 1589 1585 ops.datbuf = buf; 1590 1586 ops.oobbuf = NULL; ··· 1806 1804 chip->select_chip(mtd, chipnr); 1807 1805 } 1808 1806 } 1807 + chip->select_chip(mtd, -1); 1809 1808 1810 1809 ops->oobretlen = ops->ooblen - readlen; 1811 1810 ··· 1830 1827 static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1831 1828 struct mtd_oob_ops *ops) 1832 1829 { 1833 - struct nand_chip *chip = mtd->priv; 1834 1830 int ret = -ENOTSUPP; 1835 1831 1836 1832 ops->retlen = 0; ··· 1841 1839 return -EINVAL; 1842 1840 } 1843 1841 1844 - nand_get_device(chip, mtd, FL_READING); 1842 + nand_get_device(mtd, FL_READING); 1845 1843 1846 1844 switch (ops->mode) { 1847 1845 case MTD_OPS_PLACE_OOB: ··· 2188 2186 chip->select_chip(mtd, chipnr); 2189 2187 2190 2188 /* Check, if it is write protected */ 2191 - if (nand_check_wp(mtd)) 2192 - 
return -EIO; 2189 + if (nand_check_wp(mtd)) { 2190 + ret = -EIO; 2191 + goto err_out; 2192 + } 2193 2193 2194 2194 realpage = (int)(to >> chip->page_shift); 2195 2195 page = realpage & chip->pagemask; ··· 2203 2199 chip->pagebuf = -1; 2204 2200 2205 2201 /* Don't allow multipage oob writes with offset */ 2206 - if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2207 - return -EINVAL; 2202 + if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) { 2203 + ret = -EINVAL; 2204 + goto err_out; 2205 + } 2208 2206 2209 2207 while (1) { 2210 2208 int bytes = mtd->writesize; ··· 2257 2251 ops->retlen = ops->len - writelen; 2258 2252 if (unlikely(oob)) 2259 2253 ops->oobretlen = ops->ooblen; 2254 + 2255 + err_out: 2256 + chip->select_chip(mtd, -1); 2260 2257 return ret; 2261 2258 } 2262 2259 ··· 2311 2302 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2312 2303 size_t *retlen, const uint8_t *buf) 2313 2304 { 2314 - struct nand_chip *chip = mtd->priv; 2315 2305 struct mtd_oob_ops ops; 2316 2306 int ret; 2317 2307 2318 - nand_get_device(chip, mtd, FL_WRITING); 2308 + nand_get_device(mtd, FL_WRITING); 2319 2309 ops.len = len; 2320 2310 ops.datbuf = (uint8_t *)buf; 2321 2311 ops.oobbuf = NULL; ··· 2385 2377 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2386 2378 2387 2379 /* Check, if it is write protected */ 2388 - if (nand_check_wp(mtd)) 2380 + if (nand_check_wp(mtd)) { 2381 + chip->select_chip(mtd, -1); 2389 2382 return -EROFS; 2383 + } 2390 2384 2391 2385 /* Invalidate the page cache, if we write to the cached page */ 2392 2386 if (page == chip->pagebuf) ··· 2400 2390 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask); 2401 2391 else 2402 2392 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2393 + 2394 + chip->select_chip(mtd, -1); 2403 2395 2404 2396 if (status) 2405 2397 return status; ··· 2420 2408 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2421 2409 struct mtd_oob_ops *ops) 
2422 2410 { 2423 - struct nand_chip *chip = mtd->priv; 2424 2411 int ret = -ENOTSUPP; 2425 2412 2426 2413 ops->retlen = 0; ··· 2431 2420 return -EINVAL; 2432 2421 } 2433 2422 2434 - nand_get_device(chip, mtd, FL_WRITING); 2423 + nand_get_device(mtd, FL_WRITING); 2435 2424 2436 2425 switch (ops->mode) { 2437 2426 case MTD_OPS_PLACE_OOB: ··· 2524 2513 return -EINVAL; 2525 2514 2526 2515 /* Grab the lock and see if the device is available */ 2527 - nand_get_device(chip, mtd, FL_ERASING); 2516 + nand_get_device(mtd, FL_ERASING); 2528 2517 2529 2518 /* Shift to get first page */ 2530 2519 page = (int)(instr->addr >> chip->page_shift); ··· 2634 2623 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2635 2624 2636 2625 /* Deselect and wake up anyone waiting on the device */ 2626 + chip->select_chip(mtd, -1); 2637 2627 nand_release_device(mtd); 2638 2628 2639 2629 /* Do call back function */ ··· 2670 2658 */ 2671 2659 static void nand_sync(struct mtd_info *mtd) 2672 2660 { 2673 - struct nand_chip *chip = mtd->priv; 2674 - 2675 2661 pr_debug("%s: called\n", __func__); 2676 2662 2677 2663 /* Grab the lock and see if the device is available */ 2678 - nand_get_device(chip, mtd, FL_SYNCING); 2664 + nand_get_device(mtd, FL_SYNCING); 2679 2665 /* Release it and go back */ 2680 2666 nand_release_device(mtd); 2681 2667 } ··· 2759 2749 */ 2760 2750 static int nand_suspend(struct mtd_info *mtd) 2761 2751 { 2762 - struct nand_chip *chip = mtd->priv; 2763 - 2764 - return nand_get_device(chip, mtd, FL_PM_SUSPENDED); 2752 + return nand_get_device(mtd, FL_PM_SUSPENDED); 2765 2753 } 2766 2754 2767 2755 /** ··· 2857 2849 int i; 2858 2850 int val; 2859 2851 2852 + /* ONFI need to be probed in 8 bits mode */ 2853 + WARN_ON(chip->options & NAND_BUSWIDTH_16); 2860 2854 /* Try ONFI for unknown chip or LP */ 2861 2855 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2862 2856 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || ··· 2923 2913 * 2924 2914 * Check if an ID string is 
repeated within a given sequence of bytes at 2925 2915 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a 2926 - * period of 2). This is a helper function for nand_id_len(). Returns non-zero 2916 + * period of 3). This is a helper function for nand_id_len(). Returns non-zero 2927 2917 * if the repetition has a period of @period; otherwise, returns zero. 2928 2918 */ 2929 2919 static int nand_id_has_period(u8 *id_data, int arrlen, int period) ··· 3252 3242 break; 3253 3243 } 3254 3244 3255 - /* 3256 - * Check, if buswidth is correct. Hardware drivers should set 3257 - * chip correct! 3258 - */ 3259 - if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3245 + if (chip->options & NAND_BUSWIDTH_AUTO) { 3246 + WARN_ON(chip->options & NAND_BUSWIDTH_16); 3247 + chip->options |= busw; 3248 + nand_set_defaults(chip, busw); 3249 + } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3250 + /* 3251 + * Check, if buswidth is correct. Hardware drivers should set 3252 + * chip correct! 3253 + */ 3260 3254 pr_info("NAND device: Manufacturer ID:" 3261 3255 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3262 3256 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); ··· 3299 3285 chip->cmdfunc = nand_command_lp; 3300 3286 3301 3287 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," 3302 - " page size: %d, OOB size: %d\n", 3288 + " %dMiB, page size: %d, OOB size: %d\n", 3303 3289 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name, 3304 3290 chip->onfi_version ? 
chip->onfi_params.model : type->name, 3305 - mtd->writesize, mtd->oobsize); 3291 + (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize); 3306 3292 3307 3293 return type; 3308 3294 } ··· 3341 3327 return PTR_ERR(type); 3342 3328 } 3343 3329 3330 + chip->select_chip(mtd, -1); 3331 + 3344 3332 /* Check for a chip array */ 3345 3333 for (i = 1; i < maxchips; i++) { 3346 3334 chip->select_chip(mtd, i); ··· 3352 3336 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 3353 3337 /* Read manufacturer and device IDs */ 3354 3338 if (nand_maf_id != chip->read_byte(mtd) || 3355 - nand_dev_id != chip->read_byte(mtd)) 3339 + nand_dev_id != chip->read_byte(mtd)) { 3340 + chip->select_chip(mtd, -1); 3356 3341 break; 3342 + } 3343 + chip->select_chip(mtd, -1); 3357 3344 } 3358 3345 if (i > 1) 3359 3346 pr_info("%d NAND chips detected\n", i); ··· 3614 3595 3615 3596 /* Initialize state */ 3616 3597 chip->state = FL_READY; 3617 - 3618 - /* De-select the device */ 3619 - chip->select_chip(mtd, -1); 3620 3598 3621 3599 /* Invalidate the pagebuffer reference */ 3622 3600 chip->pagebuf = -1;
+130 -56
drivers/mtd/nand/nandsim.c
··· 42 42 #include <linux/sched.h> 43 43 #include <linux/fs.h> 44 44 #include <linux/pagemap.h> 45 + #include <linux/seq_file.h> 46 + #include <linux/debugfs.h> 45 47 46 48 /* Default simulator parameters values */ 47 49 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ ··· 107 105 static char *weakpages = NULL; 108 106 static unsigned int bitflips = 0; 109 107 static char *gravepages = NULL; 110 - static unsigned int rptwear = 0; 111 108 static unsigned int overridesize = 0; 112 109 static char *cache_file = NULL; 113 110 static unsigned int bbt; ··· 131 130 module_param(weakpages, charp, 0400); 132 131 module_param(bitflips, uint, 0400); 133 132 module_param(gravepages, charp, 0400); 134 - module_param(rptwear, uint, 0400); 135 133 module_param(overridesize, uint, 0400); 136 134 module_param(cache_file, charp, 0400); 137 135 module_param(bbt, uint, 0400); ··· 162 162 MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]" 163 163 " separated by commas e.g. 1401:2 means page 1401" 164 164 " can be read only twice before failing"); 165 - MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero"); 166 165 MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " 167 166 "The size is specified in erase blocks and as the exponent of a power of two" 168 167 " e.g. 5 means a size of 32 erase blocks"); ··· 285 286 /* Maximum page cache pages needed to read or write a NAND page to the cache_file */ 286 287 #define NS_MAX_HELD_PAGES 16 287 288 289 + struct nandsim_debug_info { 290 + struct dentry *dfs_root; 291 + struct dentry *dfs_wear_report; 292 + }; 293 + 288 294 /* 289 295 * A union to represent flash memory contents and flash buffer. 
290 296 */ ··· 369 365 void *file_buf; 370 366 struct page *held_pages[NS_MAX_HELD_PAGES]; 371 367 int held_cnt; 368 + 369 + struct nandsim_debug_info dbg; 372 370 }; 373 371 374 372 /* ··· 448 442 static unsigned long *erase_block_wear = NULL; 449 443 static unsigned int wear_eb_count = 0; 450 444 static unsigned long total_wear = 0; 451 - static unsigned int rptwear_cnt = 0; 452 445 453 446 /* MTD structure for NAND controller */ 454 447 static struct mtd_info *nsmtd; 448 + 449 + static int nandsim_debugfs_show(struct seq_file *m, void *private) 450 + { 451 + unsigned long wmin = -1, wmax = 0, avg; 452 + unsigned long deciles[10], decile_max[10], tot = 0; 453 + unsigned int i; 454 + 455 + /* Calc wear stats */ 456 + for (i = 0; i < wear_eb_count; ++i) { 457 + unsigned long wear = erase_block_wear[i]; 458 + if (wear < wmin) 459 + wmin = wear; 460 + if (wear > wmax) 461 + wmax = wear; 462 + tot += wear; 463 + } 464 + 465 + for (i = 0; i < 9; ++i) { 466 + deciles[i] = 0; 467 + decile_max[i] = (wmax * (i + 1) + 5) / 10; 468 + } 469 + deciles[9] = 0; 470 + decile_max[9] = wmax; 471 + for (i = 0; i < wear_eb_count; ++i) { 472 + int d; 473 + unsigned long wear = erase_block_wear[i]; 474 + for (d = 0; d < 10; ++d) 475 + if (wear <= decile_max[d]) { 476 + deciles[d] += 1; 477 + break; 478 + } 479 + } 480 + avg = tot / wear_eb_count; 481 + 482 + /* Output wear report */ 483 + seq_printf(m, "Total numbers of erases: %lu\n", tot); 484 + seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count); 485 + seq_printf(m, "Average number of erases: %lu\n", avg); 486 + seq_printf(m, "Maximum number of erases: %lu\n", wmax); 487 + seq_printf(m, "Minimum number of erases: %lu\n", wmin); 488 + for (i = 0; i < 10; ++i) { 489 + unsigned long from = (i ? 
decile_max[i - 1] + 1 : 0); 490 + if (from > decile_max[i]) 491 + continue; 492 + seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n", 493 + from, 494 + decile_max[i], 495 + deciles[i]); 496 + } 497 + 498 + return 0; 499 + } 500 + 501 + static int nandsim_debugfs_open(struct inode *inode, struct file *file) 502 + { 503 + return single_open(file, nandsim_debugfs_show, inode->i_private); 504 + } 505 + 506 + static const struct file_operations dfs_fops = { 507 + .open = nandsim_debugfs_open, 508 + .read = seq_read, 509 + .llseek = seq_lseek, 510 + .release = single_release, 511 + }; 512 + 513 + /** 514 + * nandsim_debugfs_create - initialize debugfs 515 + * @dev: nandsim device description object 516 + * 517 + * This function creates all debugfs files for UBI device @ubi. Returns zero in 518 + * case of success and a negative error code in case of failure. 519 + */ 520 + static int nandsim_debugfs_create(struct nandsim *dev) 521 + { 522 + struct nandsim_debug_info *dbg = &dev->dbg; 523 + struct dentry *dent; 524 + int err; 525 + 526 + if (!IS_ENABLED(CONFIG_DEBUG_FS)) 527 + return 0; 528 + 529 + dent = debugfs_create_dir("nandsim", NULL); 530 + if (IS_ERR_OR_NULL(dent)) { 531 + int err = dent ? -ENODEV : PTR_ERR(dent); 532 + 533 + NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n", 534 + err); 535 + return err; 536 + } 537 + dbg->dfs_root = dent; 538 + 539 + dent = debugfs_create_file("wear_report", S_IRUSR, 540 + dbg->dfs_root, dev, &dfs_fops); 541 + if (IS_ERR_OR_NULL(dent)) 542 + goto out_remove; 543 + dbg->dfs_wear_report = dent; 544 + 545 + return 0; 546 + 547 + out_remove: 548 + debugfs_remove_recursive(dbg->dfs_root); 549 + err = dent ? 
PTR_ERR(dent) : -ENODEV; 550 + return err; 551 + } 552 + 553 + /** 554 + * nandsim_debugfs_remove - destroy all debugfs files 555 + */ 556 + static void nandsim_debugfs_remove(struct nandsim *ns) 557 + { 558 + if (IS_ENABLED(CONFIG_DEBUG_FS)) 559 + debugfs_remove_recursive(ns->dbg.dfs_root); 560 + } 455 561 456 562 /* 457 563 * Allocate array of page pointers, create slab allocation for an array ··· 1029 911 { 1030 912 size_t mem; 1031 913 1032 - if (!rptwear) 1033 - return 0; 1034 914 wear_eb_count = div_u64(mtd->size, mtd->erasesize); 1035 915 mem = wear_eb_count * sizeof(unsigned long); 1036 916 if (mem / sizeof(unsigned long) != wear_eb_count) { ··· 1045 929 1046 930 static void update_wear(unsigned int erase_block_no) 1047 931 { 1048 - unsigned long wmin = -1, wmax = 0, avg; 1049 - unsigned long deciles[10], decile_max[10], tot = 0; 1050 - unsigned int i; 1051 - 1052 932 if (!erase_block_wear) 1053 933 return; 1054 934 total_wear += 1; 935 + /* 936 + * TODO: Notify this through a debugfs entry, 937 + * instead of showing an error message. 
938 + */ 1055 939 if (total_wear == 0) 1056 940 NS_ERR("Erase counter total overflow\n"); 1057 941 erase_block_wear[erase_block_no] += 1; 1058 942 if (erase_block_wear[erase_block_no] == 0) 1059 943 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no); 1060 - rptwear_cnt += 1; 1061 - if (rptwear_cnt < rptwear) 1062 - return; 1063 - rptwear_cnt = 0; 1064 - /* Calc wear stats */ 1065 - for (i = 0; i < wear_eb_count; ++i) { 1066 - unsigned long wear = erase_block_wear[i]; 1067 - if (wear < wmin) 1068 - wmin = wear; 1069 - if (wear > wmax) 1070 - wmax = wear; 1071 - tot += wear; 1072 - } 1073 - for (i = 0; i < 9; ++i) { 1074 - deciles[i] = 0; 1075 - decile_max[i] = (wmax * (i + 1) + 5) / 10; 1076 - } 1077 - deciles[9] = 0; 1078 - decile_max[9] = wmax; 1079 - for (i = 0; i < wear_eb_count; ++i) { 1080 - int d; 1081 - unsigned long wear = erase_block_wear[i]; 1082 - for (d = 0; d < 10; ++d) 1083 - if (wear <= decile_max[d]) { 1084 - deciles[d] += 1; 1085 - break; 1086 - } 1087 - } 1088 - avg = tot / wear_eb_count; 1089 - /* Output wear report */ 1090 - NS_INFO("*** Wear Report ***\n"); 1091 - NS_INFO("Total numbers of erases: %lu\n", tot); 1092 - NS_INFO("Number of erase blocks: %u\n", wear_eb_count); 1093 - NS_INFO("Average number of erases: %lu\n", avg); 1094 - NS_INFO("Maximum number of erases: %lu\n", wmax); 1095 - NS_INFO("Minimum number of erases: %lu\n", wmin); 1096 - for (i = 0; i < 10; ++i) { 1097 - unsigned long from = (i ? 
decile_max[i - 1] + 1 : 0); 1098 - if (from > decile_max[i]) 1099 - continue; 1100 - NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n", 1101 - from, 1102 - decile_max[i], 1103 - deciles[i]); 1104 - } 1105 - NS_INFO("*** End of Wear Report ***\n"); 1106 944 } 1107 945 1108 946 /* ··· 2397 2327 if ((retval = setup_wear_reporting(nsmtd)) != 0) 2398 2328 goto err_exit; 2399 2329 2330 + if ((retval = nandsim_debugfs_create(nand)) != 0) 2331 + goto err_exit; 2332 + 2400 2333 if ((retval = init_nandsim(nsmtd)) != 0) 2401 2334 goto err_exit; 2402 2335 ··· 2439 2366 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; 2440 2367 int i; 2441 2368 2369 + nandsim_debugfs_remove(ns); 2442 2370 free_nandsim(ns); /* Free nandsim private resources */ 2443 2371 nand_release(nsmtd); /* Unregister driver */ 2444 2372 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
+3 -3
drivers/mtd/nand/ndfc.c
··· 197 197 return ret; 198 198 } 199 199 200 - static int __devinit ndfc_probe(struct platform_device *ofdev) 200 + static int ndfc_probe(struct platform_device *ofdev) 201 201 { 202 202 struct ndfc_controller *ndfc; 203 203 const __be32 *reg; ··· 256 256 return 0; 257 257 } 258 258 259 - static int __devexit ndfc_remove(struct platform_device *ofdev) 259 + static int ndfc_remove(struct platform_device *ofdev) 260 260 { 261 261 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 262 262 ··· 279 279 .of_match_table = ndfc_match, 280 280 }, 281 281 .probe = ndfc_probe, 282 - .remove = __devexit_p(ndfc_remove), 282 + .remove = ndfc_remove, 283 283 }; 284 284 285 285 module_platform_driver(ndfc_driver);
-235
drivers/mtd/nand/nomadik_nand.c
··· 1 - /* 2 - * drivers/mtd/nand/nomadik_nand.c 3 - * 4 - * Overview: 5 - * Driver for on-board NAND flash on Nomadik Platforms 6 - * 7 - * Copyright © 2007 STMicroelectronics Pvt. Ltd. 8 - * Author: Sachin Verma <sachin.verma@st.com> 9 - * 10 - * Copyright © 2009 Alessandro Rubini 11 - * 12 - * This program is free software; you can redistribute it and/or modify 13 - * it under the terms of the GNU General Public License as published by 14 - * the Free Software Foundation; either version 2 of the License, or 15 - * (at your option) any later version. 16 - * 17 - * This program is distributed in the hope that it will be useful, 18 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 - * GNU General Public License for more details. 21 - * 22 - */ 23 - 24 - #include <linux/init.h> 25 - #include <linux/module.h> 26 - #include <linux/types.h> 27 - #include <linux/mtd/mtd.h> 28 - #include <linux/mtd/nand.h> 29 - #include <linux/mtd/nand_ecc.h> 30 - #include <linux/platform_device.h> 31 - #include <linux/mtd/partitions.h> 32 - #include <linux/io.h> 33 - #include <linux/slab.h> 34 - #include <linux/platform_data/mtd-nomadik-nand.h> 35 - #include <mach/fsmc.h> 36 - 37 - #include <mtd/mtd-abi.h> 38 - 39 - struct nomadik_nand_host { 40 - struct mtd_info mtd; 41 - struct nand_chip nand; 42 - void __iomem *data_va; 43 - void __iomem *cmd_va; 44 - void __iomem *addr_va; 45 - struct nand_bbt_descr *bbt_desc; 46 - }; 47 - 48 - static struct nand_ecclayout nomadik_ecc_layout = { 49 - .eccbytes = 3 * 4, 50 - .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */ 51 - 0x02, 0x03, 0x04, 52 - 0x12, 0x13, 0x14, 53 - 0x22, 0x23, 0x24, 54 - 0x32, 0x33, 0x34}, 55 - /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */ 56 - .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} }, 57 - }; 58 - 59 - static void nomadik_ecc_control(struct mtd_info *mtd, int mode) 60 - { 
61 - /* No need to enable hw ecc, it's on by default */ 62 - } 63 - 64 - static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 65 - { 66 - struct nand_chip *nand = mtd->priv; 67 - struct nomadik_nand_host *host = nand->priv; 68 - 69 - if (cmd == NAND_CMD_NONE) 70 - return; 71 - 72 - if (ctrl & NAND_CLE) 73 - writeb(cmd, host->cmd_va); 74 - else 75 - writeb(cmd, host->addr_va); 76 - } 77 - 78 - static int nomadik_nand_probe(struct platform_device *pdev) 79 - { 80 - struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; 81 - struct nomadik_nand_host *host; 82 - struct mtd_info *mtd; 83 - struct nand_chip *nand; 84 - struct resource *res; 85 - int ret = 0; 86 - 87 - /* Allocate memory for the device structure (and zero it) */ 88 - host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL); 89 - if (!host) { 90 - dev_err(&pdev->dev, "Failed to allocate device structure.\n"); 91 - return -ENOMEM; 92 - } 93 - 94 - /* Call the client's init function, if any */ 95 - if (pdata->init) 96 - ret = pdata->init(); 97 - if (ret < 0) { 98 - dev_err(&pdev->dev, "Init function failed\n"); 99 - goto err; 100 - } 101 - 102 - /* ioremap three regions */ 103 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); 104 - if (!res) { 105 - ret = -EIO; 106 - goto err_unmap; 107 - } 108 - host->addr_va = ioremap(res->start, resource_size(res)); 109 - 110 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 111 - if (!res) { 112 - ret = -EIO; 113 - goto err_unmap; 114 - } 115 - host->data_va = ioremap(res->start, resource_size(res)); 116 - 117 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 118 - if (!res) { 119 - ret = -EIO; 120 - goto err_unmap; 121 - } 122 - host->cmd_va = ioremap(res->start, resource_size(res)); 123 - 124 - if (!host->addr_va || !host->data_va || !host->cmd_va) { 125 - ret = -ENOMEM; 126 - goto err_unmap; 127 - } 128 - 129 - /* Link all private pointers */ 130 - mtd = 
&host->mtd; 131 - nand = &host->nand; 132 - mtd->priv = nand; 133 - nand->priv = host; 134 - 135 - host->mtd.owner = THIS_MODULE; 136 - nand->IO_ADDR_R = host->data_va; 137 - nand->IO_ADDR_W = host->data_va; 138 - nand->cmd_ctrl = nomadik_cmd_ctrl; 139 - 140 - /* 141 - * This stanza declares ECC_HW but uses soft routines. It's because 142 - * HW claims to make the calculation but not the correction. However, 143 - * I haven't managed to get the desired data out of it until now. 144 - */ 145 - nand->ecc.mode = NAND_ECC_SOFT; 146 - nand->ecc.layout = &nomadik_ecc_layout; 147 - nand->ecc.hwctl = nomadik_ecc_control; 148 - nand->ecc.size = 512; 149 - nand->ecc.bytes = 3; 150 - 151 - nand->options = pdata->options; 152 - 153 - /* 154 - * Scan to find existence of the device 155 - */ 156 - if (nand_scan(&host->mtd, 1)) { 157 - ret = -ENXIO; 158 - goto err_unmap; 159 - } 160 - 161 - mtd_device_register(&host->mtd, pdata->parts, pdata->nparts); 162 - 163 - platform_set_drvdata(pdev, host); 164 - return 0; 165 - 166 - err_unmap: 167 - if (host->cmd_va) 168 - iounmap(host->cmd_va); 169 - if (host->data_va) 170 - iounmap(host->data_va); 171 - if (host->addr_va) 172 - iounmap(host->addr_va); 173 - err: 174 - kfree(host); 175 - return ret; 176 - } 177 - 178 - /* 179 - * Clean up routine 180 - */ 181 - static int nomadik_nand_remove(struct platform_device *pdev) 182 - { 183 - struct nomadik_nand_host *host = platform_get_drvdata(pdev); 184 - struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; 185 - 186 - if (pdata->exit) 187 - pdata->exit(); 188 - 189 - if (host) { 190 - nand_release(&host->mtd); 191 - iounmap(host->cmd_va); 192 - iounmap(host->data_va); 193 - iounmap(host->addr_va); 194 - kfree(host); 195 - } 196 - return 0; 197 - } 198 - 199 - static int nomadik_nand_suspend(struct device *dev) 200 - { 201 - struct nomadik_nand_host *host = dev_get_drvdata(dev); 202 - int ret = 0; 203 - if (host) 204 - ret = mtd_suspend(&host->mtd); 205 - return ret; 206 - } 
207 - 208 - static int nomadik_nand_resume(struct device *dev) 209 - { 210 - struct nomadik_nand_host *host = dev_get_drvdata(dev); 211 - if (host) 212 - mtd_resume(&host->mtd); 213 - return 0; 214 - } 215 - 216 - static const struct dev_pm_ops nomadik_nand_pm_ops = { 217 - .suspend = nomadik_nand_suspend, 218 - .resume = nomadik_nand_resume, 219 - }; 220 - 221 - static struct platform_driver nomadik_nand_driver = { 222 - .probe = nomadik_nand_probe, 223 - .remove = nomadik_nand_remove, 224 - .driver = { 225 - .owner = THIS_MODULE, 226 - .name = "nomadik_nand", 227 - .pm = &nomadik_nand_pm_ops, 228 - }, 229 - }; 230 - 231 - module_platform_driver(nomadik_nand_driver); 232 - 233 - MODULE_LICENSE("GPL"); 234 - MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)"); 235 - MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
+3 -3
drivers/mtd/nand/nuc900_nand.c
··· 246 246 spin_unlock(&nand->lock); 247 247 } 248 248 249 - static int __devinit nuc900_nand_probe(struct platform_device *pdev) 249 + static int nuc900_nand_probe(struct platform_device *pdev) 250 250 { 251 251 struct nuc900_nand *nuc900_nand; 252 252 struct nand_chip *chip; ··· 317 317 return retval; 318 318 } 319 319 320 - static int __devexit nuc900_nand_remove(struct platform_device *pdev) 320 + static int nuc900_nand_remove(struct platform_device *pdev) 321 321 { 322 322 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 323 323 struct resource *res; ··· 340 340 341 341 static struct platform_driver nuc900_nand_driver = { 342 342 .probe = nuc900_nand_probe, 343 - .remove = __devexit_p(nuc900_nand_remove), 343 + .remove = nuc900_nand_remove, 344 344 .driver = { 345 345 .name = "nuc900-fmi", 346 346 .owner = THIS_MODULE,
+1 -1
drivers/mtd/nand/omap2.c
··· 1323 1323 } 1324 1324 #endif /* CONFIG_MTD_NAND_OMAP_BCH */ 1325 1325 1326 - static int __devinit omap_nand_probe(struct platform_device *pdev) 1326 + static int omap_nand_probe(struct platform_device *pdev) 1327 1327 { 1328 1328 struct omap_nand_info *info; 1329 1329 struct omap_nand_platform_data *pdata;
+2 -2
drivers/mtd/nand/orion_nand.c
··· 194 194 return ret; 195 195 } 196 196 197 - static int __devexit orion_nand_remove(struct platform_device *pdev) 197 + static int orion_nand_remove(struct platform_device *pdev) 198 198 { 199 199 struct mtd_info *mtd = platform_get_drvdata(pdev); 200 200 struct nand_chip *nc = mtd->priv; ··· 223 223 #endif 224 224 225 225 static struct platform_driver orion_nand_driver = { 226 - .remove = __devexit_p(orion_nand_remove), 226 + .remove = orion_nand_remove, 227 227 .driver = { 228 228 .name = "orion_nand", 229 229 .owner = THIS_MODULE,
+2 -2
drivers/mtd/nand/pasemi_nand.c
··· 89 89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); 90 90 } 91 91 92 - static int __devinit pasemi_nand_probe(struct platform_device *ofdev) 92 + static int pasemi_nand_probe(struct platform_device *ofdev) 93 93 { 94 94 struct pci_dev *pdev; 95 95 struct device_node *np = ofdev->dev.of_node; ··· 184 184 return err; 185 185 } 186 186 187 - static int __devexit pasemi_nand_remove(struct platform_device *ofdev) 187 + static int pasemi_nand_remove(struct platform_device *ofdev) 188 188 { 189 189 struct nand_chip *chip; 190 190
+3 -3
drivers/mtd/nand/plat_nand.c
··· 28 28 /* 29 29 * Probe for the NAND device. 30 30 */ 31 - static int __devinit plat_nand_probe(struct platform_device *pdev) 31 + static int plat_nand_probe(struct platform_device *pdev) 32 32 { 33 33 struct platform_nand_data *pdata = pdev->dev.platform_data; 34 34 struct mtd_part_parser_data ppdata; ··· 134 134 /* 135 135 * Remove a NAND device. 136 136 */ 137 - static int __devexit plat_nand_remove(struct platform_device *pdev) 137 + static int plat_nand_remove(struct platform_device *pdev) 138 138 { 139 139 struct plat_nand_data *data = platform_get_drvdata(pdev); 140 140 struct platform_nand_data *pdata = pdev->dev.platform_data; ··· 160 160 161 161 static struct platform_driver plat_nand_driver = { 162 162 .probe = plat_nand_probe, 163 - .remove = __devexit_p(plat_nand_remove), 163 + .remove = plat_nand_remove, 164 164 .driver = { 165 165 .name = "gen_nand", 166 166 .owner = THIS_MODULE,
+5 -2
drivers/mtd/nand/s3c2410.c
··· 730 730 struct s3c2410_nand_mtd *mtd, 731 731 struct s3c2410_nand_set *set) 732 732 { 733 - if (set) 733 + if (set) { 734 734 mtd->mtd.name = set->name; 735 735 736 - return mtd_device_parse_register(&mtd->mtd, NULL, NULL, 736 + return mtd_device_parse_register(&mtd->mtd, NULL, NULL, 737 737 set->partitions, set->nr_partitions); 738 + } 739 + 740 + return -ENODEV; 738 741 } 739 742 740 743 /**
+275 -31
drivers/mtd/nand/sh_flctl.c
··· 23 23 24 24 #include <linux/module.h> 25 25 #include <linux/kernel.h> 26 + #include <linux/completion.h> 26 27 #include <linux/delay.h> 28 + #include <linux/dmaengine.h> 29 + #include <linux/dma-mapping.h> 27 30 #include <linux/interrupt.h> 28 31 #include <linux/io.h> 32 + #include <linux/of.h> 33 + #include <linux/of_device.h> 34 + #include <linux/of_mtd.h> 29 35 #include <linux/platform_device.h> 30 36 #include <linux/pm_runtime.h> 37 + #include <linux/sh_dma.h> 31 38 #include <linux/slab.h> 32 39 #include <linux/string.h> 33 40 ··· 111 104 112 105 timeout_error(flctl, __func__); 113 106 writeb(0x0, FLTRCR(flctl)); 107 + } 108 + 109 + static void flctl_dma_complete(void *param) 110 + { 111 + struct sh_flctl *flctl = param; 112 + 113 + complete(&flctl->dma_complete); 114 + } 115 + 116 + static void flctl_release_dma(struct sh_flctl *flctl) 117 + { 118 + if (flctl->chan_fifo0_rx) { 119 + dma_release_channel(flctl->chan_fifo0_rx); 120 + flctl->chan_fifo0_rx = NULL; 121 + } 122 + if (flctl->chan_fifo0_tx) { 123 + dma_release_channel(flctl->chan_fifo0_tx); 124 + flctl->chan_fifo0_tx = NULL; 125 + } 126 + } 127 + 128 + static void flctl_setup_dma(struct sh_flctl *flctl) 129 + { 130 + dma_cap_mask_t mask; 131 + struct dma_slave_config cfg; 132 + struct platform_device *pdev = flctl->pdev; 133 + struct sh_flctl_platform_data *pdata = pdev->dev.platform_data; 134 + int ret; 135 + 136 + if (!pdata) 137 + return; 138 + 139 + if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0) 140 + return; 141 + 142 + /* We can only either use DMA for both Tx and Rx or not use it at all */ 143 + dma_cap_zero(mask); 144 + dma_cap_set(DMA_SLAVE, mask); 145 + 146 + flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter, 147 + (void *)pdata->slave_id_fifo0_tx); 148 + dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__, 149 + flctl->chan_fifo0_tx); 150 + 151 + if (!flctl->chan_fifo0_tx) 152 + return; 153 + 154 + memset(&cfg, 0, sizeof(cfg)); 155 + 
cfg.slave_id = pdata->slave_id_fifo0_tx; 156 + cfg.direction = DMA_MEM_TO_DEV; 157 + cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl); 158 + cfg.src_addr = 0; 159 + ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg); 160 + if (ret < 0) 161 + goto err; 162 + 163 + flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter, 164 + (void *)pdata->slave_id_fifo0_rx); 165 + dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__, 166 + flctl->chan_fifo0_rx); 167 + 168 + if (!flctl->chan_fifo0_rx) 169 + goto err; 170 + 171 + cfg.slave_id = pdata->slave_id_fifo0_rx; 172 + cfg.direction = DMA_DEV_TO_MEM; 173 + cfg.dst_addr = 0; 174 + cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl); 175 + ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg); 176 + if (ret < 0) 177 + goto err; 178 + 179 + init_completion(&flctl->dma_complete); 180 + 181 + return; 182 + 183 + err: 184 + flctl_release_dma(flctl); 114 185 } 115 186 116 187 static void set_addr(struct mtd_info *mtd, int column, int page_addr) ··· 310 225 311 226 for (i = 0; i < 3; i++) { 312 227 uint8_t org; 313 - int index; 228 + unsigned int index; 314 229 315 230 data = readl(ecc_reg[i]); 316 231 ··· 346 261 timeout_error(flctl, __func__); 347 262 } 348 263 264 + static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, 265 + int len, enum dma_data_direction dir) 266 + { 267 + struct dma_async_tx_descriptor *desc = NULL; 268 + struct dma_chan *chan; 269 + enum dma_transfer_direction tr_dir; 270 + dma_addr_t dma_addr; 271 + dma_cookie_t cookie = -EINVAL; 272 + uint32_t reg; 273 + int ret; 274 + 275 + if (dir == DMA_FROM_DEVICE) { 276 + chan = flctl->chan_fifo0_rx; 277 + tr_dir = DMA_DEV_TO_MEM; 278 + } else { 279 + chan = flctl->chan_fifo0_tx; 280 + tr_dir = DMA_MEM_TO_DEV; 281 + } 282 + 283 + dma_addr = dma_map_single(chan->device->dev, buf, len, dir); 284 + 285 + if (dma_addr) 286 + desc = dmaengine_prep_slave_single(chan, dma_addr, len, 287 + tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 288 + 289 
+ if (desc) { 290 + reg = readl(FLINTDMACR(flctl)); 291 + reg |= DREQ0EN; 292 + writel(reg, FLINTDMACR(flctl)); 293 + 294 + desc->callback = flctl_dma_complete; 295 + desc->callback_param = flctl; 296 + cookie = dmaengine_submit(desc); 297 + 298 + dma_async_issue_pending(chan); 299 + } else { 300 + /* DMA failed, fall back to PIO */ 301 + flctl_release_dma(flctl); 302 + dev_warn(&flctl->pdev->dev, 303 + "DMA failed, falling back to PIO\n"); 304 + ret = -EIO; 305 + goto out; 306 + } 307 + 308 + ret = 309 + wait_for_completion_timeout(&flctl->dma_complete, 310 + msecs_to_jiffies(3000)); 311 + 312 + if (ret <= 0) { 313 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 314 + dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); 315 + } 316 + 317 + out: 318 + reg = readl(FLINTDMACR(flctl)); 319 + reg &= ~DREQ0EN; 320 + writel(reg, FLINTDMACR(flctl)); 321 + 322 + dma_unmap_single(chan->device->dev, dma_addr, len, dir); 323 + 324 + /* ret > 0 is success */ 325 + return ret; 326 + } 327 + 349 328 static void read_datareg(struct sh_flctl *flctl, int offset) 350 329 { 351 330 unsigned long data; ··· 428 279 429 280 len_4align = (rlen + 3) / 4; 430 281 282 + /* initiate DMA transfer */ 283 + if (flctl->chan_fifo0_rx && rlen >= 32 && 284 + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0) 285 + goto convert; /* DMA success */ 286 + 287 + /* do polling transfer */ 431 288 for (i = 0; i < len_4align; i++) { 432 289 wait_rfifo_ready(flctl); 433 290 buf[i] = readl(FLDTFIFO(flctl)); 434 - buf[i] = be32_to_cpu(buf[i]); 435 291 } 292 + 293 + convert: 294 + for (i = 0; i < len_4align; i++) 295 + buf[i] = be32_to_cpu(buf[i]); 436 296 } 437 297 438 298 static enum flctl_ecc_res_t read_ecfiforeg ··· 463 305 return res; 464 306 } 465 307 466 - static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) 308 + static void write_fiforeg(struct sh_flctl *flctl, int rlen, 309 + unsigned int offset) 467 310 { 468 311 int i, len_4align; 469 - unsigned 
long *data = (unsigned long *)&flctl->done_buff[offset]; 470 - void *fifo_addr = (void *)FLDTFIFO(flctl); 312 + unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; 471 313 472 314 len_4align = (rlen + 3) / 4; 473 315 for (i = 0; i < len_4align; i++) { 474 316 wait_wfifo_ready(flctl); 475 - writel(cpu_to_be32(data[i]), fifo_addr); 317 + writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl)); 476 318 } 477 319 } 478 320 479 - static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset) 321 + static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, 322 + unsigned int offset) 480 323 { 481 324 int i, len_4align; 482 - unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; 325 + unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; 483 326 484 327 len_4align = (rlen + 3) / 4; 328 + 329 + for (i = 0; i < len_4align; i++) 330 + buf[i] = cpu_to_be32(buf[i]); 331 + 332 + /* initiate DMA transfer */ 333 + if (flctl->chan_fifo0_tx && rlen >= 32 && 334 + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0) 335 + return; /* DMA success */ 336 + 337 + /* do polling transfer */ 485 338 for (i = 0; i < len_4align; i++) { 486 339 wait_wecfifo_ready(flctl); 487 - writel(cpu_to_be32(data[i]), FLECFIFO(flctl)); 340 + writel(buf[i], FLECFIFO(flctl)); 488 341 } 489 342 } 490 343 ··· 919 750 static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 920 751 { 921 752 struct sh_flctl *flctl = mtd_to_flctl(mtd); 922 - int index = flctl->index; 923 753 924 - memcpy(&flctl->done_buff[index], buf, len); 754 + memcpy(&flctl->done_buff[flctl->index], buf, len); 925 755 flctl->index += len; 926 756 } 927 757 928 758 static uint8_t flctl_read_byte(struct mtd_info *mtd) 929 759 { 930 760 struct sh_flctl *flctl = mtd_to_flctl(mtd); 931 - int index = flctl->index; 932 761 uint8_t data; 933 762 934 - data = flctl->done_buff[index]; 763 + data = flctl->done_buff[flctl->index]; 935 764 flctl->index++; 936 765 return data; 937 
766 } 938 767 939 768 static uint16_t flctl_read_word(struct mtd_info *mtd) 940 769 { 941 - struct sh_flctl *flctl = mtd_to_flctl(mtd); 942 - int index = flctl->index; 943 - uint16_t data; 944 - uint16_t *buf = (uint16_t *)&flctl->done_buff[index]; 770 + struct sh_flctl *flctl = mtd_to_flctl(mtd); 771 + uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index]; 945 772 946 - data = *buf; 947 - flctl->index += 2; 948 - return data; 773 + flctl->index += 2; 774 + return *buf; 949 775 } 950 776 951 777 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 952 778 { 953 779 struct sh_flctl *flctl = mtd_to_flctl(mtd); 954 - int index = flctl->index; 955 780 956 - memcpy(buf, &flctl->done_buff[index], len); 781 + memcpy(buf, &flctl->done_buff[flctl->index], len); 957 782 flctl->index += len; 958 783 } 959 784 ··· 1021 858 return IRQ_HANDLED; 1022 859 } 1023 860 1024 - static int __devinit flctl_probe(struct platform_device *pdev) 861 + #ifdef CONFIG_OF 862 + struct flctl_soc_config { 863 + unsigned long flcmncr_val; 864 + unsigned has_hwecc:1; 865 + unsigned use_holden:1; 866 + }; 867 + 868 + static struct flctl_soc_config flctl_sh7372_config = { 869 + .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL, 870 + .has_hwecc = 1, 871 + .use_holden = 1, 872 + }; 873 + 874 + static const struct of_device_id of_flctl_match[] = { 875 + { .compatible = "renesas,shmobile-flctl-sh7372", 876 + .data = &flctl_sh7372_config }, 877 + {}, 878 + }; 879 + MODULE_DEVICE_TABLE(of, of_flctl_match); 880 + 881 + static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) 882 + { 883 + const struct of_device_id *match; 884 + struct flctl_soc_config *config; 885 + struct sh_flctl_platform_data *pdata; 886 + struct device_node *dn = dev->of_node; 887 + int ret; 888 + 889 + match = of_match_device(of_flctl_match, dev); 890 + if (match) 891 + config = (struct flctl_soc_config *)match->data; 892 + else { 893 + dev_err(dev, "%s: no OF configuration attached\n", 
__func__); 894 + return NULL; 895 + } 896 + 897 + pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data), 898 + GFP_KERNEL); 899 + if (!pdata) { 900 + dev_err(dev, "%s: failed to allocate config data\n", __func__); 901 + return NULL; 902 + } 903 + 904 + /* set SoC specific options */ 905 + pdata->flcmncr_val = config->flcmncr_val; 906 + pdata->has_hwecc = config->has_hwecc; 907 + pdata->use_holden = config->use_holden; 908 + 909 + /* parse user defined options */ 910 + ret = of_get_nand_bus_width(dn); 911 + if (ret == 16) 912 + pdata->flcmncr_val |= SEL_16BIT; 913 + else if (ret != 8) { 914 + dev_err(dev, "%s: invalid bus width\n", __func__); 915 + return NULL; 916 + } 917 + 918 + return pdata; 919 + } 920 + #else /* CONFIG_OF */ 921 + #define of_flctl_match NULL 922 + static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) 923 + { 924 + return NULL; 925 + } 926 + #endif /* CONFIG_OF */ 927 + 928 + static int flctl_probe(struct platform_device *pdev) 1025 929 { 1026 930 struct resource *res; 1027 931 struct sh_flctl *flctl; ··· 1097 867 struct sh_flctl_platform_data *pdata; 1098 868 int ret = -ENXIO; 1099 869 int irq; 1100 - 1101 - pdata = pdev->dev.platform_data; 1102 - if (pdata == NULL) { 1103 - dev_err(&pdev->dev, "no platform data defined\n"); 1104 - return -EINVAL; 1105 - } 870 + struct mtd_part_parser_data ppdata = {}; 1106 871 1107 872 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); 1108 873 if (!flctl) { ··· 1129 904 goto err_flste; 1130 905 } 1131 906 907 + if (pdev->dev.of_node) 908 + pdata = flctl_parse_dt(&pdev->dev); 909 + else 910 + pdata = pdev->dev.platform_data; 911 + 912 + if (!pdata) { 913 + dev_err(&pdev->dev, "no setup data defined\n"); 914 + ret = -EINVAL; 915 + goto err_pdata; 916 + } 917 + 1132 918 platform_set_drvdata(pdev, flctl); 1133 919 flctl_mtd = &flctl->mtd; 1134 920 nand = &flctl->chip; ··· 1168 932 pm_runtime_enable(&pdev->dev); 1169 933 pm_runtime_resume(&pdev->dev); 1170 934 935 + 
flctl_setup_dma(flctl); 936 + 1171 937 ret = nand_scan_ident(flctl_mtd, 1, NULL); 1172 938 if (ret) 1173 939 goto err_chip; ··· 1182 944 if (ret) 1183 945 goto err_chip; 1184 946 1185 - mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); 947 + ppdata.of_node = pdev->dev.of_node; 948 + ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts, 949 + pdata->nr_parts); 1186 950 1187 951 return 0; 1188 952 1189 953 err_chip: 954 + flctl_release_dma(flctl); 1190 955 pm_runtime_disable(&pdev->dev); 956 + err_pdata: 1191 957 free_irq(irq, flctl); 1192 958 err_flste: 1193 959 iounmap(flctl->reg); ··· 1200 958 return ret; 1201 959 } 1202 960 1203 - static int __devexit flctl_remove(struct platform_device *pdev) 961 + static int flctl_remove(struct platform_device *pdev) 1204 962 { 1205 963 struct sh_flctl *flctl = platform_get_drvdata(pdev); 1206 964 965 + flctl_release_dma(flctl); 1207 966 nand_release(&flctl->mtd); 1208 967 pm_runtime_disable(&pdev->dev); 1209 968 free_irq(platform_get_irq(pdev, 0), flctl); ··· 1219 976 .driver = { 1220 977 .name = "sh_flctl", 1221 978 .owner = THIS_MODULE, 979 + .of_match_table = of_flctl_match, 1222 980 }, 1223 981 }; 1224 982
+3 -3
drivers/mtd/nand/sharpsl.c
··· 106 106 /* 107 107 * Main initialization routine 108 108 */ 109 - static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 109 + static int sharpsl_nand_probe(struct platform_device *pdev) 110 110 { 111 111 struct nand_chip *this; 112 112 struct resource *r; ··· 205 205 /* 206 206 * Clean up routine 207 207 */ 208 - static int __devexit sharpsl_nand_remove(struct platform_device *pdev) 208 + static int sharpsl_nand_remove(struct platform_device *pdev) 209 209 { 210 210 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); 211 211 ··· 228 228 .owner = THIS_MODULE, 229 229 }, 230 230 .probe = sharpsl_nand_probe, 231 - .remove = __devexit_p(sharpsl_nand_remove), 231 + .remove = sharpsl_nand_remove, 232 232 }; 233 233 234 234 module_platform_driver(sharpsl_nand_driver);
+3 -3
drivers/mtd/nand/socrates_nand.c
··· 140 140 /* 141 141 * Probe for the NAND device. 142 142 */ 143 - static int __devinit socrates_nand_probe(struct platform_device *ofdev) 143 + static int socrates_nand_probe(struct platform_device *ofdev) 144 144 { 145 145 struct socrates_nand_host *host; 146 146 struct mtd_info *mtd; ··· 220 220 /* 221 221 * Remove a NAND device. 222 222 */ 223 - static int __devexit socrates_nand_remove(struct platform_device *ofdev) 223 + static int socrates_nand_remove(struct platform_device *ofdev) 224 224 { 225 225 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); 226 226 struct mtd_info *mtd = &host->mtd; ··· 251 251 .of_match_table = socrates_nand_match, 252 252 }, 253 253 .probe = socrates_nand_probe, 254 - .remove = __devexit_p(socrates_nand_remove), 254 + .remove = socrates_nand_remove, 255 255 }; 256 256 257 257 module_platform_driver(socrates_nand_driver);
+4 -1
drivers/mtd/ofpart.c
··· 71 71 (*pparts)[i].name = (char *)partname; 72 72 73 73 if (of_get_property(pp, "read-only", &len)) 74 - (*pparts)[i].mask_flags = MTD_WRITEABLE; 74 + (*pparts)[i].mask_flags |= MTD_WRITEABLE; 75 + 76 + if (of_get_property(pp, "lock", &len)) 77 + (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK; 75 78 76 79 i++; 77 80 }
+3 -3
drivers/mtd/onenand/generic.c
··· 35 35 struct onenand_chip onenand; 36 36 }; 37 37 38 - static int __devinit generic_onenand_probe(struct platform_device *pdev) 38 + static int generic_onenand_probe(struct platform_device *pdev) 39 39 { 40 40 struct onenand_info *info; 41 41 struct onenand_platform_data *pdata = pdev->dev.platform_data; ··· 88 88 return err; 89 89 } 90 90 91 - static int __devexit generic_onenand_remove(struct platform_device *pdev) 91 + static int generic_onenand_remove(struct platform_device *pdev) 92 92 { 93 93 struct onenand_info *info = platform_get_drvdata(pdev); 94 94 struct resource *res = pdev->resource; ··· 112 112 .owner = THIS_MODULE, 113 113 }, 114 114 .probe = generic_onenand_probe, 115 - .remove = __devexit_p(generic_onenand_remove), 115 + .remove = generic_onenand_remove, 116 116 }; 117 117 118 118 module_platform_driver(generic_onenand_driver);
+3 -3
drivers/mtd/onenand/omap2.c
··· 630 630 return ret; 631 631 } 632 632 633 - static int __devinit omap2_onenand_probe(struct platform_device *pdev) 633 + static int omap2_onenand_probe(struct platform_device *pdev) 634 634 { 635 635 struct omap_onenand_platform_data *pdata; 636 636 struct omap2_onenand *c; ··· 799 799 return r; 800 800 } 801 801 802 - static int __devexit omap2_onenand_remove(struct platform_device *pdev) 802 + static int omap2_onenand_remove(struct platform_device *pdev) 803 803 { 804 804 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 805 805 ··· 822 822 823 823 static struct platform_driver omap2_onenand_driver = { 824 824 .probe = omap2_onenand_probe, 825 - .remove = __devexit_p(omap2_onenand_remove), 825 + .remove = omap2_onenand_remove, 826 826 .shutdown = omap2_onenand_shutdown, 827 827 .driver = { 828 828 .name = DRIVER_NAME,
+2 -2
drivers/mtd/onenand/samsung.c
··· 1053 1053 return err; 1054 1054 } 1055 1055 1056 - static int __devexit s3c_onenand_remove(struct platform_device *pdev) 1056 + static int s3c_onenand_remove(struct platform_device *pdev) 1057 1057 { 1058 1058 struct mtd_info *mtd = platform_get_drvdata(pdev); 1059 1059 ··· 1130 1130 }, 1131 1131 .id_table = s3c_onenand_driver_ids, 1132 1132 .probe = s3c_onenand_probe, 1133 - .remove = __devexit_p(s3c_onenand_remove), 1133 + .remove = s3c_onenand_remove, 1134 1134 }; 1135 1135 1136 1136 module_platform_driver(s3c_onenand_driver);
+37 -36
drivers/mtd/tests/mtd_nandbiterrs.c
··· 39 39 * this program; see the file COPYING. If not, write to the Free Software 40 40 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 41 41 */ 42 + 43 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 44 + 42 45 #include <linux/init.h> 43 46 #include <linux/module.h> 44 47 #include <linux/moduleparam.h> ··· 49 46 #include <linux/err.h> 50 47 #include <linux/mtd/nand.h> 51 48 #include <linux/slab.h> 52 - 53 - #define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA) 54 49 55 50 static int dev; 56 51 module_param(dev, int, S_IRUGO); ··· 104 103 struct erase_info ei; 105 104 loff_t addr = eraseblock * mtd->erasesize; 106 105 107 - msg("erase_block\n"); 106 + pr_info("erase_block\n"); 108 107 109 108 memset(&ei, 0, sizeof(struct erase_info)); 110 109 ei.mtd = mtd; ··· 113 112 114 113 err = mtd_erase(mtd, &ei); 115 114 if (err || ei.state == MTD_ERASE_FAILED) { 116 - msg("error %d while erasing\n", err); 115 + pr_err("error %d while erasing\n", err); 117 116 if (!err) 118 117 err = -EIO; 119 118 return err; ··· 129 128 size_t written; 130 129 131 130 if (log) 132 - msg("write_page\n"); 131 + pr_info("write_page\n"); 133 132 134 133 err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer); 135 134 if (err || written != mtd->writesize) { 136 - msg("error: write failed at %#llx\n", (long long)offset); 135 + pr_err("error: write failed at %#llx\n", (long long)offset); 137 136 if (!err) 138 137 err = -EIO; 139 138 } ··· 148 147 struct mtd_oob_ops ops; 149 148 150 149 if (log) 151 - msg("rewrite page\n"); 150 + pr_info("rewrite page\n"); 152 151 153 152 ops.mode = MTD_OPS_RAW; /* No ECC */ 154 153 ops.len = mtd->writesize; ··· 161 160 162 161 err = mtd_write_oob(mtd, offset, &ops); 163 162 if (err || ops.retlen != mtd->writesize) { 164 - msg("error: write_oob failed (%d)\n", err); 163 + pr_err("error: write_oob failed (%d)\n", err); 165 164 if (!err) 166 165 err = -EIO; 167 166 } ··· 178 177 struct mtd_ecc_stats oldstats; 179 178 180 179 if 
(log) 181 - msg("read_page\n"); 180 + pr_info("read_page\n"); 182 181 183 182 /* Saving last mtd stats */ 184 183 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats)); ··· 188 187 err = mtd->ecc_stats.corrected - oldstats.corrected; 189 188 190 189 if (err < 0 || read != mtd->writesize) { 191 - msg("error: read failed at %#llx\n", (long long)offset); 190 + pr_err("error: read failed at %#llx\n", (long long)offset); 192 191 if (err >= 0) 193 192 err = -EIO; 194 193 } ··· 202 201 unsigned i, errs = 0; 203 202 204 203 if (log) 205 - msg("verify_page\n"); 204 + pr_info("verify_page\n"); 206 205 207 206 for (i = 0; i < mtd->writesize; i++) { 208 207 if (rbuffer[i] != hash(i+seed)) { 209 - msg("Error: page offset %u, expected %02x, got %02x\n", 208 + pr_err("Error: page offset %u, expected %02x, got %02x\n", 210 209 i, hash(i+seed), rbuffer[i]); 211 210 errs++; 212 211 } ··· 231 230 for (bit = 7; bit >= 0; bit--) { 232 231 if (CBIT(wbuffer[byte], bit)) { 233 232 BCLR(wbuffer[byte], bit); 234 - msg("Inserted biterror @ %u/%u\n", byte, bit); 233 + pr_info("Inserted biterror @ %u/%u\n", byte, bit); 235 234 return 0; 236 235 } 237 236 } 238 237 byte++; 239 238 } 240 - msg("biterror: Failed to find a '1' bit\n"); 239 + pr_err("biterror: Failed to find a '1' bit\n"); 241 240 return -EIO; 242 241 } 243 242 ··· 249 248 unsigned i; 250 249 unsigned errs_per_subpage = 0; 251 250 252 - msg("incremental biterrors test\n"); 251 + pr_info("incremental biterrors test\n"); 253 252 254 253 for (i = 0; i < mtd->writesize; i++) 255 254 wbuffer[i] = hash(i+seed); ··· 266 265 267 266 err = read_page(1); 268 267 if (err > 0) 269 - msg("Read reported %d corrected bit errors\n", err); 268 + pr_info("Read reported %d corrected bit errors\n", err); 270 269 if (err < 0) { 271 - msg("After %d biterrors per subpage, read reported error %d\n", 270 + pr_err("After %d biterrors per subpage, read reported error %d\n", 272 271 errs_per_subpage, err); 273 272 err = 0; 274 273 goto exit; ··· 276 275 277 
276 err = verify_page(1); 278 277 if (err) { 279 - msg("ECC failure, read data is incorrect despite read success\n"); 278 + pr_err("ECC failure, read data is incorrect despite read success\n"); 280 279 goto exit; 281 280 } 282 281 283 - msg("Successfully corrected %d bit errors per subpage\n", 282 + pr_info("Successfully corrected %d bit errors per subpage\n", 284 283 errs_per_subpage); 285 284 286 285 for (i = 0; i < subcount; i++) { ··· 312 311 313 312 memset(bitstats, 0, sizeof(bitstats)); 314 313 315 - msg("overwrite biterrors test\n"); 314 + pr_info("overwrite biterrors test\n"); 316 315 317 316 for (i = 0; i < mtd->writesize; i++) 318 317 wbuffer[i] = hash(i+seed); ··· 330 329 err = read_page(0); 331 330 if (err >= 0) { 332 331 if (err >= MAXBITS) { 333 - msg("Implausible number of bit errors corrected\n"); 332 + pr_info("Implausible number of bit errors corrected\n"); 334 333 err = -EIO; 335 334 break; 336 335 } 337 336 bitstats[err]++; 338 337 if (err > max_corrected) { 339 338 max_corrected = err; 340 - msg("Read reported %d corrected bit errors\n", 339 + pr_info("Read reported %d corrected bit errors\n", 341 340 err); 342 341 } 343 342 } else { /* err < 0 */ 344 - msg("Read reported error %d\n", err); 343 + pr_info("Read reported error %d\n", err); 345 344 err = 0; 346 345 break; 347 346 } ··· 349 348 err = verify_page(0); 350 349 if (err) { 351 350 bitstats[max_corrected] = opno; 352 - msg("ECC failure, read data is incorrect despite read success\n"); 351 + pr_info("ECC failure, read data is incorrect despite read success\n"); 353 352 break; 354 353 } 355 354 ··· 358 357 359 358 /* At this point bitstats[0] contains the number of ops with no bit 360 359 * errors, bitstats[1] the number of ops with 1 bit error, etc. 
*/ 361 - msg("Bit error histogram (%d operations total):\n", opno); 360 + pr_info("Bit error histogram (%d operations total):\n", opno); 362 361 for (i = 0; i < max_corrected; i++) 363 - msg("Page reads with %3d corrected bit errors: %d\n", 362 + pr_info("Page reads with %3d corrected bit errors: %d\n", 364 363 i, bitstats[i]); 365 364 366 365 exit: ··· 371 370 { 372 371 int err = 0; 373 372 374 - msg("\n"); 375 - msg("==================================================\n"); 376 - msg("MTD device: %d\n", dev); 373 + printk("\n"); 374 + printk(KERN_INFO "==================================================\n"); 375 + pr_info("MTD device: %d\n", dev); 377 376 378 377 mtd = get_mtd_device(NULL, dev); 379 378 if (IS_ERR(mtd)) { 380 379 err = PTR_ERR(mtd); 381 - msg("error: cannot get MTD device\n"); 380 + pr_err("error: cannot get MTD device\n"); 382 381 goto exit_mtddev; 383 382 } 384 383 385 384 if (mtd->type != MTD_NANDFLASH) { 386 - msg("this test requires NAND flash\n"); 385 + pr_info("this test requires NAND flash\n"); 387 386 err = -ENODEV; 388 387 goto exit_nand; 389 388 } 390 389 391 - msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n", 390 + pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n", 392 391 (unsigned long long)mtd->size, mtd->erasesize, 393 392 mtd->writesize, mtd->oobsize); 394 393 395 394 subsize = mtd->writesize >> mtd->subpage_sft; 396 395 subcount = mtd->writesize / subsize; 397 396 398 - msg("Device uses %d subpages of %d bytes\n", subcount, subsize); 397 + pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize); 399 398 400 399 offset = page_offset * mtd->writesize; 401 400 eraseblock = mtd_div_by_eb(offset, mtd); 402 401 403 - msg("Using page=%u, offset=%llu, eraseblock=%u\n", 402 + pr_info("Using page=%u, offset=%llu, eraseblock=%u\n", 404 403 page_offset, offset, eraseblock); 405 404 406 405 wbuffer = kmalloc(mtd->writesize, GFP_KERNEL); ··· 433 432 goto exit_error; 434 433 435 434 err = -EIO; 436 - 
msg("finished successfully.\n"); 437 - msg("==================================================\n"); 435 + pr_info("finished successfully.\n"); 436 + printk(KERN_INFO "==================================================\n"); 438 437 439 438 exit_error: 440 439 kfree(rbuffer);
+4 -2
drivers/mtd/tests/mtd_nandecctest.c
··· 1 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 + 1 3 #include <linux/kernel.h> 2 4 #include <linux/module.h> 3 5 #include <linux/list.h> ··· 266 264 correct_data, size); 267 265 268 266 if (err) { 269 - pr_err("mtd_nandecctest: not ok - %s-%zd\n", 267 + pr_err("not ok - %s-%zd\n", 270 268 nand_ecc_test[i].name, size); 271 269 dump_data_ecc(error_data, error_ecc, 272 270 correct_data, correct_ecc, size); 273 271 break; 274 272 } 275 - pr_info("mtd_nandecctest: ok - %s-%zd\n", 273 + pr_info("ok - %s-%zd\n", 276 274 nand_ecc_test[i].name, size); 277 275 } 278 276 error:
+84 -87
drivers/mtd/tests/mtd_oobtest.c
··· 19 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <asm/div64.h> 23 25 #include <linux/init.h> 24 26 #include <linux/module.h> ··· 29 27 #include <linux/mtd/mtd.h> 30 28 #include <linux/slab.h> 31 29 #include <linux/sched.h> 32 - 33 - #define PRINT_PREF KERN_INFO "mtd_oobtest: " 34 30 35 31 static int dev = -EINVAL; 36 32 module_param(dev, int, S_IRUGO); ··· 80 80 81 81 err = mtd_erase(mtd, &ei); 82 82 if (err) { 83 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 83 + pr_err("error %d while erasing EB %d\n", err, ebnum); 84 84 return err; 85 85 } 86 86 87 87 if (ei.state == MTD_ERASE_FAILED) { 88 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 89 - ebnum); 88 + pr_err("some erase error occurred at EB %d\n", ebnum); 90 89 return -EIO; 91 90 } 92 91 ··· 97 98 int err; 98 99 unsigned int i; 99 100 100 - printk(PRINT_PREF "erasing whole device\n"); 101 + pr_info("erasing whole device\n"); 101 102 for (i = 0; i < ebcnt; ++i) { 102 103 if (bbt[i]) 103 104 continue; ··· 106 107 return err; 107 108 cond_resched(); 108 109 } 109 - printk(PRINT_PREF "erased %u eraseblocks\n", i); 110 + pr_info("erased %u eraseblocks\n", i); 110 111 return 0; 111 112 } 112 113 ··· 140 141 ops.oobbuf = writebuf; 141 142 err = mtd_write_oob(mtd, addr, &ops); 142 143 if (err || ops.oobretlen != use_len) { 143 - printk(PRINT_PREF "error: writeoob failed at %#llx\n", 144 + pr_err("error: writeoob failed at %#llx\n", 144 145 (long long)addr); 145 - printk(PRINT_PREF "error: use_len %d, use_offset %d\n", 146 + pr_err("error: use_len %d, use_offset %d\n", 146 147 use_len, use_offset); 147 148 errcnt += 1; 148 149 return err ? 
err : -1; ··· 159 160 int err; 160 161 unsigned int i; 161 162 162 - printk(PRINT_PREF "writing OOBs of whole device\n"); 163 + pr_info("writing OOBs of whole device\n"); 163 164 for (i = 0; i < ebcnt; ++i) { 164 165 if (bbt[i]) 165 166 continue; ··· 167 168 if (err) 168 169 return err; 169 170 if (i % 256 == 0) 170 - printk(PRINT_PREF "written up to eraseblock %u\n", i); 171 + pr_info("written up to eraseblock %u\n", i); 171 172 cond_resched(); 172 173 } 173 - printk(PRINT_PREF "written %u eraseblocks\n", i); 174 + pr_info("written %u eraseblocks\n", i); 174 175 return 0; 175 176 } 176 177 ··· 193 194 ops.oobbuf = readbuf; 194 195 err = mtd_read_oob(mtd, addr, &ops); 195 196 if (err || ops.oobretlen != use_len) { 196 - printk(PRINT_PREF "error: readoob failed at %#llx\n", 197 + pr_err("error: readoob failed at %#llx\n", 197 198 (long long)addr); 198 199 errcnt += 1; 199 200 return err ? err : -1; 200 201 } 201 202 if (memcmp(readbuf, writebuf, use_len)) { 202 - printk(PRINT_PREF "error: verify failed at %#llx\n", 203 + pr_err("error: verify failed at %#llx\n", 203 204 (long long)addr); 204 205 errcnt += 1; 205 206 if (errcnt > 1000) { 206 - printk(PRINT_PREF "error: too many errors\n"); 207 + pr_err("error: too many errors\n"); 207 208 return -1; 208 209 } 209 210 } ··· 220 221 ops.oobbuf = readbuf; 221 222 err = mtd_read_oob(mtd, addr, &ops); 222 223 if (err || ops.oobretlen != mtd->ecclayout->oobavail) { 223 - printk(PRINT_PREF "error: readoob failed at " 224 - "%#llx\n", (long long)addr); 224 + pr_err("error: readoob failed at %#llx\n", 225 + (long long)addr); 225 226 errcnt += 1; 226 227 return err ? 
err : -1; 227 228 } 228 229 if (memcmp(readbuf + use_offset, writebuf, use_len)) { 229 - printk(PRINT_PREF "error: verify failed at " 230 - "%#llx\n", (long long)addr); 230 + pr_err("error: verify failed at %#llx\n", 231 + (long long)addr); 231 232 errcnt += 1; 232 233 if (errcnt > 1000) { 233 - printk(PRINT_PREF "error: too many " 234 - "errors\n"); 234 + pr_err("error: too many errors\n"); 235 235 return -1; 236 236 } 237 237 } 238 238 for (k = 0; k < use_offset; ++k) 239 239 if (readbuf[k] != 0xff) { 240 - printk(PRINT_PREF "error: verify 0xff " 240 + pr_err("error: verify 0xff " 241 241 "failed at %#llx\n", 242 242 (long long)addr); 243 243 errcnt += 1; 244 244 if (errcnt > 1000) { 245 - printk(PRINT_PREF "error: too " 245 + pr_err("error: too " 246 246 "many errors\n"); 247 247 return -1; 248 248 } ··· 249 251 for (k = use_offset + use_len; 250 252 k < mtd->ecclayout->oobavail; ++k) 251 253 if (readbuf[k] != 0xff) { 252 - printk(PRINT_PREF "error: verify 0xff " 254 + pr_err("error: verify 0xff " 253 255 "failed at %#llx\n", 254 256 (long long)addr); 255 257 errcnt += 1; 256 258 if (errcnt > 1000) { 257 - printk(PRINT_PREF "error: too " 259 + pr_err("error: too " 258 260 "many errors\n"); 259 261 return -1; 260 262 } ··· 284 286 ops.oobbuf = readbuf; 285 287 err = mtd_read_oob(mtd, addr, &ops); 286 288 if (err || ops.oobretlen != len) { 287 - printk(PRINT_PREF "error: readoob failed at %#llx\n", 289 + pr_err("error: readoob failed at %#llx\n", 288 290 (long long)addr); 289 291 errcnt += 1; 290 292 return err ? 
err : -1; 291 293 } 292 294 if (memcmp(readbuf, writebuf, len)) { 293 - printk(PRINT_PREF "error: verify failed at %#llx\n", 295 + pr_err("error: verify failed at %#llx\n", 294 296 (long long)addr); 295 297 errcnt += 1; 296 298 if (errcnt > 1000) { 297 - printk(PRINT_PREF "error: too many errors\n"); 299 + pr_err("error: too many errors\n"); 298 300 return -1; 299 301 } 300 302 } ··· 307 309 int err; 308 310 unsigned int i; 309 311 310 - printk(PRINT_PREF "verifying all eraseblocks\n"); 312 + pr_info("verifying all eraseblocks\n"); 311 313 for (i = 0; i < ebcnt; ++i) { 312 314 if (bbt[i]) 313 315 continue; ··· 315 317 if (err) 316 318 return err; 317 319 if (i % 256 == 0) 318 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 320 + pr_info("verified up to eraseblock %u\n", i); 319 321 cond_resched(); 320 322 } 321 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 323 + pr_info("verified %u eraseblocks\n", i); 322 324 return 0; 323 325 } 324 326 ··· 329 331 330 332 ret = mtd_block_isbad(mtd, addr); 331 333 if (ret) 332 - printk(PRINT_PREF "block %d is bad\n", ebnum); 334 + pr_info("block %d is bad\n", ebnum); 333 335 return ret; 334 336 } 335 337 ··· 339 341 340 342 bbt = kmalloc(ebcnt, GFP_KERNEL); 341 343 if (!bbt) { 342 - printk(PRINT_PREF "error: cannot allocate memory\n"); 344 + pr_err("error: cannot allocate memory\n"); 343 345 return -ENOMEM; 344 346 } 345 347 346 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 348 + pr_info("scanning for bad eraseblocks\n"); 347 349 for (i = 0; i < ebcnt; ++i) { 348 350 bbt[i] = is_block_bad(i) ? 
1 : 0; 349 351 if (bbt[i]) 350 352 bad += 1; 351 353 cond_resched(); 352 354 } 353 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 355 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 354 356 return 0; 355 357 } 356 358 ··· 366 368 printk(KERN_INFO "=================================================\n"); 367 369 368 370 if (dev < 0) { 369 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 370 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 371 + pr_info("Please specify a valid mtd-device via module parameter\n"); 372 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 371 373 return -EINVAL; 372 374 } 373 375 374 - printk(PRINT_PREF "MTD device: %d\n", dev); 376 + pr_info("MTD device: %d\n", dev); 375 377 376 378 mtd = get_mtd_device(NULL, dev); 377 379 if (IS_ERR(mtd)) { 378 380 err = PTR_ERR(mtd); 379 - printk(PRINT_PREF "error: cannot get MTD device\n"); 381 + pr_err("error: cannot get MTD device\n"); 380 382 return err; 381 383 } 382 384 383 385 if (mtd->type != MTD_NANDFLASH) { 384 - printk(PRINT_PREF "this test requires NAND flash\n"); 386 + pr_info("this test requires NAND flash\n"); 385 387 goto out; 386 388 } 387 389 ··· 390 392 ebcnt = tmp; 391 393 pgcnt = mtd->erasesize / mtd->writesize; 392 394 393 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 395 + pr_info("MTD device size %llu, eraseblock size %u, " 394 396 "page size %u, count of eraseblocks %u, pages per " 395 397 "eraseblock %u, OOB size %u\n", 396 398 (unsigned long long)mtd->size, mtd->erasesize, ··· 399 401 err = -ENOMEM; 400 402 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); 401 403 if (!readbuf) { 402 - printk(PRINT_PREF "error: cannot allocate memory\n"); 404 + pr_err("error: cannot allocate memory\n"); 403 405 goto out; 404 406 } 405 407 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 406 408 if (!writebuf) { 407 - printk(PRINT_PREF "error: cannot 
allocate memory\n"); 409 + pr_err("error: cannot allocate memory\n"); 408 410 goto out; 409 411 } 410 412 ··· 418 420 vary_offset = 0; 419 421 420 422 /* First test: write all OOB, read it back and verify */ 421 - printk(PRINT_PREF "test 1 of 5\n"); 423 + pr_info("test 1 of 5\n"); 422 424 423 425 err = erase_whole_device(); 424 426 if (err) ··· 438 440 * Second test: write all OOB, a block at a time, read it back and 439 441 * verify. 440 442 */ 441 - printk(PRINT_PREF "test 2 of 5\n"); 443 + pr_info("test 2 of 5\n"); 442 444 443 445 err = erase_whole_device(); 444 446 if (err) ··· 451 453 452 454 /* Check all eraseblocks */ 453 455 simple_srand(3); 454 - printk(PRINT_PREF "verifying all eraseblocks\n"); 456 + pr_info("verifying all eraseblocks\n"); 455 457 for (i = 0; i < ebcnt; ++i) { 456 458 if (bbt[i]) 457 459 continue; ··· 459 461 if (err) 460 462 goto out; 461 463 if (i % 256 == 0) 462 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 464 + pr_info("verified up to eraseblock %u\n", i); 463 465 cond_resched(); 464 466 } 465 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 467 + pr_info("verified %u eraseblocks\n", i); 466 468 467 469 /* 468 470 * Third test: write OOB at varying offsets and lengths, read it back 469 471 * and verify. 
470 472 */ 471 - printk(PRINT_PREF "test 3 of 5\n"); 473 + pr_info("test 3 of 5\n"); 472 474 473 475 err = erase_whole_device(); 474 476 if (err) ··· 501 503 vary_offset = 0; 502 504 503 505 /* Fourth test: try to write off end of device */ 504 - printk(PRINT_PREF "test 4 of 5\n"); 506 + pr_info("test 4 of 5\n"); 505 507 506 508 err = erase_whole_device(); 507 509 if (err) ··· 520 522 ops.ooboffs = mtd->ecclayout->oobavail; 521 523 ops.datbuf = NULL; 522 524 ops.oobbuf = writebuf; 523 - printk(PRINT_PREF "attempting to start write past end of OOB\n"); 524 - printk(PRINT_PREF "an error is expected...\n"); 525 + pr_info("attempting to start write past end of OOB\n"); 526 + pr_info("an error is expected...\n"); 525 527 err = mtd_write_oob(mtd, addr0, &ops); 526 528 if (err) { 527 - printk(PRINT_PREF "error occurred as expected\n"); 529 + pr_info("error occurred as expected\n"); 528 530 err = 0; 529 531 } else { 530 - printk(PRINT_PREF "error: can write past end of OOB\n"); 532 + pr_err("error: can write past end of OOB\n"); 531 533 errcnt += 1; 532 534 } 533 535 ··· 540 542 ops.ooboffs = mtd->ecclayout->oobavail; 541 543 ops.datbuf = NULL; 542 544 ops.oobbuf = readbuf; 543 - printk(PRINT_PREF "attempting to start read past end of OOB\n"); 544 - printk(PRINT_PREF "an error is expected...\n"); 545 + pr_info("attempting to start read past end of OOB\n"); 546 + pr_info("an error is expected...\n"); 545 547 err = mtd_read_oob(mtd, addr0, &ops); 546 548 if (err) { 547 - printk(PRINT_PREF "error occurred as expected\n"); 549 + pr_info("error occurred as expected\n"); 548 550 err = 0; 549 551 } else { 550 - printk(PRINT_PREF "error: can read past end of OOB\n"); 552 + pr_err("error: can read past end of OOB\n"); 551 553 errcnt += 1; 552 554 } 553 555 554 556 if (bbt[ebcnt - 1]) 555 - printk(PRINT_PREF "skipping end of device tests because last " 557 + pr_info("skipping end of device tests because last " 556 558 "block is bad\n"); 557 559 else { 558 560 /* Attempt to write off 
end of device */ ··· 564 566 ops.ooboffs = 0; 565 567 ops.datbuf = NULL; 566 568 ops.oobbuf = writebuf; 567 - printk(PRINT_PREF "attempting to write past end of device\n"); 568 - printk(PRINT_PREF "an error is expected...\n"); 569 + pr_info("attempting to write past end of device\n"); 570 + pr_info("an error is expected...\n"); 569 571 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 570 572 if (err) { 571 - printk(PRINT_PREF "error occurred as expected\n"); 573 + pr_info("error occurred as expected\n"); 572 574 err = 0; 573 575 } else { 574 - printk(PRINT_PREF "error: wrote past end of device\n"); 576 + pr_err("error: wrote past end of device\n"); 575 577 errcnt += 1; 576 578 } 577 579 ··· 584 586 ops.ooboffs = 0; 585 587 ops.datbuf = NULL; 586 588 ops.oobbuf = readbuf; 587 - printk(PRINT_PREF "attempting to read past end of device\n"); 588 - printk(PRINT_PREF "an error is expected...\n"); 589 + pr_info("attempting to read past end of device\n"); 590 + pr_info("an error is expected...\n"); 589 591 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 590 592 if (err) { 591 - printk(PRINT_PREF "error occurred as expected\n"); 593 + pr_info("error occurred as expected\n"); 592 594 err = 0; 593 595 } else { 594 - printk(PRINT_PREF "error: read past end of device\n"); 596 + pr_err("error: read past end of device\n"); 595 597 errcnt += 1; 596 598 } 597 599 ··· 608 610 ops.ooboffs = 1; 609 611 ops.datbuf = NULL; 610 612 ops.oobbuf = writebuf; 611 - printk(PRINT_PREF "attempting to write past end of device\n"); 612 - printk(PRINT_PREF "an error is expected...\n"); 613 + pr_info("attempting to write past end of device\n"); 614 + pr_info("an error is expected...\n"); 613 615 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 614 616 if (err) { 615 - printk(PRINT_PREF "error occurred as expected\n"); 617 + pr_info("error occurred as expected\n"); 616 618 err = 0; 617 619 } else { 618 - printk(PRINT_PREF "error: wrote past end of device\n"); 620 + 
pr_err("error: wrote past end of device\n"); 619 621 errcnt += 1; 620 622 } 621 623 ··· 628 630 ops.ooboffs = 1; 629 631 ops.datbuf = NULL; 630 632 ops.oobbuf = readbuf; 631 - printk(PRINT_PREF "attempting to read past end of device\n"); 632 - printk(PRINT_PREF "an error is expected...\n"); 633 + pr_info("attempting to read past end of device\n"); 634 + pr_info("an error is expected...\n"); 633 635 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 634 636 if (err) { 635 - printk(PRINT_PREF "error occurred as expected\n"); 637 + pr_info("error occurred as expected\n"); 636 638 err = 0; 637 639 } else { 638 - printk(PRINT_PREF "error: read past end of device\n"); 640 + pr_err("error: read past end of device\n"); 639 641 errcnt += 1; 640 642 } 641 643 } 642 644 643 645 /* Fifth test: write / read across block boundaries */ 644 - printk(PRINT_PREF "test 5 of 5\n"); 646 + pr_info("test 5 of 5\n"); 645 647 646 648 /* Erase all eraseblocks */ 647 649 err = erase_whole_device(); ··· 650 652 651 653 /* Write all eraseblocks */ 652 654 simple_srand(11); 653 - printk(PRINT_PREF "writing OOBs of whole device\n"); 655 + pr_info("writing OOBs of whole device\n"); 654 656 for (i = 0; i < ebcnt - 1; ++i) { 655 657 int cnt = 2; 656 658 int pg; ··· 672 674 if (err) 673 675 goto out; 674 676 if (i % 256 == 0) 675 - printk(PRINT_PREF "written up to eraseblock " 676 - "%u\n", i); 677 + pr_info("written up to eraseblock %u\n", i); 677 678 cond_resched(); 678 679 addr += mtd->writesize; 679 680 } 680 681 } 681 - printk(PRINT_PREF "written %u eraseblocks\n", i); 682 + pr_info("written %u eraseblocks\n", i); 682 683 683 684 /* Check all eraseblocks */ 684 685 simple_srand(11); 685 - printk(PRINT_PREF "verifying all eraseblocks\n"); 686 + pr_info("verifying all eraseblocks\n"); 686 687 for (i = 0; i < ebcnt - 1; ++i) { 687 688 if (bbt[i] || bbt[i + 1]) 688 689 continue; ··· 699 702 if (err) 700 703 goto out; 701 704 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) { 702 
- printk(PRINT_PREF "error: verify failed at %#llx\n", 705 + pr_err("error: verify failed at %#llx\n", 703 706 (long long)addr); 704 707 errcnt += 1; 705 708 if (errcnt > 1000) { 706 - printk(PRINT_PREF "error: too many errors\n"); 709 + pr_err("error: too many errors\n"); 707 710 goto out; 708 711 } 709 712 } 710 713 if (i % 256 == 0) 711 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 714 + pr_info("verified up to eraseblock %u\n", i); 712 715 cond_resched(); 713 716 } 714 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 717 + pr_info("verified %u eraseblocks\n", i); 715 718 716 - printk(PRINT_PREF "finished with %d errors\n", errcnt); 719 + pr_info("finished with %d errors\n", errcnt); 717 720 out: 718 721 kfree(bbt); 719 722 kfree(writebuf); 720 723 kfree(readbuf); 721 724 put_mtd_device(mtd); 722 725 if (err) 723 - printk(PRINT_PREF "error %d occurred\n", err); 726 + pr_info("error %d occurred\n", err); 724 727 printk(KERN_INFO "=================================================\n"); 725 728 return err; 726 729 }
+76 -76
drivers/mtd/tests/mtd_pagetest.c
··· 19 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <asm/div64.h> 23 25 #include <linux/init.h> 24 26 #include <linux/module.h> ··· 29 27 #include <linux/mtd/mtd.h> 30 28 #include <linux/slab.h> 31 29 #include <linux/sched.h> 32 - 33 - #define PRINT_PREF KERN_INFO "mtd_pagetest: " 34 30 35 31 static int dev = -EINVAL; 36 32 module_param(dev, int, S_IRUGO); ··· 79 79 80 80 err = mtd_erase(mtd, &ei); 81 81 if (err) { 82 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 82 + pr_err("error %d while erasing EB %d\n", err, ebnum); 83 83 return err; 84 84 } 85 85 86 86 if (ei.state == MTD_ERASE_FAILED) { 87 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 87 + pr_err("some erase error occurred at EB %d\n", 88 88 ebnum); 89 89 return -EIO; 90 90 } ··· 102 102 cond_resched(); 103 103 err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf); 104 104 if (err || written != mtd->erasesize) 105 - printk(PRINT_PREF "error: write failed at %#llx\n", 105 + pr_err("error: write failed at %#llx\n", 106 106 (long long)addr); 107 107 108 108 return err; ··· 131 131 if (mtd_is_bitflip(err)) 132 132 err = 0; 133 133 if (err || read != bufsize) { 134 - printk(PRINT_PREF "error: read failed at %#llx\n", 134 + pr_err("error: read failed at %#llx\n", 135 135 (long long)addr0); 136 136 return err; 137 137 } ··· 139 139 if (mtd_is_bitflip(err)) 140 140 err = 0; 141 141 if (err || read != bufsize) { 142 - printk(PRINT_PREF "error: read failed at %#llx\n", 142 + pr_err("error: read failed at %#llx\n", 143 143 (long long)(addrn - bufsize)); 144 144 return err; 145 145 } ··· 148 148 if (mtd_is_bitflip(err)) 149 149 err = 0; 150 150 if (err || read != bufsize) { 151 - printk(PRINT_PREF "error: read failed at %#llx\n", 151 + pr_err("error: read failed at %#llx\n", 152 152 (long long)addr); 153 153 break; 154 154 } 155 155 if (memcmp(twopages, writebuf + (j * pgsize), 
bufsize)) { 156 - printk(PRINT_PREF "error: verify failed at %#llx\n", 156 + pr_err("error: verify failed at %#llx\n", 157 157 (long long)addr); 158 158 errcnt += 1; 159 159 } ··· 166 166 if (mtd_is_bitflip(err)) 167 167 err = 0; 168 168 if (err || read != bufsize) { 169 - printk(PRINT_PREF "error: read failed at %#llx\n", 169 + pr_err("error: read failed at %#llx\n", 170 170 (long long)addr0); 171 171 return err; 172 172 } ··· 174 174 if (mtd_is_bitflip(err)) 175 175 err = 0; 176 176 if (err || read != bufsize) { 177 - printk(PRINT_PREF "error: read failed at %#llx\n", 177 + pr_err("error: read failed at %#llx\n", 178 178 (long long)(addrn - bufsize)); 179 179 return err; 180 180 } ··· 183 183 if (mtd_is_bitflip(err)) 184 184 err = 0; 185 185 if (err || read != bufsize) { 186 - printk(PRINT_PREF "error: read failed at %#llx\n", 186 + pr_err("error: read failed at %#llx\n", 187 187 (long long)addr); 188 188 return err; 189 189 } 190 190 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize); 191 191 set_random_data(boundary + pgsize, pgsize); 192 192 if (memcmp(twopages, boundary, bufsize)) { 193 - printk(PRINT_PREF "error: verify failed at %#llx\n", 193 + pr_err("error: verify failed at %#llx\n", 194 194 (long long)addr); 195 195 errcnt += 1; 196 196 } ··· 206 206 loff_t addr, addr0, addrn; 207 207 unsigned char *pp1, *pp2, *pp3, *pp4; 208 208 209 - printk(PRINT_PREF "crosstest\n"); 209 + pr_info("crosstest\n"); 210 210 pp1 = kmalloc(pgsize * 4, GFP_KERNEL); 211 211 if (!pp1) { 212 - printk(PRINT_PREF "error: cannot allocate memory\n"); 212 + pr_err("error: cannot allocate memory\n"); 213 213 return -ENOMEM; 214 214 } 215 215 pp2 = pp1 + pgsize; ··· 231 231 if (mtd_is_bitflip(err)) 232 232 err = 0; 233 233 if (err || read != pgsize) { 234 - printk(PRINT_PREF "error: read failed at %#llx\n", 234 + pr_err("error: read failed at %#llx\n", 235 235 (long long)addr); 236 236 kfree(pp1); 237 237 return err; ··· 243 243 if (mtd_is_bitflip(err)) 244 244 err = 0; 245 
245 if (err || read != pgsize) { 246 - printk(PRINT_PREF "error: read failed at %#llx\n", 246 + pr_err("error: read failed at %#llx\n", 247 247 (long long)addr); 248 248 kfree(pp1); 249 249 return err; ··· 251 251 252 252 /* Read first page to pp2 */ 253 253 addr = addr0; 254 - printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 254 + pr_info("reading page at %#llx\n", (long long)addr); 255 255 err = mtd_read(mtd, addr, pgsize, &read, pp2); 256 256 if (mtd_is_bitflip(err)) 257 257 err = 0; 258 258 if (err || read != pgsize) { 259 - printk(PRINT_PREF "error: read failed at %#llx\n", 259 + pr_err("error: read failed at %#llx\n", 260 260 (long long)addr); 261 261 kfree(pp1); 262 262 return err; ··· 264 264 265 265 /* Read last page to pp3 */ 266 266 addr = addrn - pgsize; 267 - printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 267 + pr_info("reading page at %#llx\n", (long long)addr); 268 268 err = mtd_read(mtd, addr, pgsize, &read, pp3); 269 269 if (mtd_is_bitflip(err)) 270 270 err = 0; 271 271 if (err || read != pgsize) { 272 - printk(PRINT_PREF "error: read failed at %#llx\n", 272 + pr_err("error: read failed at %#llx\n", 273 273 (long long)addr); 274 274 kfree(pp1); 275 275 return err; ··· 277 277 278 278 /* Read first page again to pp4 */ 279 279 addr = addr0; 280 - printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 280 + pr_info("reading page at %#llx\n", (long long)addr); 281 281 err = mtd_read(mtd, addr, pgsize, &read, pp4); 282 282 if (mtd_is_bitflip(err)) 283 283 err = 0; 284 284 if (err || read != pgsize) { 285 - printk(PRINT_PREF "error: read failed at %#llx\n", 285 + pr_err("error: read failed at %#llx\n", 286 286 (long long)addr); 287 287 kfree(pp1); 288 288 return err; 289 289 } 290 290 291 291 /* pp2 and pp4 should be the same */ 292 - printk(PRINT_PREF "verifying pages read at %#llx match\n", 292 + pr_info("verifying pages read at %#llx match\n", 293 293 (long long)addr0); 294 294 if (memcmp(pp2, pp4, pgsize)) { 295 
- printk(PRINT_PREF "verify failed!\n"); 295 + pr_err("verify failed!\n"); 296 296 errcnt += 1; 297 297 } else if (!err) 298 - printk(PRINT_PREF "crosstest ok\n"); 298 + pr_info("crosstest ok\n"); 299 299 kfree(pp1); 300 300 return err; 301 301 } ··· 307 307 loff_t addr0; 308 308 char *readbuf = twopages; 309 309 310 - printk(PRINT_PREF "erasecrosstest\n"); 310 + pr_info("erasecrosstest\n"); 311 311 312 312 ebnum = 0; 313 313 addr0 = 0; ··· 320 320 while (ebnum2 && bbt[ebnum2]) 321 321 ebnum2 -= 1; 322 322 323 - printk(PRINT_PREF "erasing block %d\n", ebnum); 323 + pr_info("erasing block %d\n", ebnum); 324 324 err = erase_eraseblock(ebnum); 325 325 if (err) 326 326 return err; 327 327 328 - printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 328 + pr_info("writing 1st page of block %d\n", ebnum); 329 329 set_random_data(writebuf, pgsize); 330 330 strcpy(writebuf, "There is no data like this!"); 331 331 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 332 332 if (err || written != pgsize) { 333 - printk(PRINT_PREF "error: write failed at %#llx\n", 333 + pr_info("error: write failed at %#llx\n", 334 334 (long long)addr0); 335 335 return err ? err : -1; 336 336 } 337 337 338 - printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 338 + pr_info("reading 1st page of block %d\n", ebnum); 339 339 memset(readbuf, 0, pgsize); 340 340 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 341 341 if (mtd_is_bitflip(err)) 342 342 err = 0; 343 343 if (err || read != pgsize) { 344 - printk(PRINT_PREF "error: read failed at %#llx\n", 344 + pr_err("error: read failed at %#llx\n", 345 345 (long long)addr0); 346 346 return err ? 
err : -1; 347 347 } 348 348 349 - printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); 349 + pr_info("verifying 1st page of block %d\n", ebnum); 350 350 if (memcmp(writebuf, readbuf, pgsize)) { 351 - printk(PRINT_PREF "verify failed!\n"); 351 + pr_err("verify failed!\n"); 352 352 errcnt += 1; 353 353 return -1; 354 354 } 355 355 356 - printk(PRINT_PREF "erasing block %d\n", ebnum); 356 + pr_info("erasing block %d\n", ebnum); 357 357 err = erase_eraseblock(ebnum); 358 358 if (err) 359 359 return err; 360 360 361 - printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 361 + pr_info("writing 1st page of block %d\n", ebnum); 362 362 set_random_data(writebuf, pgsize); 363 363 strcpy(writebuf, "There is no data like this!"); 364 364 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 365 365 if (err || written != pgsize) { 366 - printk(PRINT_PREF "error: write failed at %#llx\n", 366 + pr_err("error: write failed at %#llx\n", 367 367 (long long)addr0); 368 368 return err ? err : -1; 369 369 } 370 370 371 - printk(PRINT_PREF "erasing block %d\n", ebnum2); 371 + pr_info("erasing block %d\n", ebnum2); 372 372 err = erase_eraseblock(ebnum2); 373 373 if (err) 374 374 return err; 375 375 376 - printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 376 + pr_info("reading 1st page of block %d\n", ebnum); 377 377 memset(readbuf, 0, pgsize); 378 378 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 379 379 if (mtd_is_bitflip(err)) 380 380 err = 0; 381 381 if (err || read != pgsize) { 382 - printk(PRINT_PREF "error: read failed at %#llx\n", 382 + pr_err("error: read failed at %#llx\n", 383 383 (long long)addr0); 384 384 return err ? 
err : -1; 385 385 } 386 386 387 - printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); 387 + pr_info("verifying 1st page of block %d\n", ebnum); 388 388 if (memcmp(writebuf, readbuf, pgsize)) { 389 - printk(PRINT_PREF "verify failed!\n"); 389 + pr_err("verify failed!\n"); 390 390 errcnt += 1; 391 391 return -1; 392 392 } 393 393 394 394 if (!err) 395 - printk(PRINT_PREF "erasecrosstest ok\n"); 395 + pr_info("erasecrosstest ok\n"); 396 396 return err; 397 397 } 398 398 ··· 402 402 int err = 0, i, ebnum, ok = 1; 403 403 loff_t addr0; 404 404 405 - printk(PRINT_PREF "erasetest\n"); 405 + pr_info("erasetest\n"); 406 406 407 407 ebnum = 0; 408 408 addr0 = 0; ··· 411 411 ebnum += 1; 412 412 } 413 413 414 - printk(PRINT_PREF "erasing block %d\n", ebnum); 414 + pr_info("erasing block %d\n", ebnum); 415 415 err = erase_eraseblock(ebnum); 416 416 if (err) 417 417 return err; 418 418 419 - printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 419 + pr_info("writing 1st page of block %d\n", ebnum); 420 420 set_random_data(writebuf, pgsize); 421 421 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 422 422 if (err || written != pgsize) { 423 - printk(PRINT_PREF "error: write failed at %#llx\n", 423 + pr_err("error: write failed at %#llx\n", 424 424 (long long)addr0); 425 425 return err ? err : -1; 426 426 } 427 427 428 - printk(PRINT_PREF "erasing block %d\n", ebnum); 428 + pr_info("erasing block %d\n", ebnum); 429 429 err = erase_eraseblock(ebnum); 430 430 if (err) 431 431 return err; 432 432 433 - printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 433 + pr_info("reading 1st page of block %d\n", ebnum); 434 434 err = mtd_read(mtd, addr0, pgsize, &read, twopages); 435 435 if (mtd_is_bitflip(err)) 436 436 err = 0; 437 437 if (err || read != pgsize) { 438 - printk(PRINT_PREF "error: read failed at %#llx\n", 438 + pr_err("error: read failed at %#llx\n", 439 439 (long long)addr0); 440 440 return err ? 
err : -1; 441 441 } 442 442 443 - printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n", 443 + pr_info("verifying 1st page of block %d is all 0xff\n", 444 444 ebnum); 445 445 for (i = 0; i < pgsize; ++i) 446 446 if (twopages[i] != 0xff) { 447 - printk(PRINT_PREF "verifying all 0xff failed at %d\n", 447 + pr_err("verifying all 0xff failed at %d\n", 448 448 i); 449 449 errcnt += 1; 450 450 ok = 0; ··· 452 452 } 453 453 454 454 if (ok && !err) 455 - printk(PRINT_PREF "erasetest ok\n"); 455 + pr_info("erasetest ok\n"); 456 456 457 457 return err; 458 458 } ··· 464 464 465 465 ret = mtd_block_isbad(mtd, addr); 466 466 if (ret) 467 - printk(PRINT_PREF "block %d is bad\n", ebnum); 467 + pr_info("block %d is bad\n", ebnum); 468 468 return ret; 469 469 } 470 470 ··· 474 474 475 475 bbt = kzalloc(ebcnt, GFP_KERNEL); 476 476 if (!bbt) { 477 - printk(PRINT_PREF "error: cannot allocate memory\n"); 477 + pr_err("error: cannot allocate memory\n"); 478 478 return -ENOMEM; 479 479 } 480 480 481 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 481 + pr_info("scanning for bad eraseblocks\n"); 482 482 for (i = 0; i < ebcnt; ++i) { 483 483 bbt[i] = is_block_bad(i) ? 
1 : 0; 484 484 if (bbt[i]) 485 485 bad += 1; 486 486 cond_resched(); 487 487 } 488 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 488 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 489 489 return 0; 490 490 } 491 491 ··· 499 499 printk(KERN_INFO "=================================================\n"); 500 500 501 501 if (dev < 0) { 502 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 503 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 502 + pr_info("Please specify a valid mtd-device via module parameter\n"); 503 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 504 504 return -EINVAL; 505 505 } 506 506 507 - printk(PRINT_PREF "MTD device: %d\n", dev); 507 + pr_info("MTD device: %d\n", dev); 508 508 509 509 mtd = get_mtd_device(NULL, dev); 510 510 if (IS_ERR(mtd)) { 511 511 err = PTR_ERR(mtd); 512 - printk(PRINT_PREF "error: cannot get MTD device\n"); 512 + pr_err("error: cannot get MTD device\n"); 513 513 return err; 514 514 } 515 515 516 516 if (mtd->type != MTD_NANDFLASH) { 517 - printk(PRINT_PREF "this test requires NAND flash\n"); 517 + pr_info("this test requires NAND flash\n"); 518 518 goto out; 519 519 } 520 520 ··· 524 524 pgcnt = mtd->erasesize / mtd->writesize; 525 525 pgsize = mtd->writesize; 526 526 527 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 527 + pr_info("MTD device size %llu, eraseblock size %u, " 528 528 "page size %u, count of eraseblocks %u, pages per " 529 529 "eraseblock %u, OOB size %u\n", 530 530 (unsigned long long)mtd->size, mtd->erasesize, ··· 534 534 bufsize = pgsize * 2; 535 535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 536 536 if (!writebuf) { 537 - printk(PRINT_PREF "error: cannot allocate memory\n"); 537 + pr_err("error: cannot allocate memory\n"); 538 538 goto out; 539 539 } 540 540 twopages = kmalloc(bufsize, GFP_KERNEL); 541 541 if (!twopages) { 542 - printk(PRINT_PREF 
"error: cannot allocate memory\n"); 542 + pr_err("error: cannot allocate memory\n"); 543 543 goto out; 544 544 } 545 545 boundary = kmalloc(bufsize, GFP_KERNEL); 546 546 if (!boundary) { 547 - printk(PRINT_PREF "error: cannot allocate memory\n"); 547 + pr_err("error: cannot allocate memory\n"); 548 548 goto out; 549 549 } 550 550 ··· 553 553 goto out; 554 554 555 555 /* Erase all eraseblocks */ 556 - printk(PRINT_PREF "erasing whole device\n"); 556 + pr_info("erasing whole device\n"); 557 557 for (i = 0; i < ebcnt; ++i) { 558 558 if (bbt[i]) 559 559 continue; ··· 562 562 goto out; 563 563 cond_resched(); 564 564 } 565 - printk(PRINT_PREF "erased %u eraseblocks\n", i); 565 + pr_info("erased %u eraseblocks\n", i); 566 566 567 567 /* Write all eraseblocks */ 568 568 simple_srand(1); 569 - printk(PRINT_PREF "writing whole device\n"); 569 + pr_info("writing whole device\n"); 570 570 for (i = 0; i < ebcnt; ++i) { 571 571 if (bbt[i]) 572 572 continue; ··· 574 574 if (err) 575 575 goto out; 576 576 if (i % 256 == 0) 577 - printk(PRINT_PREF "written up to eraseblock %u\n", i); 577 + pr_info("written up to eraseblock %u\n", i); 578 578 cond_resched(); 579 579 } 580 - printk(PRINT_PREF "written %u eraseblocks\n", i); 580 + pr_info("written %u eraseblocks\n", i); 581 581 582 582 /* Check all eraseblocks */ 583 583 simple_srand(1); 584 - printk(PRINT_PREF "verifying all eraseblocks\n"); 584 + pr_info("verifying all eraseblocks\n"); 585 585 for (i = 0; i < ebcnt; ++i) { 586 586 if (bbt[i]) 587 587 continue; ··· 589 589 if (err) 590 590 goto out; 591 591 if (i % 256 == 0) 592 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 592 + pr_info("verified up to eraseblock %u\n", i); 593 593 cond_resched(); 594 594 } 595 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 595 + pr_info("verified %u eraseblocks\n", i); 596 596 597 597 err = crosstest(); 598 598 if (err) ··· 606 606 if (err) 607 607 goto out; 608 608 609 - printk(PRINT_PREF "finished with %d errors\n", errcnt); 
609 + pr_info("finished with %d errors\n", errcnt); 610 610 out: 611 611 612 612 kfree(bbt); ··· 615 615 kfree(writebuf); 616 616 put_mtd_device(mtd); 617 617 if (err) 618 - printk(PRINT_PREF "error %d occurred\n", err); 618 + pr_info("error %d occurred\n", err); 619 619 printk(KERN_INFO "=================================================\n"); 620 620 return err; 621 621 }
+22 -22
drivers/mtd/tests/mtd_readtest.c
··· 19 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <linux/init.h> 23 25 #include <linux/module.h> 24 26 #include <linux/moduleparam.h> ··· 28 26 #include <linux/mtd/mtd.h> 29 27 #include <linux/slab.h> 30 28 #include <linux/sched.h> 31 - 32 - #define PRINT_PREF KERN_INFO "mtd_readtest: " 33 29 34 30 static int dev = -EINVAL; 35 31 module_param(dev, int, S_IRUGO); ··· 51 51 void *oobbuf = iobuf1; 52 52 53 53 for (i = 0; i < pgcnt; i++) { 54 - memset(buf, 0 , pgcnt); 54 + memset(buf, 0 , pgsize); 55 55 ret = mtd_read(mtd, addr, pgsize, &read, buf); 56 56 if (ret == -EUCLEAN) 57 57 ret = 0; 58 58 if (ret || read != pgsize) { 59 - printk(PRINT_PREF "error: read failed at %#llx\n", 59 + pr_err("error: read failed at %#llx\n", 60 60 (long long)addr); 61 61 if (!err) 62 62 err = ret; ··· 77 77 ret = mtd_read_oob(mtd, addr, &ops); 78 78 if ((ret && !mtd_is_bitflip(ret)) || 79 79 ops.oobretlen != mtd->oobsize) { 80 - printk(PRINT_PREF "error: read oob failed at " 80 + pr_err("error: read oob failed at " 81 81 "%#llx\n", (long long)addr); 82 82 if (!err) 83 83 err = ret; ··· 99 99 char line[128]; 100 100 int pg, oob; 101 101 102 - printk(PRINT_PREF "dumping eraseblock %d\n", ebnum); 102 + pr_info("dumping eraseblock %d\n", ebnum); 103 103 n = mtd->erasesize; 104 104 for (i = 0; i < n;) { 105 105 char *p = line; ··· 112 112 } 113 113 if (!mtd->oobsize) 114 114 return; 115 - printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum); 115 + pr_info("dumping oob from eraseblock %d\n", ebnum); 116 116 n = mtd->oobsize; 117 117 for (pg = 0, i = 0; pg < pgcnt; pg++) 118 118 for (oob = 0; oob < n;) { ··· 134 134 135 135 ret = mtd_block_isbad(mtd, addr); 136 136 if (ret) 137 - printk(PRINT_PREF "block %d is bad\n", ebnum); 137 + pr_info("block %d is bad\n", ebnum); 138 138 return ret; 139 139 } 140 140 ··· 144 144 145 145 bbt = kzalloc(ebcnt, GFP_KERNEL); 146 146 if (!bbt) { 147 - 
printk(PRINT_PREF "error: cannot allocate memory\n"); 147 + pr_err("error: cannot allocate memory\n"); 148 148 return -ENOMEM; 149 149 } 150 150 151 151 if (!mtd_can_have_bb(mtd)) 152 152 return 0; 153 153 154 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 154 + pr_info("scanning for bad eraseblocks\n"); 155 155 for (i = 0; i < ebcnt; ++i) { 156 156 bbt[i] = is_block_bad(i) ? 1 : 0; 157 157 if (bbt[i]) 158 158 bad += 1; 159 159 cond_resched(); 160 160 } 161 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 161 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 162 162 return 0; 163 163 } 164 164 ··· 171 171 printk(KERN_INFO "=================================================\n"); 172 172 173 173 if (dev < 0) { 174 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 174 + pr_info("Please specify a valid mtd-device via module parameter\n"); 175 175 return -EINVAL; 176 176 } 177 177 178 - printk(PRINT_PREF "MTD device: %d\n", dev); 178 + pr_info("MTD device: %d\n", dev); 179 179 180 180 mtd = get_mtd_device(NULL, dev); 181 181 if (IS_ERR(mtd)) { 182 182 err = PTR_ERR(mtd); 183 - printk(PRINT_PREF "error: Cannot get MTD device\n"); 183 + pr_err("error: Cannot get MTD device\n"); 184 184 return err; 185 185 } 186 186 187 187 if (mtd->writesize == 1) { 188 - printk(PRINT_PREF "not NAND flash, assume page size is 512 " 188 + pr_info("not NAND flash, assume page size is 512 " 189 189 "bytes.\n"); 190 190 pgsize = 512; 191 191 } else ··· 196 196 ebcnt = tmp; 197 197 pgcnt = mtd->erasesize / pgsize; 198 198 199 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 199 + pr_info("MTD device size %llu, eraseblock size %u, " 200 200 "page size %u, count of eraseblocks %u, pages per " 201 201 "eraseblock %u, OOB size %u\n", 202 202 (unsigned long long)mtd->size, mtd->erasesize, ··· 205 205 err = -ENOMEM; 206 206 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 207 207 if (!iobuf) { 208 - printk(PRINT_PREF 
"error: cannot allocate memory\n"); 208 + pr_err("error: cannot allocate memory\n"); 209 209 goto out; 210 210 } 211 211 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL); 212 212 if (!iobuf1) { 213 - printk(PRINT_PREF "error: cannot allocate memory\n"); 213 + pr_err("error: cannot allocate memory\n"); 214 214 goto out; 215 215 } 216 216 ··· 219 219 goto out; 220 220 221 221 /* Read all eraseblocks 1 page at a time */ 222 - printk(PRINT_PREF "testing page read\n"); 222 + pr_info("testing page read\n"); 223 223 for (i = 0; i < ebcnt; ++i) { 224 224 int ret; 225 225 ··· 235 235 } 236 236 237 237 if (err) 238 - printk(PRINT_PREF "finished with errors\n"); 238 + pr_info("finished with errors\n"); 239 239 else 240 - printk(PRINT_PREF "finished\n"); 240 + pr_info("finished\n"); 241 241 242 242 out: 243 243 ··· 246 246 kfree(bbt); 247 247 put_mtd_device(mtd); 248 248 if (err) 249 - printk(PRINT_PREF "error %d occurred\n", err); 249 + pr_info("error %d occurred\n", err); 250 250 printk(KERN_INFO "=================================================\n"); 251 251 return err; 252 252 }
+44 -44
drivers/mtd/tests/mtd_speedtest.c
··· 19 19 * Author: Adrian Hunter <adrian.hunter@nokia.com> 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <linux/init.h> 23 25 #include <linux/module.h> 24 26 #include <linux/moduleparam.h> ··· 29 27 #include <linux/slab.h> 30 28 #include <linux/sched.h> 31 29 #include <linux/random.h> 32 - 33 - #define PRINT_PREF KERN_INFO "mtd_speedtest: " 34 30 35 31 static int dev = -EINVAL; 36 32 module_param(dev, int, S_IRUGO); ··· 70 70 71 71 err = mtd_erase(mtd, &ei); 72 72 if (err) { 73 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 73 + pr_err("error %d while erasing EB %d\n", err, ebnum); 74 74 return err; 75 75 } 76 76 77 77 if (ei.state == MTD_ERASE_FAILED) { 78 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 78 + pr_err("some erase error occurred at EB %d\n", 79 79 ebnum); 80 80 return -EIO; 81 81 } ··· 96 96 97 97 err = mtd_erase(mtd, &ei); 98 98 if (err) { 99 - printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", 99 + pr_err("error %d while erasing EB %d, blocks %d\n", 100 100 err, ebnum, blocks); 101 101 return err; 102 102 } 103 103 104 104 if (ei.state == MTD_ERASE_FAILED) { 105 - printk(PRINT_PREF "some erase error occurred at EB %d," 105 + pr_err("some erase error occurred at EB %d," 106 106 "blocks %d\n", ebnum, blocks); 107 107 return -EIO; 108 108 } ··· 134 134 135 135 err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf); 136 136 if (err || written != mtd->erasesize) { 137 - printk(PRINT_PREF "error: write failed at %#llx\n", addr); 137 + pr_err("error: write failed at %#llx\n", addr); 138 138 if (!err) 139 139 err = -EINVAL; 140 140 } ··· 152 152 for (i = 0; i < pgcnt; i++) { 153 153 err = mtd_write(mtd, addr, pgsize, &written, buf); 154 154 if (err || written != pgsize) { 155 - printk(PRINT_PREF "error: write failed at %#llx\n", 155 + pr_err("error: write failed at %#llx\n", 156 156 addr); 157 157 if (!err) 158 158 err = -EINVAL; ··· 175 175 for (i = 0; i < n; i++) { 176 
176 err = mtd_write(mtd, addr, sz, &written, buf); 177 177 if (err || written != sz) { 178 - printk(PRINT_PREF "error: write failed at %#llx\n", 178 + pr_err("error: write failed at %#llx\n", 179 179 addr); 180 180 if (!err) 181 181 err = -EINVAL; ··· 187 187 if (pgcnt % 2) { 188 188 err = mtd_write(mtd, addr, pgsize, &written, buf); 189 189 if (err || written != pgsize) { 190 - printk(PRINT_PREF "error: write failed at %#llx\n", 190 + pr_err("error: write failed at %#llx\n", 191 191 addr); 192 192 if (!err) 193 193 err = -EINVAL; ··· 208 208 if (mtd_is_bitflip(err)) 209 209 err = 0; 210 210 if (err || read != mtd->erasesize) { 211 - printk(PRINT_PREF "error: read failed at %#llx\n", addr); 211 + pr_err("error: read failed at %#llx\n", addr); 212 212 if (!err) 213 213 err = -EINVAL; 214 214 } ··· 229 229 if (mtd_is_bitflip(err)) 230 230 err = 0; 231 231 if (err || read != pgsize) { 232 - printk(PRINT_PREF "error: read failed at %#llx\n", 232 + pr_err("error: read failed at %#llx\n", 233 233 addr); 234 234 if (!err) 235 235 err = -EINVAL; ··· 255 255 if (mtd_is_bitflip(err)) 256 256 err = 0; 257 257 if (err || read != sz) { 258 - printk(PRINT_PREF "error: read failed at %#llx\n", 258 + pr_err("error: read failed at %#llx\n", 259 259 addr); 260 260 if (!err) 261 261 err = -EINVAL; ··· 270 270 if (mtd_is_bitflip(err)) 271 271 err = 0; 272 272 if (err || read != pgsize) { 273 - printk(PRINT_PREF "error: read failed at %#llx\n", 273 + pr_err("error: read failed at %#llx\n", 274 274 addr); 275 275 if (!err) 276 276 err = -EINVAL; ··· 287 287 288 288 ret = mtd_block_isbad(mtd, addr); 289 289 if (ret) 290 - printk(PRINT_PREF "block %d is bad\n", ebnum); 290 + pr_info("block %d is bad\n", ebnum); 291 291 return ret; 292 292 } 293 293 ··· 321 321 322 322 bbt = kzalloc(ebcnt, GFP_KERNEL); 323 323 if (!bbt) { 324 - printk(PRINT_PREF "error: cannot allocate memory\n"); 324 + pr_err("error: cannot allocate memory\n"); 325 325 return -ENOMEM; 326 326 } 327 327 328 328 if 
(!mtd_can_have_bb(mtd)) 329 329 goto out; 330 330 331 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 331 + pr_info("scanning for bad eraseblocks\n"); 332 332 for (i = 0; i < ebcnt; ++i) { 333 333 bbt[i] = is_block_bad(i) ? 1 : 0; 334 334 if (bbt[i]) 335 335 bad += 1; 336 336 cond_resched(); 337 337 } 338 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 338 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 339 339 out: 340 340 goodebcnt = ebcnt - bad; 341 341 return 0; ··· 351 351 printk(KERN_INFO "=================================================\n"); 352 352 353 353 if (dev < 0) { 354 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 355 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 354 + pr_info("Please specify a valid mtd-device via module parameter\n"); 355 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 356 356 return -EINVAL; 357 357 } 358 358 359 359 if (count) 360 - printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); 360 + pr_info("MTD device: %d count: %d\n", dev, count); 361 361 else 362 - printk(PRINT_PREF "MTD device: %d\n", dev); 362 + pr_info("MTD device: %d\n", dev); 363 363 364 364 mtd = get_mtd_device(NULL, dev); 365 365 if (IS_ERR(mtd)) { 366 366 err = PTR_ERR(mtd); 367 - printk(PRINT_PREF "error: cannot get MTD device\n"); 367 + pr_err("error: cannot get MTD device\n"); 368 368 return err; 369 369 } 370 370 371 371 if (mtd->writesize == 1) { 372 - printk(PRINT_PREF "not NAND flash, assume page size is 512 " 372 + pr_info("not NAND flash, assume page size is 512 " 373 373 "bytes.\n"); 374 374 pgsize = 512; 375 375 } else ··· 380 380 ebcnt = tmp; 381 381 pgcnt = mtd->erasesize / pgsize; 382 382 383 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 383 + pr_info("MTD device size %llu, eraseblock size %u, " 384 384 "page size %u, count of eraseblocks %u, pages per " 385 385 "eraseblock %u, 
OOB size %u\n", 386 386 (unsigned long long)mtd->size, mtd->erasesize, ··· 392 392 err = -ENOMEM; 393 393 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 394 394 if (!iobuf) { 395 - printk(PRINT_PREF "error: cannot allocate memory\n"); 395 + pr_err("error: cannot allocate memory\n"); 396 396 goto out; 397 397 } 398 398 ··· 407 407 goto out; 408 408 409 409 /* Write all eraseblocks, 1 eraseblock at a time */ 410 - printk(PRINT_PREF "testing eraseblock write speed\n"); 410 + pr_info("testing eraseblock write speed\n"); 411 411 start_timing(); 412 412 for (i = 0; i < ebcnt; ++i) { 413 413 if (bbt[i]) ··· 419 419 } 420 420 stop_timing(); 421 421 speed = calc_speed(); 422 - printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed); 422 + pr_info("eraseblock write speed is %ld KiB/s\n", speed); 423 423 424 424 /* Read all eraseblocks, 1 eraseblock at a time */ 425 - printk(PRINT_PREF "testing eraseblock read speed\n"); 425 + pr_info("testing eraseblock read speed\n"); 426 426 start_timing(); 427 427 for (i = 0; i < ebcnt; ++i) { 428 428 if (bbt[i]) ··· 434 434 } 435 435 stop_timing(); 436 436 speed = calc_speed(); 437 - printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed); 437 + pr_info("eraseblock read speed is %ld KiB/s\n", speed); 438 438 439 439 err = erase_whole_device(); 440 440 if (err) 441 441 goto out; 442 442 443 443 /* Write all eraseblocks, 1 page at a time */ 444 - printk(PRINT_PREF "testing page write speed\n"); 444 + pr_info("testing page write speed\n"); 445 445 start_timing(); 446 446 for (i = 0; i < ebcnt; ++i) { 447 447 if (bbt[i]) ··· 453 453 } 454 454 stop_timing(); 455 455 speed = calc_speed(); 456 - printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed); 456 + pr_info("page write speed is %ld KiB/s\n", speed); 457 457 458 458 /* Read all eraseblocks, 1 page at a time */ 459 - printk(PRINT_PREF "testing page read speed\n"); 459 + pr_info("testing page read speed\n"); 460 460 start_timing(); 461 461 for (i = 0; i < ebcnt; ++i) { 
462 462 if (bbt[i]) ··· 468 468 } 469 469 stop_timing(); 470 470 speed = calc_speed(); 471 - printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed); 471 + pr_info("page read speed is %ld KiB/s\n", speed); 472 472 473 473 err = erase_whole_device(); 474 474 if (err) 475 475 goto out; 476 476 477 477 /* Write all eraseblocks, 2 pages at a time */ 478 - printk(PRINT_PREF "testing 2 page write speed\n"); 478 + pr_info("testing 2 page write speed\n"); 479 479 start_timing(); 480 480 for (i = 0; i < ebcnt; ++i) { 481 481 if (bbt[i]) ··· 487 487 } 488 488 stop_timing(); 489 489 speed = calc_speed(); 490 - printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed); 490 + pr_info("2 page write speed is %ld KiB/s\n", speed); 491 491 492 492 /* Read all eraseblocks, 2 pages at a time */ 493 - printk(PRINT_PREF "testing 2 page read speed\n"); 493 + pr_info("testing 2 page read speed\n"); 494 494 start_timing(); 495 495 for (i = 0; i < ebcnt; ++i) { 496 496 if (bbt[i]) ··· 502 502 } 503 503 stop_timing(); 504 504 speed = calc_speed(); 505 - printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed); 505 + pr_info("2 page read speed is %ld KiB/s\n", speed); 506 506 507 507 /* Erase all eraseblocks */ 508 - printk(PRINT_PREF "Testing erase speed\n"); 508 + pr_info("Testing erase speed\n"); 509 509 start_timing(); 510 510 for (i = 0; i < ebcnt; ++i) { 511 511 if (bbt[i]) ··· 517 517 } 518 518 stop_timing(); 519 519 speed = calc_speed(); 520 - printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); 520 + pr_info("erase speed is %ld KiB/s\n", speed); 521 521 522 522 /* Multi-block erase all eraseblocks */ 523 523 for (k = 1; k < 7; k++) { 524 524 blocks = 1 << k; 525 - printk(PRINT_PREF "Testing %dx multi-block erase speed\n", 525 + pr_info("Testing %dx multi-block erase speed\n", 526 526 blocks); 527 527 start_timing(); 528 528 for (i = 0; i < ebcnt; ) { ··· 541 541 } 542 542 stop_timing(); 543 543 speed = calc_speed(); 544 - printk(PRINT_PREF "%dx multi-block erase speed 
is %ld KiB/s\n", 544 + pr_info("%dx multi-block erase speed is %ld KiB/s\n", 545 545 blocks, speed); 546 546 } 547 - printk(PRINT_PREF "finished\n"); 547 + pr_info("finished\n"); 548 548 out: 549 549 kfree(iobuf); 550 550 kfree(bbt); 551 551 put_mtd_device(mtd); 552 552 if (err) 553 - printk(PRINT_PREF "error %d occurred\n", err); 553 + pr_info("error %d occurred\n", err); 554 554 printk(KERN_INFO "=================================================\n"); 555 555 return err; 556 556 }
+22 -22
drivers/mtd/tests/mtd_stresstest.c
··· 19 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <linux/init.h> 23 25 #include <linux/module.h> 24 26 #include <linux/moduleparam.h> ··· 30 28 #include <linux/sched.h> 31 29 #include <linux/vmalloc.h> 32 30 #include <linux/random.h> 33 - 34 - #define PRINT_PREF KERN_INFO "mtd_stresstest: " 35 31 36 32 static int dev = -EINVAL; 37 33 module_param(dev, int, S_IRUGO); ··· 94 94 95 95 err = mtd_erase(mtd, &ei); 96 96 if (unlikely(err)) { 97 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 97 + pr_err("error %d while erasing EB %d\n", err, ebnum); 98 98 return err; 99 99 } 100 100 101 101 if (unlikely(ei.state == MTD_ERASE_FAILED)) { 102 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 102 + pr_err("some erase error occurred at EB %d\n", 103 103 ebnum); 104 104 return -EIO; 105 105 } ··· 114 114 115 115 ret = mtd_block_isbad(mtd, addr); 116 116 if (ret) 117 - printk(PRINT_PREF "block %d is bad\n", ebnum); 117 + pr_info("block %d is bad\n", ebnum); 118 118 return ret; 119 119 } 120 120 ··· 137 137 if (mtd_is_bitflip(err)) 138 138 err = 0; 139 139 if (unlikely(err || read != len)) { 140 - printk(PRINT_PREF "error: read failed at 0x%llx\n", 140 + pr_err("error: read failed at 0x%llx\n", 141 141 (long long)addr); 142 142 if (!err) 143 143 err = -EINVAL; ··· 174 174 addr = eb * mtd->erasesize + offs; 175 175 err = mtd_write(mtd, addr, len, &written, writebuf); 176 176 if (unlikely(err || written != len)) { 177 - printk(PRINT_PREF "error: write failed at 0x%llx\n", 177 + pr_err("error: write failed at 0x%llx\n", 178 178 (long long)addr); 179 179 if (!err) 180 180 err = -EINVAL; ··· 203 203 204 204 bbt = kzalloc(ebcnt, GFP_KERNEL); 205 205 if (!bbt) { 206 - printk(PRINT_PREF "error: cannot allocate memory\n"); 206 + pr_err("error: cannot allocate memory\n"); 207 207 return -ENOMEM; 208 208 } 209 209 210 210 if (!mtd_can_have_bb(mtd)) 211 211 return 0; 
212 212 213 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 213 + pr_info("scanning for bad eraseblocks\n"); 214 214 for (i = 0; i < ebcnt; ++i) { 215 215 bbt[i] = is_block_bad(i) ? 1 : 0; 216 216 if (bbt[i]) 217 217 bad += 1; 218 218 cond_resched(); 219 219 } 220 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 220 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 221 221 return 0; 222 222 } 223 223 ··· 231 231 printk(KERN_INFO "=================================================\n"); 232 232 233 233 if (dev < 0) { 234 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 235 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 234 + pr_info("Please specify a valid mtd-device via module parameter\n"); 235 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 236 236 return -EINVAL; 237 237 } 238 238 239 - printk(PRINT_PREF "MTD device: %d\n", dev); 239 + pr_info("MTD device: %d\n", dev); 240 240 241 241 mtd = get_mtd_device(NULL, dev); 242 242 if (IS_ERR(mtd)) { 243 243 err = PTR_ERR(mtd); 244 - printk(PRINT_PREF "error: cannot get MTD device\n"); 244 + pr_err("error: cannot get MTD device\n"); 245 245 return err; 246 246 } 247 247 248 248 if (mtd->writesize == 1) { 249 - printk(PRINT_PREF "not NAND flash, assume page size is 512 " 249 + pr_info("not NAND flash, assume page size is 512 " 250 250 "bytes.\n"); 251 251 pgsize = 512; 252 252 } else ··· 257 257 ebcnt = tmp; 258 258 pgcnt = mtd->erasesize / pgsize; 259 259 260 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 260 + pr_info("MTD device size %llu, eraseblock size %u, " 261 261 "page size %u, count of eraseblocks %u, pages per " 262 262 "eraseblock %u, OOB size %u\n", 263 263 (unsigned long long)mtd->size, mtd->erasesize, 264 264 pgsize, ebcnt, pgcnt, mtd->oobsize); 265 265 266 266 if (ebcnt < 2) { 267 - printk(PRINT_PREF "error: need at least 2 eraseblocks\n"); 267 + 
pr_err("error: need at least 2 eraseblocks\n"); 268 268 err = -ENOSPC; 269 269 goto out_put_mtd; 270 270 } ··· 277 277 writebuf = vmalloc(bufsize); 278 278 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL); 279 279 if (!readbuf || !writebuf || !offsets) { 280 - printk(PRINT_PREF "error: cannot allocate memory\n"); 280 + pr_err("error: cannot allocate memory\n"); 281 281 goto out; 282 282 } 283 283 for (i = 0; i < ebcnt; i++) ··· 290 290 goto out; 291 291 292 292 /* Do operations */ 293 - printk(PRINT_PREF "doing operations\n"); 293 + pr_info("doing operations\n"); 294 294 for (op = 0; op < count; op++) { 295 295 if ((op & 1023) == 0) 296 - printk(PRINT_PREF "%d operations done\n", op); 296 + pr_info("%d operations done\n", op); 297 297 err = do_operation(); 298 298 if (err) 299 299 goto out; 300 300 cond_resched(); 301 301 } 302 - printk(PRINT_PREF "finished, %d operations done\n", op); 302 + pr_info("finished, %d operations done\n", op); 303 303 304 304 out: 305 305 kfree(offsets); ··· 309 309 out_put_mtd: 310 310 put_mtd_device(mtd); 311 311 if (err) 312 - printk(PRINT_PREF "error %d occurred\n", err); 312 + pr_info("error %d occurred\n", err); 313 313 printk(KERN_INFO "=================================================\n"); 314 314 return err; 315 315 }
+62 -62
drivers/mtd/tests/mtd_subpagetest.c
··· 19 19 * 20 20 */ 21 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 22 24 #include <linux/init.h> 23 25 #include <linux/module.h> 24 26 #include <linux/moduleparam.h> ··· 28 26 #include <linux/mtd/mtd.h> 29 27 #include <linux/slab.h> 30 28 #include <linux/sched.h> 31 - 32 - #define PRINT_PREF KERN_INFO "mtd_subpagetest: " 33 29 34 30 static int dev = -EINVAL; 35 31 module_param(dev, int, S_IRUGO); ··· 82 82 83 83 err = mtd_erase(mtd, &ei); 84 84 if (err) { 85 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 85 + pr_err("error %d while erasing EB %d\n", err, ebnum); 86 86 return err; 87 87 } 88 88 89 89 if (ei.state == MTD_ERASE_FAILED) { 90 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 90 + pr_err("some erase error occurred at EB %d\n", 91 91 ebnum); 92 92 return -EIO; 93 93 } ··· 100 100 int err; 101 101 unsigned int i; 102 102 103 - printk(PRINT_PREF "erasing whole device\n"); 103 + pr_info("erasing whole device\n"); 104 104 for (i = 0; i < ebcnt; ++i) { 105 105 if (bbt[i]) 106 106 continue; ··· 109 109 return err; 110 110 cond_resched(); 111 111 } 112 - printk(PRINT_PREF "erased %u eraseblocks\n", i); 112 + pr_info("erased %u eraseblocks\n", i); 113 113 return 0; 114 114 } 115 115 ··· 122 122 set_random_data(writebuf, subpgsize); 123 123 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 124 124 if (unlikely(err || written != subpgsize)) { 125 - printk(PRINT_PREF "error: write failed at %#llx\n", 125 + pr_err("error: write failed at %#llx\n", 126 126 (long long)addr); 127 127 if (written != subpgsize) { 128 - printk(PRINT_PREF " write size: %#x\n", subpgsize); 129 - printk(PRINT_PREF " written: %#zx\n", written); 128 + pr_err(" write size: %#x\n", subpgsize); 129 + pr_err(" written: %#zx\n", written); 130 130 } 131 131 return err ? 
err : -1; 132 132 } ··· 136 136 set_random_data(writebuf, subpgsize); 137 137 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 138 138 if (unlikely(err || written != subpgsize)) { 139 - printk(PRINT_PREF "error: write failed at %#llx\n", 139 + pr_err("error: write failed at %#llx\n", 140 140 (long long)addr); 141 141 if (written != subpgsize) { 142 - printk(PRINT_PREF " write size: %#x\n", subpgsize); 143 - printk(PRINT_PREF " written: %#zx\n", written); 142 + pr_err(" write size: %#x\n", subpgsize); 143 + pr_err(" written: %#zx\n", written); 144 144 } 145 145 return err ? err : -1; 146 146 } ··· 160 160 set_random_data(writebuf, subpgsize * k); 161 161 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf); 162 162 if (unlikely(err || written != subpgsize * k)) { 163 - printk(PRINT_PREF "error: write failed at %#llx\n", 163 + pr_err("error: write failed at %#llx\n", 164 164 (long long)addr); 165 165 if (written != subpgsize) { 166 - printk(PRINT_PREF " write size: %#x\n", 166 + pr_err(" write size: %#x\n", 167 167 subpgsize * k); 168 - printk(PRINT_PREF " written: %#08zx\n", 168 + pr_err(" written: %#08zx\n", 169 169 written); 170 170 } 171 171 return err ? err : -1; ··· 198 198 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 199 199 if (unlikely(err || read != subpgsize)) { 200 200 if (mtd_is_bitflip(err) && read == subpgsize) { 201 - printk(PRINT_PREF "ECC correction at %#llx\n", 201 + pr_info("ECC correction at %#llx\n", 202 202 (long long)addr); 203 203 err = 0; 204 204 } else { 205 - printk(PRINT_PREF "error: read failed at %#llx\n", 205 + pr_err("error: read failed at %#llx\n", 206 206 (long long)addr); 207 207 return err ? 
err : -1; 208 208 } 209 209 } 210 210 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 211 - printk(PRINT_PREF "error: verify failed at %#llx\n", 211 + pr_err("error: verify failed at %#llx\n", 212 212 (long long)addr); 213 - printk(PRINT_PREF "------------- written----------------\n"); 213 + pr_info("------------- written----------------\n"); 214 214 print_subpage(writebuf); 215 - printk(PRINT_PREF "------------- read ------------------\n"); 215 + pr_info("------------- read ------------------\n"); 216 216 print_subpage(readbuf); 217 - printk(PRINT_PREF "-------------------------------------\n"); 217 + pr_info("-------------------------------------\n"); 218 218 errcnt += 1; 219 219 } 220 220 ··· 225 225 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 226 226 if (unlikely(err || read != subpgsize)) { 227 227 if (mtd_is_bitflip(err) && read == subpgsize) { 228 - printk(PRINT_PREF "ECC correction at %#llx\n", 228 + pr_info("ECC correction at %#llx\n", 229 229 (long long)addr); 230 230 err = 0; 231 231 } else { 232 - printk(PRINT_PREF "error: read failed at %#llx\n", 232 + pr_err("error: read failed at %#llx\n", 233 233 (long long)addr); 234 234 return err ? 
err : -1; 235 235 } 236 236 } 237 237 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 238 - printk(PRINT_PREF "error: verify failed at %#llx\n", 238 + pr_info("error: verify failed at %#llx\n", 239 239 (long long)addr); 240 - printk(PRINT_PREF "------------- written----------------\n"); 240 + pr_info("------------- written----------------\n"); 241 241 print_subpage(writebuf); 242 - printk(PRINT_PREF "------------- read ------------------\n"); 242 + pr_info("------------- read ------------------\n"); 243 243 print_subpage(readbuf); 244 - printk(PRINT_PREF "-------------------------------------\n"); 244 + pr_info("-------------------------------------\n"); 245 245 errcnt += 1; 246 246 } 247 247 ··· 262 262 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf); 263 263 if (unlikely(err || read != subpgsize * k)) { 264 264 if (mtd_is_bitflip(err) && read == subpgsize * k) { 265 - printk(PRINT_PREF "ECC correction at %#llx\n", 265 + pr_info("ECC correction at %#llx\n", 266 266 (long long)addr); 267 267 err = 0; 268 268 } else { 269 - printk(PRINT_PREF "error: read failed at " 269 + pr_err("error: read failed at " 270 270 "%#llx\n", (long long)addr); 271 271 return err ? err : -1; 272 272 } 273 273 } 274 274 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) { 275 - printk(PRINT_PREF "error: verify failed at %#llx\n", 275 + pr_err("error: verify failed at %#llx\n", 276 276 (long long)addr); 277 277 errcnt += 1; 278 278 } ··· 295 295 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 296 296 if (unlikely(err || read != subpgsize)) { 297 297 if (mtd_is_bitflip(err) && read == subpgsize) { 298 - printk(PRINT_PREF "ECC correction at %#llx\n", 298 + pr_info("ECC correction at %#llx\n", 299 299 (long long)addr); 300 300 err = 0; 301 301 } else { 302 - printk(PRINT_PREF "error: read failed at " 302 + pr_err("error: read failed at " 303 303 "%#llx\n", (long long)addr); 304 304 return err ? 
err : -1; 305 305 } 306 306 } 307 307 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 308 - printk(PRINT_PREF "error: verify 0xff failed at " 308 + pr_err("error: verify 0xff failed at " 309 309 "%#llx\n", (long long)addr); 310 310 errcnt += 1; 311 311 } ··· 320 320 int err; 321 321 unsigned int i; 322 322 323 - printk(PRINT_PREF "verifying all eraseblocks for 0xff\n"); 323 + pr_info("verifying all eraseblocks for 0xff\n"); 324 324 for (i = 0; i < ebcnt; ++i) { 325 325 if (bbt[i]) 326 326 continue; ··· 328 328 if (err) 329 329 return err; 330 330 if (i % 256 == 0) 331 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 331 + pr_info("verified up to eraseblock %u\n", i); 332 332 cond_resched(); 333 333 } 334 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 334 + pr_info("verified %u eraseblocks\n", i); 335 335 return 0; 336 336 } 337 337 ··· 342 342 343 343 ret = mtd_block_isbad(mtd, addr); 344 344 if (ret) 345 - printk(PRINT_PREF "block %d is bad\n", ebnum); 345 + pr_info("block %d is bad\n", ebnum); 346 346 return ret; 347 347 } 348 348 ··· 352 352 353 353 bbt = kzalloc(ebcnt, GFP_KERNEL); 354 354 if (!bbt) { 355 - printk(PRINT_PREF "error: cannot allocate memory\n"); 355 + pr_err("error: cannot allocate memory\n"); 356 356 return -ENOMEM; 357 357 } 358 358 359 - printk(PRINT_PREF "scanning for bad eraseblocks\n"); 359 + pr_info("scanning for bad eraseblocks\n"); 360 360 for (i = 0; i < ebcnt; ++i) { 361 361 bbt[i] = is_block_bad(i) ? 
1 : 0; 362 362 if (bbt[i]) 363 363 bad += 1; 364 364 cond_resched(); 365 365 } 366 - printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 366 + pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 367 367 return 0; 368 368 } 369 369 ··· 377 377 printk(KERN_INFO "=================================================\n"); 378 378 379 379 if (dev < 0) { 380 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 381 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 380 + pr_info("Please specify a valid mtd-device via module parameter\n"); 381 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 382 382 return -EINVAL; 383 383 } 384 384 385 - printk(PRINT_PREF "MTD device: %d\n", dev); 385 + pr_info("MTD device: %d\n", dev); 386 386 387 387 mtd = get_mtd_device(NULL, dev); 388 388 if (IS_ERR(mtd)) { 389 389 err = PTR_ERR(mtd); 390 - printk(PRINT_PREF "error: cannot get MTD device\n"); 390 + pr_err("error: cannot get MTD device\n"); 391 391 return err; 392 392 } 393 393 394 394 if (mtd->type != MTD_NANDFLASH) { 395 - printk(PRINT_PREF "this test requires NAND flash\n"); 395 + pr_info("this test requires NAND flash\n"); 396 396 goto out; 397 397 } 398 398 ··· 402 402 ebcnt = tmp; 403 403 pgcnt = mtd->erasesize / mtd->writesize; 404 404 405 - printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 405 + pr_info("MTD device size %llu, eraseblock size %u, " 406 406 "page size %u, subpage size %u, count of eraseblocks %u, " 407 407 "pages per eraseblock %u, OOB size %u\n", 408 408 (unsigned long long)mtd->size, mtd->erasesize, ··· 412 412 bufsize = subpgsize * 32; 413 413 writebuf = kmalloc(bufsize, GFP_KERNEL); 414 414 if (!writebuf) { 415 - printk(PRINT_PREF "error: cannot allocate memory\n"); 415 + pr_info("error: cannot allocate memory\n"); 416 416 goto out; 417 417 } 418 418 readbuf = kmalloc(bufsize, GFP_KERNEL); 419 419 if (!readbuf) { 420 - printk(PRINT_PREF 
"error: cannot allocate memory\n"); 420 + pr_info("error: cannot allocate memory\n"); 421 421 goto out; 422 422 } 423 423 ··· 429 429 if (err) 430 430 goto out; 431 431 432 - printk(PRINT_PREF "writing whole device\n"); 432 + pr_info("writing whole device\n"); 433 433 simple_srand(1); 434 434 for (i = 0; i < ebcnt; ++i) { 435 435 if (bbt[i]) ··· 438 438 if (unlikely(err)) 439 439 goto out; 440 440 if (i % 256 == 0) 441 - printk(PRINT_PREF "written up to eraseblock %u\n", i); 441 + pr_info("written up to eraseblock %u\n", i); 442 442 cond_resched(); 443 443 } 444 - printk(PRINT_PREF "written %u eraseblocks\n", i); 444 + pr_info("written %u eraseblocks\n", i); 445 445 446 446 simple_srand(1); 447 - printk(PRINT_PREF "verifying all eraseblocks\n"); 447 + pr_info("verifying all eraseblocks\n"); 448 448 for (i = 0; i < ebcnt; ++i) { 449 449 if (bbt[i]) 450 450 continue; ··· 452 452 if (unlikely(err)) 453 453 goto out; 454 454 if (i % 256 == 0) 455 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 455 + pr_info("verified up to eraseblock %u\n", i); 456 456 cond_resched(); 457 457 } 458 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 458 + pr_info("verified %u eraseblocks\n", i); 459 459 460 460 err = erase_whole_device(); 461 461 if (err) ··· 467 467 468 468 /* Write all eraseblocks */ 469 469 simple_srand(3); 470 - printk(PRINT_PREF "writing whole device\n"); 470 + pr_info("writing whole device\n"); 471 471 for (i = 0; i < ebcnt; ++i) { 472 472 if (bbt[i]) 473 473 continue; ··· 475 475 if (unlikely(err)) 476 476 goto out; 477 477 if (i % 256 == 0) 478 - printk(PRINT_PREF "written up to eraseblock %u\n", i); 478 + pr_info("written up to eraseblock %u\n", i); 479 479 cond_resched(); 480 480 } 481 - printk(PRINT_PREF "written %u eraseblocks\n", i); 481 + pr_info("written %u eraseblocks\n", i); 482 482 483 483 /* Check all eraseblocks */ 484 484 simple_srand(3); 485 - printk(PRINT_PREF "verifying all eraseblocks\n"); 485 + pr_info("verifying all 
eraseblocks\n"); 486 486 for (i = 0; i < ebcnt; ++i) { 487 487 if (bbt[i]) 488 488 continue; ··· 490 490 if (unlikely(err)) 491 491 goto out; 492 492 if (i % 256 == 0) 493 - printk(PRINT_PREF "verified up to eraseblock %u\n", i); 493 + pr_info("verified up to eraseblock %u\n", i); 494 494 cond_resched(); 495 495 } 496 - printk(PRINT_PREF "verified %u eraseblocks\n", i); 496 + pr_info("verified %u eraseblocks\n", i); 497 497 498 498 err = erase_whole_device(); 499 499 if (err) ··· 503 503 if (err) 504 504 goto out; 505 505 506 - printk(PRINT_PREF "finished with %d errors\n", errcnt); 506 + pr_info("finished with %d errors\n", errcnt); 507 507 508 508 out: 509 509 kfree(bbt); ··· 511 511 kfree(writebuf); 512 512 put_mtd_device(mtd); 513 513 if (err) 514 - printk(PRINT_PREF "error %d occurred\n", err); 514 + pr_info("error %d occurred\n", err); 515 515 printk(KERN_INFO "=================================================\n"); 516 516 return err; 517 517 }
+37 -36
drivers/mtd/tests/mtd_torturetest.c
··· 23 23 * damage caused by this program. 24 24 */ 25 25 26 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 27 + 26 28 #include <linux/init.h> 27 29 #include <linux/module.h> 28 30 #include <linux/moduleparam.h> ··· 33 31 #include <linux/slab.h> 34 32 #include <linux/sched.h> 35 33 36 - #define PRINT_PREF KERN_INFO "mtd_torturetest: " 37 34 #define RETRIES 3 38 35 39 36 static int eb = 8; ··· 108 107 109 108 err = mtd_erase(mtd, &ei); 110 109 if (err) { 111 - printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 110 + pr_err("error %d while erasing EB %d\n", err, ebnum); 112 111 return err; 113 112 } 114 113 115 114 if (ei.state == MTD_ERASE_FAILED) { 116 - printk(PRINT_PREF "some erase error occurred at EB %d\n", 115 + pr_err("some erase error occurred at EB %d\n", 117 116 ebnum); 118 117 return -EIO; 119 118 } ··· 140 139 retry: 141 140 err = mtd_read(mtd, addr, len, &read, check_buf); 142 141 if (mtd_is_bitflip(err)) 143 - printk(PRINT_PREF "single bit flip occurred at EB %d " 142 + pr_err("single bit flip occurred at EB %d " 144 143 "MTD reported that it was fixed.\n", ebnum); 145 144 else if (err) { 146 - printk(PRINT_PREF "error %d while reading EB %d, " 145 + pr_err("error %d while reading EB %d, " 147 146 "read %zd\n", err, ebnum, read); 148 147 return err; 149 148 } 150 149 151 150 if (read != len) { 152 - printk(PRINT_PREF "failed to read %zd bytes from EB %d, " 151 + pr_err("failed to read %zd bytes from EB %d, " 153 152 "read only %zd, but no error reported\n", 154 153 len, ebnum, read); 155 154 return -EIO; 156 155 } 157 156 158 157 if (memcmp(buf, check_buf, len)) { 159 - printk(PRINT_PREF "read wrong data from EB %d\n", ebnum); 158 + pr_err("read wrong data from EB %d\n", ebnum); 160 159 report_corrupt(check_buf, buf); 161 160 162 161 if (retries++ < RETRIES) { 163 162 /* Try read again */ 164 163 yield(); 165 - printk(PRINT_PREF "re-try reading data from EB %d\n", 164 + pr_info("re-try reading data from EB %d\n", 166 165 ebnum); 167 166 
goto retry; 168 167 } else { 169 - printk(PRINT_PREF "retried %d times, still errors, " 168 + pr_info("retried %d times, still errors, " 170 169 "give-up\n", RETRIES); 171 170 return -EINVAL; 172 171 } 173 172 } 174 173 175 174 if (retries != 0) 176 - printk(PRINT_PREF "only attempt number %d was OK (!!!)\n", 175 + pr_info("only attempt number %d was OK (!!!)\n", 177 176 retries); 178 177 179 178 return 0; ··· 192 191 } 193 192 err = mtd_write(mtd, addr, len, &written, buf); 194 193 if (err) { 195 - printk(PRINT_PREF "error %d while writing EB %d, written %zd" 194 + pr_err("error %d while writing EB %d, written %zd" 196 195 " bytes\n", err, ebnum, written); 197 196 return err; 198 197 } 199 198 if (written != len) { 200 - printk(PRINT_PREF "written only %zd bytes of %zd, but no error" 199 + pr_info("written only %zd bytes of %zd, but no error" 201 200 " reported\n", written, len); 202 201 return -EIO; 203 202 } ··· 212 211 213 212 printk(KERN_INFO "\n"); 214 213 printk(KERN_INFO "=================================================\n"); 215 - printk(PRINT_PREF "Warning: this program is trying to wear out your " 214 + pr_info("Warning: this program is trying to wear out your " 216 215 "flash, stop it if this is not wanted.\n"); 217 216 218 217 if (dev < 0) { 219 - printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 220 - printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 218 + pr_info("Please specify a valid mtd-device via module parameter\n"); 219 + pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); 221 220 return -EINVAL; 222 221 } 223 222 224 - printk(PRINT_PREF "MTD device: %d\n", dev); 225 - printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", 223 + pr_info("MTD device: %d\n", dev); 224 + pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n", 226 225 ebcnt, eb, eb + ebcnt - 1, dev); 227 226 if (pgcnt) 228 - printk(PRINT_PREF "torturing just %d pages per eraseblock\n", 
227 + pr_info("torturing just %d pages per eraseblock\n", 229 228 pgcnt); 230 - printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled"); 229 + pr_info("write verify %s\n", check ? "enabled" : "disabled"); 231 230 232 231 mtd = get_mtd_device(NULL, dev); 233 232 if (IS_ERR(mtd)) { 234 233 err = PTR_ERR(mtd); 235 - printk(PRINT_PREF "error: cannot get MTD device\n"); 234 + pr_err("error: cannot get MTD device\n"); 236 235 return err; 237 236 } 238 237 239 238 if (mtd->writesize == 1) { 240 - printk(PRINT_PREF "not NAND flash, assume page size is 512 " 239 + pr_info("not NAND flash, assume page size is 512 " 241 240 "bytes.\n"); 242 241 pgsize = 512; 243 242 } else 244 243 pgsize = mtd->writesize; 245 244 246 245 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) { 247 - printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt); 246 + pr_err("error: invalid pgcnt value %d\n", pgcnt); 248 247 goto out_mtd; 249 248 } 250 249 251 250 err = -ENOMEM; 252 251 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); 253 252 if (!patt_5A5) { 254 - printk(PRINT_PREF "error: cannot allocate memory\n"); 253 + pr_err("error: cannot allocate memory\n"); 255 254 goto out_mtd; 256 255 } 257 256 258 257 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); 259 258 if (!patt_A5A) { 260 - printk(PRINT_PREF "error: cannot allocate memory\n"); 259 + pr_err("error: cannot allocate memory\n"); 261 260 goto out_patt_5A5; 262 261 } 263 262 264 263 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); 265 264 if (!patt_FF) { 266 - printk(PRINT_PREF "error: cannot allocate memory\n"); 265 + pr_err("error: cannot allocate memory\n"); 267 266 goto out_patt_A5A; 268 267 } 269 268 270 269 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); 271 270 if (!check_buf) { 272 - printk(PRINT_PREF "error: cannot allocate memory\n"); 271 + pr_err("error: cannot allocate memory\n"); 273 272 goto out_patt_FF; 274 273 } 275 274 ··· 296 295 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); 297 296 298 
297 if (err < 0) { 299 - printk(PRINT_PREF "block_isbad() returned %d " 298 + pr_info("block_isbad() returned %d " 300 299 "for EB %d\n", err, i); 301 300 goto out; 302 301 } 303 302 304 303 if (err) { 305 - printk("EB %d is bad. Skip it.\n", i); 304 + pr_err("EB %d is bad. Skip it.\n", i); 306 305 bad_ebs[i - eb] = 1; 307 306 } 308 307 } ··· 330 329 continue; 331 330 err = check_eraseblock(i, patt_FF); 332 331 if (err) { 333 - printk(PRINT_PREF "verify failed" 332 + pr_info("verify failed" 334 333 " for 0xFF... pattern\n"); 335 334 goto out; 336 335 } ··· 363 362 patt = patt_A5A; 364 363 err = check_eraseblock(i, patt); 365 364 if (err) { 366 - printk(PRINT_PREF "verify failed for %s" 365 + pr_info("verify failed for %s" 367 366 " pattern\n", 368 367 ((eb + erase_cycles) & 1) ? 369 368 "0x55AA55..." : "0xAA55AA..."); ··· 381 380 stop_timing(); 382 381 ms = (finish.tv_sec - start.tv_sec) * 1000 + 383 382 (finish.tv_usec - start.tv_usec) / 1000; 384 - printk(PRINT_PREF "%08u erase cycles done, took %lu " 383 + pr_info("%08u erase cycles done, took %lu " 385 384 "milliseconds (%lu seconds)\n", 386 385 erase_cycles, ms, ms / 1000); 387 386 start_timing(); ··· 392 391 } 393 392 out: 394 393 395 - printk(PRINT_PREF "finished after %u erase cycles\n", 394 + pr_info("finished after %u erase cycles\n", 396 395 erase_cycles); 397 396 kfree(check_buf); 398 397 out_patt_FF: ··· 404 403 out_mtd: 405 404 put_mtd_device(mtd); 406 405 if (err) 407 - printk(PRINT_PREF "error %d occurred during torturing\n", err); 406 + pr_info("error %d occurred during torturing\n", err); 408 407 printk(KERN_INFO "=================================================\n"); 409 408 return err; 410 409 } ··· 442 441 &bits) >= 0) 443 442 pages++; 444 443 445 - printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n", 444 + pr_info("verify fails on %d pages, %d bytes/%d bits\n", 446 445 pages, bytes, bits); 447 - printk(PRINT_PREF "The following is a list of all differences between" 446 + 
pr_info("The following is a list of all differences between" 448 447 " what was read from flash and what was expected\n"); 449 448 450 449 for (i = 0; i < check_len; i += pgsize) { ··· 458 457 printk("-------------------------------------------------------" 459 458 "----------------------------------\n"); 460 459 461 - printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify," 460 + pr_info("Page %zd has %d bytes/%d bits failing verify," 462 461 " starting at offset 0x%x\n", 463 462 (mtd->erasesize - check_len + i) / pgsize, 464 463 bytes, bits, first);
+4 -2
fs/jffs2/nodemgmt.c
··· 417 417 spin_unlock(&c->erase_completion_lock); 418 418 419 419 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); 420 - if (ret) 421 - return ret; 420 + 422 421 /* Just lock it again and continue. Nothing much can change because 423 422 we hold c->alloc_sem anyway. In fact, it's not entirely clear why 424 423 we hold c->erase_completion_lock in the majority of this function... 425 424 but that's a question for another (more caffeine-rich) day. */ 426 425 spin_lock(&c->erase_completion_lock); 426 + 427 + if (ret) 428 + return ret; 427 429 428 430 waste = jeb->free_size; 429 431 jffs2_link_node_ref(c, jeb,
+1
include/linux/bcma/bcma.h
··· 350 350 enum bcma_clkmode clkmode); 351 351 extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, 352 352 bool on); 353 + extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); 353 354 #define BCMA_DMA_TRANSLATION_MASK 0xC0000000 354 355 #define BCMA_DMA_TRANSLATION_NONE 0x00000000 355 356 #define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */
+3 -1
include/linux/mtd/blktrans.h
··· 23 23 #include <linux/mutex.h> 24 24 #include <linux/kref.h> 25 25 #include <linux/sysfs.h> 26 + #include <linux/workqueue.h> 26 27 27 28 struct hd_geometry; 28 29 struct mtd_info; ··· 44 43 struct kref ref; 45 44 struct gendisk *disk; 46 45 struct attribute_group *disk_attributes; 47 - struct task_struct *thread; 46 + struct workqueue_struct *wq; 47 + struct work_struct work; 48 48 struct request_queue *rq; 49 49 spinlock_t queue_lock; 50 50 void *priv;
+18 -4
include/linux/mtd/doc2000.h
··· 92 92 * Others use readb/writeb 93 93 */ 94 94 #if defined(__arm__) 95 - #define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)))) 96 - #define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0) 95 + static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg) 96 + { 97 + return __raw_readl(addr + reg); 98 + } 99 + static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg) 100 + { 101 + __raw_writel(data, addr + reg); 102 + wmb(); 103 + } 97 104 #define DOC_IOREMAP_LEN 0x8000 98 105 #elif defined(__ppc__) 99 - #define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)))) 100 - #define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0) 106 + static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg) 107 + { 108 + return __raw_readw(addr + reg); 109 + } 110 + static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg) 111 + { 112 + __raw_writew(data, addr + reg); 113 + wmb(); 114 + } 101 115 #define DOC_IOREMAP_LEN 0x4000 102 116 #else 103 117 #define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg))
-3
include/linux/mtd/fsmc.h
··· 155 155 unsigned int width; 156 156 unsigned int bank; 157 157 158 - /* CLE, ALE offsets */ 159 - unsigned int cle_off; 160 - unsigned int ale_off; 161 158 enum access_mode mode; 162 159 163 160 void (*select_bank)(uint32_t bank, uint32_t busw);
-68
include/linux/mtd/gpmi-nand.h
··· 1 - /* 2 - * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along 15 - * with this program; if not, write to the Free Software Foundation, Inc., 16 - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 17 - */ 18 - 19 - #ifndef __MACH_MXS_GPMI_NAND_H__ 20 - #define __MACH_MXS_GPMI_NAND_H__ 21 - 22 - /* The size of the resources is fixed. */ 23 - #define GPMI_NAND_RES_SIZE 6 24 - 25 - /* Resource names for the GPMI NAND driver. */ 26 - #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" 27 - #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" 28 - #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" 29 - #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" 30 - #define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" 31 - #define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma" 32 - 33 - /** 34 - * struct gpmi_nand_platform_data - GPMI NAND driver platform data. 35 - * 36 - * This structure communicates platform-specific information to the GPMI NAND 37 - * driver that can't be expressed as resources. 38 - * 39 - * @platform_init: A pointer to a function the driver will call to 40 - * initialize the platform (e.g., set up the pin mux). 41 - * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and 42 - * from the NAND Flash device, in nanoseconds. 
43 - * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and 44 - * from the NAND Flash device, in nanoseconds. 45 - * @max_chip_count: The maximum number of chips for which the driver 46 - * should configure the hardware. This value most 47 - * likely reflects the number of pins that are 48 - * connected to a NAND Flash device. If this is 49 - * greater than the SoC hardware can support, the 50 - * driver will print a message and fail to initialize. 51 - * @partitions: An optional pointer to an array of partition 52 - * descriptions. 53 - * @partition_count: The number of elements in the partitions array. 54 - */ 55 - struct gpmi_nand_platform_data { 56 - /* SoC hardware information. */ 57 - int (*platform_init)(void); 58 - 59 - /* NAND Flash information. */ 60 - unsigned int min_prop_delay_in_ns; 61 - unsigned int max_prop_delay_in_ns; 62 - unsigned int max_chip_count; 63 - 64 - /* Medium information. */ 65 - struct mtd_partition *partitions; 66 - unsigned partition_count; 67 - }; 68 - #endif
+2 -2
include/linux/mtd/map.h
··· 328 328 329 329 static inline map_word map_word_load(struct map_info *map, const void *ptr) 330 330 { 331 - map_word r; 331 + map_word r = {{0} }; 332 332 333 333 if (map_bankwidth_is_1(map)) 334 334 r.x[0] = *(unsigned char *)ptr; ··· 391 391 392 392 static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) 393 393 { 394 - map_word r; 394 + map_word uninitialized_var(r); 395 395 396 396 if (map_bankwidth_is_1(map)) 397 397 r.x[0] = __raw_readb(map->virt + ofs);
+1 -1
include/linux/mtd/mtd.h
··· 98 98 }; 99 99 100 100 #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 101 - #define MTD_MAX_ECCPOS_ENTRIES_LARGE 448 101 + #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 102 102 /* 103 103 * Internal ECC layout control structure. For historical reasons, there is a 104 104 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
+9 -2
include/linux/mtd/nand.h
··· 219 219 #define NAND_OWN_BUFFERS 0x00020000 220 220 /* Chip may not exist, so silence any errors in scan */ 221 221 #define NAND_SCAN_SILENT_NODEV 0x00040000 222 + /* 223 + * Autodetect nand buswidth with readid/onfi. 224 + * This suppose the driver will configure the hardware in 8 bits mode 225 + * when calling nand_scan_ident, and update its configuration 226 + * before calling nand_scan_tail. 227 + */ 228 + #define NAND_BUSWIDTH_AUTO 0x00080000 222 229 223 230 /* Options set by nand scan */ 224 231 /* Nand scan has allocated controller struct */ ··· 478 471 * non 0 if ONFI supported. 479 472 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is 480 473 * supported, 0 otherwise. 481 - * @onfi_set_features [REPLACEABLE] set the features for ONFI nand 482 - * @onfi_get_features [REPLACEABLE] get the features for ONFI nand 474 + * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand 475 + * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand 483 476 * @ecclayout: [REPLACEABLE] the default ECC placement scheme 484 477 * @bbt: [INTERN] bad block table pointer 485 478 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
+13 -1
include/linux/mtd/sh_flctl.h
··· 20 20 #ifndef __SH_FLCTL_H__ 21 21 #define __SH_FLCTL_H__ 22 22 23 + #include <linux/completion.h> 23 24 #include <linux/mtd/mtd.h> 24 25 #include <linux/mtd/nand.h> 25 26 #include <linux/mtd/partitions.h> ··· 108 107 #define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ 109 108 #define AC1CLR (0x1 << 19) /* ECC FIFO clear */ 110 109 #define AC0CLR (0x1 << 18) /* Data FIFO clear */ 110 + #define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */ 111 111 #define ECERB (0x1 << 9) /* ECC error */ 112 112 #define STERB (0x1 << 8) /* Status error */ 113 113 #define STERINTE (0x1 << 4) /* Status error enable */ ··· 140 138 FL_TIMEOUT 141 139 }; 142 140 141 + struct dma_chan; 142 + 143 143 struct sh_flctl { 144 144 struct mtd_info mtd; 145 145 struct nand_chip chip; ··· 151 147 152 148 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ 153 149 int read_bytes; 154 - int index; 150 + unsigned int index; 155 151 int seqin_column; /* column in SEQIN cmd */ 156 152 int seqin_page_addr; /* page_addr in SEQIN cmd */ 157 153 uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ ··· 165 161 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ 166 162 unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ 167 163 unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ 164 + 165 + /* DMA related objects */ 166 + struct dma_chan *chan_fifo0_rx; 167 + struct dma_chan *chan_fifo0_tx; 168 + struct completion dma_complete; 168 169 }; 169 170 170 171 struct sh_flctl_platform_data { ··· 179 170 180 171 unsigned has_hwecc:1; 181 172 unsigned use_holden:1; 173 + 174 + unsigned int slave_id_fifo0_tx; 175 + unsigned int slave_id_fifo0_rx; 182 176 }; 183 177 184 178 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
-16
include/linux/platform_data/mtd-nomadik-nand.h
··· 1 - #ifndef __ASM_ARCH_NAND_H 2 - #define __ASM_ARCH_NAND_H 3 - 4 - struct nomadik_nand_platform_data { 5 - struct mtd_partition *parts; 6 - int nparts; 7 - int options; 8 - int (*init) (void); 9 - int (*exit) (void); 10 - }; 11 - 12 - #define NAND_IO_DATA 0x40000000 13 - #define NAND_IO_CMD 0x40800000 14 - #define NAND_IO_ADDR 0x41000000 15 - 16 - #endif /* __ASM_ARCH_NAND_H */