Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-3.3' of git://git.infradead.org/mtd-2.6

MTD pull for 3.3

* tag 'for-linus-3.3' of git://git.infradead.org/mtd-2.6: (113 commits)
mtd: Fix dependency for MTD_DOC200x
mtd: do not use mtd->block_markbad directly
logfs: do not use 'mtd->block_isbad' directly
mtd: introduce mtd_can_have_bb helper
mtd: do not use mtd->suspend and mtd->resume directly
mtd: do not use mtd->lock, unlock and is_locked directly
mtd: do not use mtd->sync directly
mtd: harmonize mtd_writev usage
mtd: do not use mtd->lock_user_prot_reg directly
mtd: do not use mtd->write_user_prot_reg directly
mtd: do not use mtd->read_*_prot_reg directly
mtd: do not use mtd->get_*_prot_info directly
mtd: do not use mtd->read_oob directly
mtd: mtdoops: do not use mtd->panic_write directly
romfs: do not use mtd->get_unmapped_area directly
mtd: do not use mtd->get_unmapped_area directly
mtd: do not use mtd->point directly
mtd: introduce mtd_has_oob helper
mtd: mtdcore: export symbols cleanup
mtd: clean-up the default_mtd_writev function
...

Fix up trivial edit/remove conflict in drivers/staging/spectra/lld_mtd.c

+2793 -1824
+34
Documentation/ABI/testing/sysfs-devices-platform-docg3
··· 1 + What: /sys/devices/platform/docg3/f[0-3]_dps[01]_is_keylocked 2 + Date: November 2011 3 + KernelVersion: 3.3 4 + Contact: Robert Jarzmik <robert.jarzmik@free.fr> 5 + Description: 6 + Show whether the floor (0 to 4), protection area (0 or 1) is 7 + keylocked. Each docg3 chip (or floor) has 2 protection areas, 8 + which can cover any part of it, block aligned, called DPS. 9 + The protection has information embedded whether it blocks reads, 10 + writes or both. 11 + The result is: 12 + 0 -> the DPS is not keylocked 13 + 1 -> the DPS is keylocked 14 + Users: None identified so far. 15 + 16 + What: /sys/devices/platform/docg3/f[0-3]_dps[01]_protection_key 17 + Date: November 2011 18 + KernelVersion: 3.3 19 + Contact: Robert Jarzmik <robert.jarzmik@free.fr> 20 + Description: 21 + Enter the protection key for the floor (0 to 4), protection area 22 + (0 or 1). Each docg3 chip (or floor) has 2 protection areas, 23 + which can cover any part of it, block aligned, called DPS. 24 + The protection has information embedded whether it blocks reads, 25 + writes or both. 26 + The protection key is a string of 8 bytes (value 0-255). 27 + Entering the correct value toggle the lock, and can be observed 28 + through f[0-3]_dps[01]_is_keylocked. 29 + Possible values are: 30 + - 8 bytes 31 + Typical values are: 32 + - "00000000" 33 + - "12345678" 34 + Users: None identified so far.
+44
Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
··· 1 + GPIO assisted NAND flash 2 + 3 + The GPIO assisted NAND flash uses a memory mapped interface to 4 + read/write the NAND commands and data and GPIO pins for the control 5 + signals. 6 + 7 + Required properties: 8 + - compatible : "gpio-control-nand" 9 + - reg : should specify localbus chip select and size used for the chip. The 10 + resource describes the data bus connected to the NAND flash and all accesses 11 + are made in native endianness. 12 + - #address-cells, #size-cells : Must be present if the device has sub-nodes 13 + representing partitions. 14 + - gpios : specifies the gpio pins to control the NAND device. nwp is an 15 + optional gpio and may be set to 0 if not present. 16 + 17 + Optional properties: 18 + - bank-width : Width (in bytes) of the device. If not present, the width 19 + defaults to 1 byte. 20 + - chip-delay : chip dependent delay for transferring data from array to 21 + read registers (tR). If not present then a default of 20us is used. 22 + - gpio-control-nand,io-sync-reg : A 64-bit physical address for a read 23 + location used to guard against bus reordering with regards to accesses to 24 + the GPIO's and the NAND flash data bus. If present, then after changing 25 + GPIO state and before and after command byte writes, this register will be 26 + read to ensure that the GPIO accesses have completed. 27 + 28 + Examples: 29 + 30 + gpio-nand@1,0 { 31 + compatible = "gpio-control-nand"; 32 + reg = <1 0x0000 0x2>; 33 + #address-cells = <1>; 34 + #size-cells = <1>; 35 + gpios = <&banka 1 0 /* rdy */ 36 + &banka 2 0 /* nce */ 37 + &banka 3 0 /* ale */ 38 + &banka 4 0 /* cle */ 39 + 0 /* nwp */>; 40 + 41 + partition@0 { 42 + ... 43 + }; 44 + };
+1 -1
arch/arm/mach-davinci/board-da850-evm.c
··· 127 127 size_t retlen; 128 128 129 129 if (!strcmp(mtd->name, "MAC-Address")) { 130 - mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr); 130 + mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr); 131 131 if (retlen == ETH_ALEN) 132 132 pr_info("Read MAC addr from SPI Flash: %pM\n", 133 133 mac_addr);
+3 -4
arch/cris/arch-v32/drivers/axisflashmap.c
··· 404 404 */ 405 405 int blockstat; 406 406 do { 407 - blockstat = main_mtd->block_isbad(main_mtd, 408 - ptable_sector); 407 + blockstat = mtd_block_isbad(main_mtd, ptable_sector); 409 408 if (blockstat < 0) 410 409 ptable_sector = 0; /* read error */ 411 410 else if (blockstat) ··· 412 413 } while (blockstat && ptable_sector); 413 414 #endif 414 415 if (ptable_sector) { 415 - main_mtd->read(main_mtd, ptable_sector, PAGESIZE, 416 - &len, page); 416 + mtd_read(main_mtd, ptable_sector, PAGESIZE, &len, 417 + page); 417 418 ptable_head = &((struct partitiontable *) page)->head; 418 419 } 419 420
+3
arch/mips/bcm63xx/boards/board_bcm963xx.c
··· 834 834 } 835 835 }; 836 836 837 + static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL }; 838 + 837 839 static struct physmap_flash_data flash_data = { 838 840 .width = 2, 839 841 .nr_parts = ARRAY_SIZE(mtd_partitions), 840 842 .parts = mtd_partitions, 843 + .part_probe_types = bcm63xx_part_types, 841 844 }; 842 845 843 846 static struct resource mtd_resources[] = {
+5 -6
arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
··· 16 16 #define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */ 17 17 #define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */ 18 18 #define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */ 19 - #define CRC_LEN 4 /* Length of CRC in bytes */ 20 19 #define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */ 21 20 22 21 #define NUM_PIRELLI 2 ··· 76 77 /* 192-195: Version flash layout */ 77 78 char flash_layout_ver[FLASHLAYOUTVER_LEN]; 78 79 /* 196-199: kernel+rootfs CRC32 */ 79 - char fskernel_crc[CRC_LEN]; 80 + __u32 fskernel_crc; 80 81 /* 200-215: Unused except on Alice Gate where is is information */ 81 82 char information2[TAGINFO2_LEN]; 82 83 /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */ 83 - char image_crc[CRC_LEN]; 84 + __u32 image_crc; 84 85 /* 220-223: CRC32 of rootfs partition */ 85 - char rootfs_crc[CRC_LEN]; 86 + __u32 rootfs_crc; 86 87 /* 224-227: CRC32 of kernel partition */ 87 - char kernel_crc[CRC_LEN]; 88 + __u32 kernel_crc; 88 89 /* 228-235: Unused at present */ 89 90 char reserved1[8]; 90 91 /* 236-239: CRC32 of header excluding last 20 bytes */ 91 - char header_crc[CRC_LEN]; 92 + __u32 header_crc; 92 93 /* 240-255: Unused at present */ 93 94 char reserved2[16]; 94 95 };
+8
drivers/mtd/Kconfig
··· 140 140 ---help--- 141 141 TI AR7 partitioning support 142 142 143 + config MTD_BCM63XX_PARTS 144 + tristate "BCM63XX CFE partitioning support" 145 + depends on BCM63XX 146 + select CRC32 147 + help 148 + This provides partions parsing for BCM63xx devices with CFE 149 + bootloaders. 150 + 143 151 comment "User Modules And Translation Layers" 144 152 145 153 config MTD_CHAR
+1
drivers/mtd/Makefile
··· 11 11 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 12 12 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 13 13 obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o 14 + obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o 14 15 15 16 # 'Users' - code which presents functionality to userspace. 16 17 obj-$(CONFIG_MTD_CHAR) += mtdchar.o
+2 -2
drivers/mtd/afs.c
··· 75 75 size_t sz; 76 76 int ret; 77 77 78 - ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs); 78 + ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs); 79 79 if (ret >= 0 && sz != sizeof(fs)) 80 80 ret = -EINVAL; 81 81 ··· 132 132 int ret, i; 133 133 134 134 memset(iis, 0, sizeof(*iis)); 135 - ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis); 135 + ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis); 136 136 if (ret < 0) 137 137 goto failed; 138 138
+7 -8
drivers/mtd/ar7part.c
··· 73 73 74 74 do { /* Try 10 blocks starting from master->erasesize */ 75 75 offset = pre_size; 76 - master->read(master, offset, 77 - sizeof(header), &len, (uint8_t *)&header); 76 + mtd_read(master, offset, sizeof(header), &len, 77 + (uint8_t *)&header); 78 78 if (!strncmp((char *)&header, "TIENV0.8", 8)) 79 79 ar7_parts[1].offset = pre_size; 80 80 if (header.checksum == LOADER_MAGIC1) ··· 95 95 case LOADER_MAGIC1: 96 96 while (header.length) { 97 97 offset += sizeof(header) + header.length; 98 - master->read(master, offset, sizeof(header), 99 - &len, (uint8_t *)&header); 98 + mtd_read(master, offset, sizeof(header), &len, 99 + (uint8_t *)&header); 100 100 } 101 101 root_offset = offset + sizeof(header) + 4; 102 102 break; 103 103 case LOADER_MAGIC2: 104 104 while (header.length) { 105 105 offset += sizeof(header) + header.length; 106 - master->read(master, offset, sizeof(header), 107 - &len, (uint8_t *)&header); 106 + mtd_read(master, offset, sizeof(header), &len, 107 + (uint8_t *)&header); 108 108 } 109 109 root_offset = offset + sizeof(header) + 4 + 0xff; 110 110 root_offset &= ~(uint32_t)0xff; ··· 114 114 break; 115 115 } 116 116 117 - master->read(master, root_offset, 118 - sizeof(header), &len, (u8 *)&header); 117 + mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header); 119 118 if (header.checksum != SQUASHFS_MAGIC) { 120 119 root_offset += master->erasesize - 1; 121 120 root_offset &= ~(master->erasesize - 1);
+222
drivers/mtd/bcm63xxpart.c
··· 1 + /* 2 + * BCM63XX CFE image tag parser 3 + * 4 + * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> 5 + * Mike Albon <malbon@openwrt.org> 6 + * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> 7 + * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or 12 + * (at your option) any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 + * 23 + */ 24 + 25 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 26 + 27 + #include <linux/crc32.h> 28 + #include <linux/module.h> 29 + #include <linux/kernel.h> 30 + #include <linux/slab.h> 31 + #include <linux/vmalloc.h> 32 + #include <linux/mtd/mtd.h> 33 + #include <linux/mtd/partitions.h> 34 + 35 + #include <asm/mach-bcm63xx/bcm963xx_tag.h> 36 + #include <asm/mach-bcm63xx/board_bcm963xx.h> 37 + 38 + #define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ 39 + 40 + #define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */ 41 + #define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */ 42 + 43 + #define BCM63XX_CFE_MAGIC_OFFSET 0x4e0 44 + 45 + static int bcm63xx_detect_cfe(struct mtd_info *master) 46 + { 47 + char buf[9]; 48 + int ret; 49 + size_t retlen; 50 + 51 + ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen, 52 + (void *)buf); 53 + buf[retlen] = 0; 54 + 55 + if (ret) 
56 + return ret; 57 + 58 + if (strncmp("cfe-v", buf, 5) == 0) 59 + return 0; 60 + 61 + /* very old CFE's do not have the cfe-v string, so check for magic */ 62 + ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen, 63 + (void *)buf); 64 + buf[retlen] = 0; 65 + 66 + return strncmp("CFE1CFE1", buf, 8); 67 + } 68 + 69 + static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, 70 + struct mtd_partition **pparts, 71 + struct mtd_part_parser_data *data) 72 + { 73 + /* CFE, NVRAM and global Linux are always present */ 74 + int nrparts = 3, curpart = 0; 75 + struct bcm_tag *buf; 76 + struct mtd_partition *parts; 77 + int ret; 78 + size_t retlen; 79 + unsigned int rootfsaddr, kerneladdr, spareaddr; 80 + unsigned int rootfslen, kernellen, sparelen, totallen; 81 + unsigned int cfelen, nvramlen; 82 + int namelen = 0; 83 + int i; 84 + u32 computed_crc; 85 + 86 + if (bcm63xx_detect_cfe(master)) 87 + return -EINVAL; 88 + 89 + cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE); 90 + nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE); 91 + 92 + /* Allocate memory for buffer */ 93 + buf = vmalloc(sizeof(struct bcm_tag)); 94 + if (!buf) 95 + return -ENOMEM; 96 + 97 + /* Get the tag */ 98 + ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen, 99 + (void *)buf); 100 + 101 + if (retlen != sizeof(struct bcm_tag)) { 102 + vfree(buf); 103 + return -EIO; 104 + } 105 + 106 + computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf, 107 + offsetof(struct bcm_tag, header_crc)); 108 + if (computed_crc == buf->header_crc) { 109 + char *boardid = &(buf->board_id[0]); 110 + char *tagversion = &(buf->tag_version[0]); 111 + 112 + sscanf(buf->kernel_address, "%u", &kerneladdr); 113 + sscanf(buf->kernel_length, "%u", &kernellen); 114 + sscanf(buf->total_length, "%u", &totallen); 115 + 116 + pr_info("CFE boot tag found with version %s and board type %s\n", 117 + tagversion, boardid); 118 + 119 + kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 
120 + rootfsaddr = kerneladdr + kernellen; 121 + spareaddr = roundup(totallen, master->erasesize) + cfelen; 122 + sparelen = master->size - spareaddr - nvramlen; 123 + rootfslen = spareaddr - rootfsaddr; 124 + } else { 125 + pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n", 126 + buf->header_crc, computed_crc); 127 + kernellen = 0; 128 + rootfslen = 0; 129 + rootfsaddr = 0; 130 + spareaddr = cfelen; 131 + sparelen = master->size - cfelen - nvramlen; 132 + } 133 + 134 + /* Determine number of partitions */ 135 + namelen = 8; 136 + if (rootfslen > 0) { 137 + nrparts++; 138 + namelen += 6; 139 + } 140 + if (kernellen > 0) { 141 + nrparts++; 142 + namelen += 6; 143 + } 144 + 145 + /* Ask kernel for more memory */ 146 + parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); 147 + if (!parts) { 148 + vfree(buf); 149 + return -ENOMEM; 150 + } 151 + 152 + /* Start building partition list */ 153 + parts[curpart].name = "CFE"; 154 + parts[curpart].offset = 0; 155 + parts[curpart].size = cfelen; 156 + curpart++; 157 + 158 + if (kernellen > 0) { 159 + parts[curpart].name = "kernel"; 160 + parts[curpart].offset = kerneladdr; 161 + parts[curpart].size = kernellen; 162 + curpart++; 163 + } 164 + 165 + if (rootfslen > 0) { 166 + parts[curpart].name = "rootfs"; 167 + parts[curpart].offset = rootfsaddr; 168 + parts[curpart].size = rootfslen; 169 + if (sparelen > 0) 170 + parts[curpart].size += sparelen; 171 + curpart++; 172 + } 173 + 174 + parts[curpart].name = "nvram"; 175 + parts[curpart].offset = master->size - nvramlen; 176 + parts[curpart].size = nvramlen; 177 + 178 + /* Global partition "linux" to make easy firmware upgrade */ 179 + curpart++; 180 + parts[curpart].name = "linux"; 181 + parts[curpart].offset = cfelen; 182 + parts[curpart].size = master->size - cfelen - nvramlen; 183 + 184 + for (i = 0; i < nrparts; i++) 185 + pr_info("Partition %d is %s offset %lx and length %lx\n", i, 186 + parts[i].name, (long unsigned int)(parts[i].offset), 
187 + (long unsigned int)(parts[i].size)); 188 + 189 + pr_info("Spare partition is offset %x and length %x\n", spareaddr, 190 + sparelen); 191 + 192 + *pparts = parts; 193 + vfree(buf); 194 + 195 + return nrparts; 196 + }; 197 + 198 + static struct mtd_part_parser bcm63xx_cfe_parser = { 199 + .owner = THIS_MODULE, 200 + .parse_fn = bcm63xx_parse_cfe_partitions, 201 + .name = "bcm63xxpart", 202 + }; 203 + 204 + static int __init bcm63xx_cfe_parser_init(void) 205 + { 206 + return register_mtd_parser(&bcm63xx_cfe_parser); 207 + } 208 + 209 + static void __exit bcm63xx_cfe_parser_exit(void) 210 + { 211 + deregister_mtd_parser(&bcm63xx_cfe_parser); 212 + } 213 + 214 + module_init(bcm63xx_cfe_parser_init); 215 + module_exit(bcm63xx_cfe_parser_exit); 216 + 217 + MODULE_LICENSE("GPL"); 218 + MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>"); 219 + MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); 220 + MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>"); 221 + MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com"); 222 + MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
+8 -5
drivers/mtd/chips/cfi_cmdset_0020.c
··· 139 139 } 140 140 141 141 /* Do some byteswapping if necessary */ 142 - extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport); 143 - extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask); 142 + extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport); 143 + extp->BlkStatusRegMask = cfi32_to_cpu(map, 144 + extp->BlkStatusRegMask); 144 145 145 146 #ifdef DEBUG_CFI_FEATURES 146 147 /* Tell the user about it in lots of lovely detail */ ··· 699 698 continue; 700 699 } 701 700 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen); 702 - ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer); 701 + ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen, 702 + buffer); 703 703 totlen += thislen; 704 704 if (ret || thislen != ECCBUF_SIZE) 705 705 goto write_error; ··· 709 707 to += ECCBUF_SIZE; 710 708 } 711 709 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */ 712 - ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base); 710 + ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len), 711 + &thislen, elem_base); 713 712 totlen += thislen; 714 713 if (ret || thislen != ECCBUF_DIV(elem_len)) 715 714 goto write_error; ··· 724 721 } 725 722 if (buflen) { /* flush last page, even if not full */ 726 723 /* This is sometimes intended behaviour, really */ 727 - ret = mtd->write(mtd, to, buflen, &thislen, buffer); 724 + ret = mtd_write(mtd, to, buflen, &thislen, buffer); 728 725 totlen += thislen; 729 726 if (ret || thislen != ECCBUF_SIZE) 730 727 goto write_error;
+12
drivers/mtd/devices/Kconfig
··· 191 191 192 192 config MTD_DOC2000 193 193 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)" 194 + depends on MTD_NAND 194 195 select MTD_DOCPROBE 195 196 select MTD_NAND_IDS 196 197 ---help--- ··· 214 213 215 214 config MTD_DOC2001 216 215 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)" 216 + depends on MTD_NAND 217 217 select MTD_DOCPROBE 218 218 select MTD_NAND_IDS 219 219 ---help--- ··· 236 234 237 235 config MTD_DOC2001PLUS 238 236 tristate "M-Systems Disk-On-Chip Millennium Plus" 237 + depends on MTD_NAND 239 238 select MTD_DOCPROBE 240 239 select MTD_NAND_IDS 241 240 ---help--- ··· 254 251 255 252 config MTD_DOCG3 256 253 tristate "M-Systems Disk-On-Chip G3" 254 + select BCH 255 + select BCH_CONST_PARAMS 257 256 ---help--- 258 257 This provides an MTD device driver for the M-Systems DiskOnChip 259 258 G3 devices. ··· 263 258 The driver provides access to G3 DiskOnChip, distributed by 264 259 M-Systems and now Sandisk. The support is very experimental, 265 260 and doesn't give access to any write operations. 261 + 262 + if MTD_DOCG3 263 + config BCH_CONST_M 264 + default 14 265 + config BCH_CONST_T 266 + default 4 267 + endif 266 268 267 269 config MTD_DOCPROBE 268 270 tristate
+1 -1
drivers/mtd/devices/block2mtd.c
··· 287 287 dev->mtd.flags = MTD_CAP_RAM; 288 288 dev->mtd.erase = block2mtd_erase; 289 289 dev->mtd.write = block2mtd_write; 290 - dev->mtd.writev = default_mtd_writev; 290 + dev->mtd.writev = mtd_writev; 291 291 dev->mtd.sync = block2mtd_sync; 292 292 dev->mtd.read = block2mtd_read; 293 293 dev->mtd.priv = dev;
-9
drivers/mtd/devices/doc2000.c
··· 562 562 563 563 mtd->type = MTD_NANDFLASH; 564 564 mtd->flags = MTD_CAP_NANDFLASH; 565 - mtd->size = 0; 566 - mtd->erasesize = 0; 567 565 mtd->writesize = 512; 568 566 mtd->oobsize = 16; 569 567 mtd->owner = THIS_MODULE; 570 568 mtd->erase = doc_erase; 571 - mtd->point = NULL; 572 - mtd->unpoint = NULL; 573 569 mtd->read = doc_read; 574 570 mtd->write = doc_write; 575 571 mtd->read_oob = doc_read_oob; 576 572 mtd->write_oob = doc_write_oob; 577 - mtd->sync = NULL; 578 - 579 - this->totlen = 0; 580 - this->numchips = 0; 581 - 582 573 this->curfloor = -1; 583 574 this->curchip = -1; 584 575 mutex_init(&this->lock);
-8
drivers/mtd/devices/doc2001.c
··· 343 343 344 344 mtd->type = MTD_NANDFLASH; 345 345 mtd->flags = MTD_CAP_NANDFLASH; 346 - mtd->size = 0; 347 346 348 347 /* FIXME: erase size is not always 8KiB */ 349 348 mtd->erasesize = 0x2000; 350 - 351 349 mtd->writesize = 512; 352 350 mtd->oobsize = 16; 353 351 mtd->owner = THIS_MODULE; 354 352 mtd->erase = doc_erase; 355 - mtd->point = NULL; 356 - mtd->unpoint = NULL; 357 353 mtd->read = doc_read; 358 354 mtd->write = doc_write; 359 355 mtd->read_oob = doc_read_oob; 360 356 mtd->write_oob = doc_write_oob; 361 - mtd->sync = NULL; 362 - 363 - this->totlen = 0; 364 - this->numchips = 0; 365 357 this->curfloor = -1; 366 358 this->curchip = -1; 367 359
-9
drivers/mtd/devices/doc2001plus.c
··· 467 467 468 468 mtd->type = MTD_NANDFLASH; 469 469 mtd->flags = MTD_CAP_NANDFLASH; 470 - mtd->size = 0; 471 - 472 - mtd->erasesize = 0; 473 470 mtd->writesize = 512; 474 471 mtd->oobsize = 16; 475 472 mtd->owner = THIS_MODULE; 476 473 mtd->erase = doc_erase; 477 - mtd->point = NULL; 478 - mtd->unpoint = NULL; 479 474 mtd->read = doc_read; 480 475 mtd->write = doc_write; 481 476 mtd->read_oob = doc_read_oob; 482 477 mtd->write_oob = doc_write_oob; 483 - mtd->sync = NULL; 484 - 485 - this->totlen = 0; 486 - this->numchips = 0; 487 478 this->curfloor = -1; 488 479 this->curchip = -1; 489 480
+1266 -255
drivers/mtd/devices/docg3.c
··· 29 29 #include <linux/delay.h> 30 30 #include <linux/mtd/mtd.h> 31 31 #include <linux/mtd/partitions.h> 32 + #include <linux/bitmap.h> 33 + #include <linux/bitrev.h> 34 + #include <linux/bch.h> 32 35 33 36 #include <linux/debugfs.h> 34 37 #include <linux/seq_file.h> ··· 44 41 * 45 42 * As no specification is available from M-Systems/Sandisk, this drivers lacks 46 43 * several functions available on the chip, as : 47 - * - block erase 48 - * - page write 49 44 * - IPL write 50 - * - ECC fixing (lack of BCH algorith understanding) 51 - * - powerdown / powerup 52 45 * 53 46 * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and 54 47 * the driver assumes a 16bits data bus. ··· 52 53 * DocG3 relies on 2 ECC algorithms, which are handled in hardware : 53 54 * - a 1 byte Hamming code stored in the OOB for each page 54 55 * - a 7 bytes BCH code stored in the OOB for each page 55 - * The BCH part is only used for check purpose, no correction is available as 56 - * some information is missing. 
What is known is that : 56 + * The BCH ECC is : 57 57 * - BCH is in GF(2^14) 58 58 * - BCH is over data of 520 bytes (512 page + 7 page_info bytes 59 59 * + 1 hamming byte) ··· 60 62 * - BCH syndroms are calculated in hardware, and checked in hardware as well 61 63 * 62 64 */ 65 + 66 + static unsigned int reliable_mode; 67 + module_param(reliable_mode, uint, 0); 68 + MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, " 69 + "2=reliable) : MLC normal operations are in normal mode"); 70 + 71 + /** 72 + * struct docg3_oobinfo - DiskOnChip G3 OOB layout 73 + * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC) 74 + * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC) 75 + * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15 76 + * @oobavail: 8 available bytes remaining after ECC toll 77 + */ 78 + static struct nand_ecclayout docg3_oobinfo = { 79 + .eccbytes = 8, 80 + .eccpos = {7, 8, 9, 10, 11, 12, 13, 14}, 81 + .oobfree = {{0, 7}, {15, 1} }, 82 + .oobavail = 8, 83 + }; 84 + 85 + /** 86 + * struct docg3_bch - BCH engine 87 + */ 88 + static struct bch_control *docg3_bch; 63 89 64 90 static inline u8 doc_readb(struct docg3 *docg3, u16 reg) 65 91 { ··· 104 82 static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) 105 83 { 106 84 writeb(val, docg3->base + reg); 107 - trace_docg3_io(1, 16, reg, val); 85 + trace_docg3_io(1, 8, reg, val); 108 86 } 109 87 110 88 static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) ··· 165 143 { 166 144 int i; 167 145 168 - doc_dbg("NOP x %d\n", nbNOPs); 146 + doc_vdbg("NOP x %d\n", nbNOPs); 169 147 for (i = 0; i < nbNOPs; i++) 170 148 doc_writeb(docg3, 0, DOC_NOP); 171 149 } ··· 218 196 /** 219 197 * doc_read_data_area - Read data from data area 220 198 * @docg3: the device 221 - * @buf: the buffer to fill in 222 - * @len: the lenght to read 199 + * @buf: the buffer to fill in (might be NULL is dummy reads) 200 + * @len: the length to read 223 201 * 
@first: first time read, DOC_READADDRESS should be set 224 202 * 225 203 * Reads bytes from flash data. Handles the single byte / even bytes reads. ··· 240 218 dst16 = buf; 241 219 for (i = 0; i < len4; i += 2) { 242 220 data16 = doc_readw(docg3, DOC_IOSPACE_DATA); 243 - *dst16 = data16; 244 - dst16++; 221 + if (dst16) { 222 + *dst16 = data16; 223 + dst16++; 224 + } 245 225 } 246 226 247 227 if (cdr) { ··· 253 229 dst8 = (u8 *)dst16; 254 230 for (i = 0; i < cdr; i++) { 255 231 data8 = doc_readb(docg3, DOC_IOSPACE_DATA); 256 - *dst8 = data8; 257 - dst8++; 232 + if (dst8) { 233 + *dst8 = data8; 234 + dst8++; 235 + } 258 236 } 259 237 } 260 238 } 261 239 262 240 /** 263 - * doc_set_data_mode - Sets the flash to reliable data mode 241 + * doc_write_data_area - Write data into data area 242 + * @docg3: the device 243 + * @buf: the buffer to get input bytes from 244 + * @len: the length to write 245 + * 246 + * Writes bytes into flash data. Handles the single byte / even bytes writes. 247 + */ 248 + static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len) 249 + { 250 + int i, cdr, len4; 251 + u16 *src16; 252 + u8 *src8; 253 + 254 + doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len); 255 + cdr = len & 0x3; 256 + len4 = len - cdr; 257 + 258 + doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS); 259 + src16 = (u16 *)buf; 260 + for (i = 0; i < len4; i += 2) { 261 + doc_writew(docg3, *src16, DOC_IOSPACE_DATA); 262 + src16++; 263 + } 264 + 265 + src8 = (u8 *)src16; 266 + for (i = 0; i < cdr; i++) { 267 + doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE, 268 + DOC_READADDRESS); 269 + doc_writeb(docg3, *src8, DOC_IOSPACE_DATA); 270 + src8++; 271 + } 272 + } 273 + 274 + /** 275 + * doc_set_data_mode - Sets the flash to normal or reliable data mode 264 276 * @docg3: the device 265 277 * 266 278 * The reliable data mode is a bit slower than the fast mode, but less errors 267 279 * occur. 
Entering the reliable mode cannot be done without entering the fast 268 280 * mode first. 281 + * 282 + * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks 283 + * (4,5) make the hardware write also to page 1 of blocks blocks(4,5). Reading 284 + * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same 285 + * result, which is a logical and between bytes from page 0 and page 1 (which is 286 + * consistent with the fact that writing to a page is _clearing_ bits of that 287 + * page). 269 288 */ 270 289 static void doc_set_reliable_mode(struct docg3 *docg3) 271 290 { 272 - doc_dbg("doc_set_reliable_mode()\n"); 273 - doc_flash_sequence(docg3, DOC_SEQ_SET_MODE); 274 - doc_flash_command(docg3, DOC_CMD_FAST_MODE); 275 - doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE); 291 + static char *strmode[] = { "normal", "fast", "reliable", "invalid" }; 292 + 293 + doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]); 294 + switch (docg3->reliable) { 295 + case 0: 296 + break; 297 + case 1: 298 + doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE); 299 + doc_flash_command(docg3, DOC_CMD_FAST_MODE); 300 + break; 301 + case 2: 302 + doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE); 303 + doc_flash_command(docg3, DOC_CMD_FAST_MODE); 304 + doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE); 305 + break; 306 + default: 307 + doc_err("doc_set_reliable_mode(): invalid mode\n"); 308 + break; 309 + } 276 310 doc_delay(docg3, 2); 277 311 } 278 312 ··· 407 325 } 408 326 409 327 /** 328 + * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane 329 + * @docg3: the device 330 + * @sector: the sector 331 + */ 332 + static void doc_setup_addr_sector(struct docg3 *docg3, int sector) 333 + { 334 + doc_delay(docg3, 1); 335 + doc_flash_address(docg3, sector & 0xff); 336 + doc_flash_address(docg3, (sector >> 8) & 0xff); 337 + doc_flash_address(docg3, (sector >> 16) & 0xff); 338 + doc_delay(docg3, 1); 339 + } 340 + 341 + /** 342 + * 
doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane 343 + * @docg3: the device 344 + * @sector: the sector 345 + * @ofs: the offset in the page, between 0 and (512 + 16 + 512) 346 + */ 347 + static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) 348 + { 349 + ofs = ofs >> 2; 350 + doc_delay(docg3, 1); 351 + doc_flash_address(docg3, ofs & 0xff); 352 + doc_flash_address(docg3, sector & 0xff); 353 + doc_flash_address(docg3, (sector >> 8) & 0xff); 354 + doc_flash_address(docg3, (sector >> 16) & 0xff); 355 + doc_delay(docg3, 1); 356 + } 357 + 358 + /** 410 359 * doc_seek - Set both flash planes to the specified block, page for reading 411 360 * @docg3: the device 412 361 * @block0: the first plane block index ··· 473 360 if (ret) 474 361 goto out; 475 362 476 - sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 477 363 doc_flash_sequence(docg3, DOC_SEQ_READ); 364 + sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 478 365 doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 479 - doc_delay(docg3, 1); 480 - doc_flash_address(docg3, sector & 0xff); 481 - doc_flash_address(docg3, (sector >> 8) & 0xff); 482 - doc_flash_address(docg3, (sector >> 16) & 0xff); 483 - doc_delay(docg3, 1); 366 + doc_setup_addr_sector(docg3, sector); 484 367 485 368 sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 486 369 doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 370 + doc_setup_addr_sector(docg3, sector); 487 371 doc_delay(docg3, 1); 488 - doc_flash_address(docg3, sector & 0xff); 489 - doc_flash_address(docg3, (sector >> 8) & 0xff); 490 - doc_flash_address(docg3, (sector >> 16) & 0xff); 491 - doc_delay(docg3, 2); 492 372 493 373 out: 494 374 return ret; 495 375 } 376 + 377 + /** 378 + * doc_write_seek - Set both flash planes to the specified block, page for writing 379 + * @docg3: the device 380 + * @block0: the first plane block index 381 + * @block1: the second plane block 
index 382 + * @page: the page index within the block 383 + * @ofs: offset in page to write 384 + * 385 + * Programs the flash even and odd planes to the specific block and page. 386 + * Alternatively, programs the flash to the wear area of the specified page. 387 + */ 388 + static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page, 389 + int ofs) 390 + { 391 + int ret = 0, sector; 392 + 393 + doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n", 394 + block0, block1, page, ofs); 395 + 396 + doc_set_reliable_mode(docg3); 397 + 398 + if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) { 399 + doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1); 400 + doc_flash_command(docg3, DOC_CMD_READ_PLANE1); 401 + doc_delay(docg3, 2); 402 + } else { 403 + doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2); 404 + doc_flash_command(docg3, DOC_CMD_READ_PLANE2); 405 + doc_delay(docg3, 2); 406 + } 407 + 408 + doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP); 409 + doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1); 410 + 411 + sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 412 + doc_setup_writeaddr_sector(docg3, sector, ofs); 413 + 414 + doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3); 415 + doc_delay(docg3, 2); 416 + ret = doc_wait_ready(docg3); 417 + if (ret) 418 + goto out; 419 + 420 + doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1); 421 + sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 422 + doc_setup_writeaddr_sector(docg3, sector, ofs); 423 + doc_delay(docg3, 1); 424 + 425 + out: 426 + return ret; 427 + } 428 + 496 429 497 430 /** 498 431 * doc_read_page_ecc_init - Initialize hardware ECC engine ··· 546 387 * @len: the number of bytes covered by the ECC (BCH covered) 547 388 * 548 389 * The function does initialize the hardware ECC engine to compute the Hamming 549 - * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes). 390 + * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes). 
550 391 * 551 392 * Return 0 if succeeded, -EIO on error 552 393 */ ··· 560 401 doc_register_readb(docg3, DOC_FLASHCONTROL); 561 402 return doc_wait_ready(docg3); 562 403 } 404 + 405 + /** 406 + * doc_write_page_ecc_init - Initialize hardware BCH ECC engine 407 + * @docg3: the device 408 + * @len: the number of bytes covered by the ECC (BCH covered) 409 + * 410 + * The function does initialize the hardware ECC engine to compute the Hamming 411 + * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes). 412 + * 413 + * Return 0 if succeeded, -EIO on error 414 + */ 415 + static int doc_write_page_ecc_init(struct docg3 *docg3, int len) 416 + { 417 + doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE 418 + | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE 419 + | (len & DOC_ECCCONF0_DATA_BYTES_MASK), 420 + DOC_ECCCONF0); 421 + doc_delay(docg3, 4); 422 + doc_register_readb(docg3, DOC_FLASHCONTROL); 423 + return doc_wait_ready(docg3); 424 + } 425 + 426 + /** 427 + * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator 428 + * @docg3: the device 429 + * 430 + * Disables the hardware ECC generator and checker, for unchecked reads (as when 431 + * reading OOB only or write status byte). 432 + */ 433 + static void doc_ecc_disable(struct docg3 *docg3) 434 + { 435 + doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0); 436 + doc_delay(docg3, 4); 437 + } 438 + 439 + /** 440 + * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine 441 + * @docg3: the device 442 + * @nb_bytes: the number of bytes covered by the ECC (Hamming covered) 443 + * 444 + * This function programs the ECC hardware to compute the hamming code on the 445 + * last provided N bytes to the hardware generator. 
446 + */ 447 + static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes) 448 + { 449 + u8 ecc_conf1; 450 + 451 + ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1); 452 + ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK; 453 + ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK); 454 + doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1); 455 + } 456 + 457 + /** 458 + * doc_ecc_bch_fix_data - Fix if need be read data from flash 459 + * @docg3: the device 460 + * @buf: the buffer of read data (512 + 7 + 1 bytes) 461 + * @hwecc: the hardware calculated ECC. 462 + * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB 463 + * area data, and calc_ecc the ECC calculated by the hardware generator. 464 + * 465 + * Checks if the received data matches the ECC, and if an error is detected, 466 + * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3 467 + * understands the (data, ecc, syndroms) in an inverted order in comparison to 468 + * the BCH library, the function reverses the order of bits (ie. bit7 and bit0, 469 + * bit6 and bit 1, ...) for all ECC data. 470 + * 471 + * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch 472 + * algorithm is used to decode this. However the hw operates on page 473 + * data in a bit order that is the reverse of that of the bch alg, 474 + * requiring that the bits be reversed on the result. Thanks to Ivan 475 + * Djelic for his analysis. 476 + * 477 + * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit 478 + * errors were detected and cannot be fixed. 
479 + */ 480 + static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc) 481 + { 482 + u8 ecc[DOC_ECC_BCH_SIZE]; 483 + int errorpos[DOC_ECC_BCH_T], i, numerrs; 484 + 485 + for (i = 0; i < DOC_ECC_BCH_SIZE; i++) 486 + ecc[i] = bitrev8(hwecc[i]); 487 + numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES, 488 + NULL, ecc, NULL, errorpos); 489 + BUG_ON(numerrs == -EINVAL); 490 + if (numerrs < 0) 491 + goto out; 492 + 493 + for (i = 0; i < numerrs; i++) 494 + errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7)); 495 + for (i = 0; i < numerrs; i++) 496 + if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8) 497 + /* error is located in data, correct it */ 498 + change_bit(errorpos[i], buf); 499 + out: 500 + doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs); 501 + return numerrs; 502 + } 503 + 563 504 564 505 /** 565 506 * doc_read_page_prepare - Prepares reading data from a flash page ··· 747 488 } 748 489 749 490 /** 750 - * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms 491 + * doc_write_page_putbytes - Writes bytes into a prepared page 751 492 * @docg3: the device 752 - * @syns: the array of 7 integers where the syndroms will be stored 493 + * @len: the number of bytes to be written 494 + * @buf: the buffer of input bytes 495 + * 753 496 */ 754 - static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns) 497 + static void doc_write_page_putbytes(struct docg3 *docg3, int len, 498 + const u_char *buf) 499 + { 500 + doc_write_data_area(docg3, buf, len); 501 + doc_delay(docg3, 2); 502 + } 503 + 504 + /** 505 + * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC 506 + * @docg3: the device 507 + * @hwecc: the array of 7 integers where the hardware ecc will be stored 508 + */ 509 + static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc) 755 510 { 756 511 int i; 757 512 758 513 for (i = 0; i < DOC_ECC_BCH_SIZE; i++) 759 - syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i)); 514 + hwecc[i] = 
doc_register_readb(docg3, DOC_BCH_HW_ECC(i)); 515 + } 516 + 517 + /** 518 + * doc_page_finish - Ends reading/writing of a flash page 519 + * @docg3: the device 520 + */ 521 + static void doc_page_finish(struct docg3 *docg3) 522 + { 523 + doc_writeb(docg3, 0, DOC_DATAEND); 524 + doc_delay(docg3, 2); 760 525 } 761 526 762 527 /** ··· 793 510 */ 794 511 static void doc_read_page_finish(struct docg3 *docg3) 795 512 { 796 - doc_writeb(docg3, 0, DOC_DATAEND); 797 - doc_delay(docg3, 2); 513 + doc_page_finish(docg3); 798 514 doc_set_device_id(docg3, 0); 799 515 } 800 516 ··· 805 523 * @block1: second plane block index calculated 806 524 * @page: page calculated 807 525 * @ofs: offset in page 526 + * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in 527 + * reliable mode. 528 + * 529 + * The calculation is based on the reliable/normal mode. In normal mode, the 64 530 + * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are 531 + * clones, only 32 pages per block are available. 
808 532 */ 809 533 static void calc_block_sector(loff_t from, int *block0, int *block1, int *page, 810 - int *ofs) 534 + int *ofs, int reliable) 811 535 { 812 - uint sector; 536 + uint sector, pages_biblock; 537 + 538 + pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES; 539 + if (reliable == 1 || reliable == 2) 540 + pages_biblock /= 2; 813 541 814 542 sector = from / DOC_LAYOUT_PAGE_SIZE; 815 - *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES) 816 - * DOC_LAYOUT_NBPLANES; 543 + *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES; 817 544 *block1 = *block0 + 1; 818 - *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES); 545 + *page = sector % pages_biblock; 819 546 *page /= DOC_LAYOUT_NBPLANES; 547 + if (reliable == 1 || reliable == 2) 548 + *page *= 2; 820 549 if (sector % 2) 821 550 *ofs = DOC_LAYOUT_PAGE_OOB_SIZE; 822 551 else 823 552 *ofs = 0; 553 + } 554 + 555 + /** 556 + * doc_read_oob - Read out of band bytes from flash 557 + * @mtd: the device 558 + * @from: the offset from first block and first page, in bytes, aligned on page 559 + * size 560 + * @ops: the mtd oob structure 561 + * 562 + * Reads flash memory OOB area of pages. 
563 + * 564 + * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured 565 + */ 566 + static int doc_read_oob(struct mtd_info *mtd, loff_t from, 567 + struct mtd_oob_ops *ops) 568 + { 569 + struct docg3 *docg3 = mtd->priv; 570 + int block0, block1, page, ret, ofs = 0; 571 + u8 *oobbuf = ops->oobbuf; 572 + u8 *buf = ops->datbuf; 573 + size_t len, ooblen, nbdata, nboob; 574 + u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; 575 + 576 + if (buf) 577 + len = ops->len; 578 + else 579 + len = 0; 580 + if (oobbuf) 581 + ooblen = ops->ooblen; 582 + else 583 + ooblen = 0; 584 + 585 + if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB) 586 + oobbuf += ops->ooboffs; 587 + 588 + doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", 589 + from, ops->mode, buf, len, oobbuf, ooblen); 590 + if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) || 591 + (from % DOC_LAYOUT_PAGE_SIZE)) 592 + return -EINVAL; 593 + 594 + ret = -EINVAL; 595 + calc_block_sector(from + len, &block0, &block1, &page, &ofs, 596 + docg3->reliable); 597 + if (block1 > docg3->max_block) 598 + goto err; 599 + 600 + ops->oobretlen = 0; 601 + ops->retlen = 0; 602 + ret = 0; 603 + while (!ret && (len > 0 || ooblen > 0)) { 604 + calc_block_sector(from, &block0, &block1, &page, &ofs, 605 + docg3->reliable); 606 + nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 607 + nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); 608 + ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); 609 + if (ret < 0) 610 + goto err; 611 + ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 612 + if (ret < 0) 613 + goto err_in_read; 614 + ret = doc_read_page_getbytes(docg3, nbdata, buf, 1); 615 + if (ret < nbdata) 616 + goto err_in_read; 617 + doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata, 618 + NULL, 0); 619 + ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); 620 + if (ret < nboob) 621 + goto err_in_read; 622 + doc_read_page_getbytes(docg3, 
DOC_LAYOUT_OOB_SIZE - nboob, 623 + NULL, 0); 624 + 625 + doc_get_bch_hw_ecc(docg3, hwecc); 626 + eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 627 + 628 + if (nboob >= DOC_LAYOUT_OOB_SIZE) { 629 + doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 630 + oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3], 631 + oobbuf[4], oobbuf[5], oobbuf[6]); 632 + doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]); 633 + doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 634 + oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11], 635 + oobbuf[12], oobbuf[13], oobbuf[14]); 636 + doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]); 637 + } 638 + doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); 639 + doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 640 + hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4], 641 + hwecc[5], hwecc[6]); 642 + 643 + ret = -EIO; 644 + if (is_prot_seq_error(docg3)) 645 + goto err_in_read; 646 + ret = 0; 647 + if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) && 648 + (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) && 649 + (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) && 650 + (ops->mode != MTD_OPS_RAW) && 651 + (nbdata == DOC_LAYOUT_PAGE_SIZE)) { 652 + ret = doc_ecc_bch_fix_data(docg3, buf, hwecc); 653 + if (ret < 0) { 654 + mtd->ecc_stats.failed++; 655 + ret = -EBADMSG; 656 + } 657 + if (ret > 0) { 658 + mtd->ecc_stats.corrected += ret; 659 + ret = -EUCLEAN; 660 + } 661 + } 662 + 663 + doc_read_page_finish(docg3); 664 + ops->retlen += nbdata; 665 + ops->oobretlen += nboob; 666 + buf += nbdata; 667 + oobbuf += nboob; 668 + len -= nbdata; 669 + ooblen -= nboob; 670 + from += DOC_LAYOUT_PAGE_SIZE; 671 + } 672 + 673 + return ret; 674 + err_in_read: 675 + doc_read_page_finish(docg3); 676 + err: 677 + return ret; 824 678 } 825 679 826 680 /** ··· 976 558 static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, 977 559 size_t *retlen, u_char *buf) 978 560 { 979 - struct docg3 *docg3 = mtd->priv; 980 - int block0, block1, page, readlen, ret, ofs = 0; 981 - int 
syn[DOC_ECC_BCH_SIZE], eccconf1; 982 - u8 oob[DOC_LAYOUT_OOB_SIZE]; 561 + struct mtd_oob_ops ops; 562 + size_t ret; 983 563 984 - ret = -EINVAL; 985 - doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf); 986 - if (from % DOC_LAYOUT_PAGE_SIZE) 987 - goto err; 988 - if (len % 4) 989 - goto err; 990 - calc_block_sector(from, &block0, &block1, &page, &ofs); 991 - if (block1 > docg3->max_block) 992 - goto err; 564 + memset(&ops, 0, sizeof(ops)); 565 + ops.datbuf = buf; 566 + ops.len = len; 567 + ops.mode = MTD_OPS_AUTO_OOB; 993 568 994 - *retlen = 0; 995 - ret = 0; 996 - readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 997 - while (!ret && len > 0) { 998 - readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 999 - ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); 1000 - if (ret < 0) 1001 - goto err; 1002 - ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES); 1003 - if (ret < 0) 1004 - goto err_in_read; 1005 - ret = doc_read_page_getbytes(docg3, readlen, buf, 1); 1006 - if (ret < readlen) 1007 - goto err_in_read; 1008 - ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, 1009 - oob, 0); 1010 - if (ret < DOC_LAYOUT_OOB_SIZE) 1011 - goto err_in_read; 1012 - 1013 - *retlen += readlen; 1014 - buf += readlen; 1015 - len -= readlen; 1016 - 1017 - ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE; 1018 - if (ofs == 0) 1019 - page += 2; 1020 - if (page > DOC_ADDR_PAGE_MASK) { 1021 - page = 0; 1022 - block0 += 2; 1023 - block1 += 2; 1024 - } 1025 - 1026 - /* 1027 - * There should be a BCH bitstream fixing algorithm here ... 
1028 - * By now, a page read failure is triggered by BCH error 1029 - */ 1030 - doc_get_hw_bch_syndroms(docg3, syn); 1031 - eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 1032 - 1033 - doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 1034 - oob[0], oob[1], oob[2], oob[3], oob[4], 1035 - oob[5], oob[6]); 1036 - doc_dbg("OOB - HAMMING: %02x\n", oob[7]); 1037 - doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 1038 - oob[8], oob[9], oob[10], oob[11], oob[12], 1039 - oob[13], oob[14]); 1040 - doc_dbg("OOB - UNUSED: %02x\n", oob[15]); 1041 - doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); 1042 - doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 1043 - syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]); 1044 - 1045 - ret = -EBADMSG; 1046 - if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) { 1047 - if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) 1048 - goto err_in_read; 1049 - if (is_prot_seq_error(docg3)) 1050 - goto err_in_read; 1051 - } 1052 - doc_read_page_finish(docg3); 1053 - } 1054 - 1055 - return 0; 1056 - err_in_read: 1057 - doc_read_page_finish(docg3); 1058 - err: 569 + ret = doc_read_oob(mtd, from, &ops); 570 + *retlen = ops.retlen; 1059 571 return ret; 1060 - } 1061 - 1062 - /** 1063 - * doc_read_oob - Read out of band bytes from flash 1064 - * @mtd: the device 1065 - * @from: the offset from first block and first page, in bytes, aligned on page 1066 - * size 1067 - * @ops: the mtd oob structure 1068 - * 1069 - * Reads flash memory OOB area of pages. 
1070 - * 1071 - * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured 1072 - */ 1073 - static int doc_read_oob(struct mtd_info *mtd, loff_t from, 1074 - struct mtd_oob_ops *ops) 1075 - { 1076 - struct docg3 *docg3 = mtd->priv; 1077 - int block0, block1, page, ofs, ret; 1078 - u8 *buf = ops->oobbuf; 1079 - size_t len = ops->ooblen; 1080 - 1081 - doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len); 1082 - if (len != DOC_LAYOUT_OOB_SIZE) 1083 - return -EINVAL; 1084 - 1085 - switch (ops->mode) { 1086 - case MTD_OPS_PLACE_OOB: 1087 - buf += ops->ooboffs; 1088 - break; 1089 - default: 1090 - break; 1091 - } 1092 - 1093 - calc_block_sector(from, &block0, &block1, &page, &ofs); 1094 - if (block1 > docg3->max_block) 1095 - return -EINVAL; 1096 - 1097 - ret = doc_read_page_prepare(docg3, block0, block1, page, 1098 - ofs + DOC_LAYOUT_PAGE_SIZE); 1099 - if (!ret) 1100 - ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE); 1101 - if (!ret) 1102 - ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, 1103 - buf, 1); 1104 - doc_read_page_finish(docg3); 1105 - 1106 - if (ret > 0) 1107 - ops->oobretlen = ret; 1108 - else 1109 - ops->oobretlen = 0; 1110 - return (ret > 0) ? 
0 : ret; 1111 572 } 1112 573 1113 574 static int doc_reload_bbt(struct docg3 *docg3) ··· 1023 726 struct docg3 *docg3 = mtd->priv; 1024 727 int block0, block1, page, ofs, is_good; 1025 728 1026 - calc_block_sector(from, &block0, &block1, &page, &ofs); 729 + calc_block_sector(from, &block0, &block1, &page, &ofs, 730 + docg3->reliable); 1027 731 doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n", 1028 732 from, block0, block1, page, ofs); 1029 733 ··· 1037 739 return !is_good; 1038 740 } 1039 741 742 + #if 0 1040 743 /** 1041 744 * doc_get_erase_count - Get block erase count 1042 745 * @docg3: the device ··· 1057 758 doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf); 1058 759 if (from % DOC_LAYOUT_PAGE_SIZE) 1059 760 return -EINVAL; 1060 - calc_block_sector(from, &block0, &block1, &page, &ofs); 761 + calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable); 1061 762 if (block1 > docg3->max_block) 1062 763 return -EINVAL; 1063 764 ··· 1078 779 | ((u8)(~buf[7]) << 16); 1079 780 1080 781 return max(plane1_erase_count, plane2_erase_count); 782 + } 783 + #endif 784 + 785 + /** 786 + * doc_get_op_status - get erase/write operation status 787 + * @docg3: the device 788 + * 789 + * Queries the status from the chip, and returns it 790 + * 791 + * Returns the status (bits DOC_PLANES_STATUS_*) 792 + */ 793 + static int doc_get_op_status(struct docg3 *docg3) 794 + { 795 + u8 status; 796 + 797 + doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS); 798 + doc_flash_command(docg3, DOC_CMD_PLANES_STATUS); 799 + doc_delay(docg3, 5); 800 + 801 + doc_ecc_disable(docg3); 802 + doc_read_data_area(docg3, &status, 1, 1); 803 + return status; 804 + } 805 + 806 + /** 807 + * doc_write_erase_wait_status - wait for write or erase completion 808 + * @docg3: the device 809 + * 810 + * Wait for the chip to be ready again after erase or write operation, and check 811 + * erase/write status. 
812 + * 813 + * Returns 0 if erase successfull, -EIO if erase/write issue, -ETIMEOUT if 814 + * timeout 815 + */ 816 + static int doc_write_erase_wait_status(struct docg3 *docg3) 817 + { 818 + int status, ret = 0; 819 + 820 + if (!doc_is_ready(docg3)) 821 + usleep_range(3000, 3000); 822 + if (!doc_is_ready(docg3)) { 823 + doc_dbg("Timeout reached and the chip is still not ready\n"); 824 + ret = -EAGAIN; 825 + goto out; 826 + } 827 + 828 + status = doc_get_op_status(docg3); 829 + if (status & DOC_PLANES_STATUS_FAIL) { 830 + doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n", 831 + status); 832 + ret = -EIO; 833 + } 834 + 835 + out: 836 + doc_page_finish(docg3); 837 + return ret; 838 + } 839 + 840 + /** 841 + * doc_erase_block - Erase a couple of blocks 842 + * @docg3: the device 843 + * @block0: the first block to erase (leftmost plane) 844 + * @block1: the second block to erase (rightmost plane) 845 + * 846 + * Erase both blocks, and return operation status 847 + * 848 + * Returns 0 if erase successful, -EIO if erase issue, -ETIMEOUT if chip not 849 + * ready for too long 850 + */ 851 + static int doc_erase_block(struct docg3 *docg3, int block0, int block1) 852 + { 853 + int ret, sector; 854 + 855 + doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1); 856 + ret = doc_reset_seq(docg3); 857 + if (ret) 858 + return -EIO; 859 + 860 + doc_set_reliable_mode(docg3); 861 + doc_flash_sequence(docg3, DOC_SEQ_ERASE); 862 + 863 + sector = block0 << DOC_ADDR_BLOCK_SHIFT; 864 + doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 865 + doc_setup_addr_sector(docg3, sector); 866 + sector = block1 << DOC_ADDR_BLOCK_SHIFT; 867 + doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 868 + doc_setup_addr_sector(docg3, sector); 869 + doc_delay(docg3, 1); 870 + 871 + doc_flash_command(docg3, DOC_CMD_ERASECYCLE2); 872 + doc_delay(docg3, 2); 873 + 874 + if (is_prot_seq_error(docg3)) { 875 + doc_err("Erase blocks %d,%d error\n", block0, block1); 876 + return -EIO; 877 + } 878 
+ 879 + return doc_write_erase_wait_status(docg3); 880 + } 881 + 882 + /** 883 + * doc_erase - Erase a portion of the chip 884 + * @mtd: the device 885 + * @info: the erase info 886 + * 887 + * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is 888 + * split into 2 pages of 512 bytes on 2 contiguous blocks. 889 + * 890 + * Returns 0 if erase successful, -EINVAL if adressing error, -EIO if erase 891 + * issue 892 + */ 893 + static int doc_erase(struct mtd_info *mtd, struct erase_info *info) 894 + { 895 + struct docg3 *docg3 = mtd->priv; 896 + uint64_t len; 897 + int block0, block1, page, ret, ofs = 0; 898 + 899 + doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len); 900 + doc_set_device_id(docg3, docg3->device_id); 901 + 902 + info->state = MTD_ERASE_PENDING; 903 + calc_block_sector(info->addr + info->len, &block0, &block1, &page, 904 + &ofs, docg3->reliable); 905 + ret = -EINVAL; 906 + if (block1 > docg3->max_block || page || ofs) 907 + goto reset_err; 908 + 909 + ret = 0; 910 + calc_block_sector(info->addr, &block0, &block1, &page, &ofs, 911 + docg3->reliable); 912 + doc_set_reliable_mode(docg3); 913 + for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { 914 + info->state = MTD_ERASING; 915 + ret = doc_erase_block(docg3, block0, block1); 916 + block0 += 2; 917 + block1 += 2; 918 + } 919 + 920 + if (ret) 921 + goto reset_err; 922 + 923 + info->state = MTD_ERASE_DONE; 924 + return 0; 925 + 926 + reset_err: 927 + info->state = MTD_ERASE_FAILED; 928 + return ret; 929 + } 930 + 931 + /** 932 + * doc_write_page - Write a single page to the chip 933 + * @docg3: the device 934 + * @to: the offset from first block and first page, in bytes, aligned on page 935 + * size 936 + * @buf: buffer to get bytes from 937 + * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be 938 + * written) 939 + * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or 940 + * BCH computations. 
If 1, only bytes 0-7 and byte 15 are taken, 941 + * remaining ones are filled with hardware Hamming and BCH 942 + * computations. Its value is not meaningfull is oob == NULL. 943 + * 944 + * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the 945 + * OOB data. The OOB ECC is automatically computed by the hardware Hamming and 946 + * BCH generator if autoecc is not null. 947 + * 948 + * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout 949 + */ 950 + static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf, 951 + const u_char *oob, int autoecc) 952 + { 953 + int block0, block1, page, ret, ofs = 0; 954 + u8 hwecc[DOC_ECC_BCH_SIZE], hamming; 955 + 956 + doc_dbg("doc_write_page(to=%lld)\n", to); 957 + calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable); 958 + 959 + doc_set_device_id(docg3, docg3->device_id); 960 + ret = doc_reset_seq(docg3); 961 + if (ret) 962 + goto err; 963 + 964 + /* Program the flash address block and page */ 965 + ret = doc_write_seek(docg3, block0, block1, page, ofs); 966 + if (ret) 967 + goto err; 968 + 969 + doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 970 + doc_delay(docg3, 2); 971 + doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf); 972 + 973 + if (oob && autoecc) { 974 + doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob); 975 + doc_delay(docg3, 2); 976 + oob += DOC_LAYOUT_OOB_UNUSED_OFS; 977 + 978 + hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY); 979 + doc_delay(docg3, 2); 980 + doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ, 981 + &hamming); 982 + doc_delay(docg3, 2); 983 + 984 + doc_get_bch_hw_ecc(docg3, hwecc); 985 + doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc); 986 + doc_delay(docg3, 2); 987 + 988 + doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob); 989 + } 990 + if (oob && !autoecc) 991 + doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob); 992 + 993 + doc_delay(docg3, 2); 
994 + doc_page_finish(docg3); 995 + doc_delay(docg3, 2); 996 + doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2); 997 + doc_delay(docg3, 2); 998 + 999 + /* 1000 + * The wait status will perform another doc_page_finish() call, but that 1001 + * seems to please the docg3, so leave it. 1002 + */ 1003 + ret = doc_write_erase_wait_status(docg3); 1004 + return ret; 1005 + err: 1006 + doc_read_page_finish(docg3); 1007 + return ret; 1008 + } 1009 + 1010 + /** 1011 + * doc_guess_autoecc - Guess autoecc mode from mbd_oob_ops 1012 + * @ops: the oob operations 1013 + * 1014 + * Returns 0 or 1 if success, -EINVAL if invalid oob mode 1015 + */ 1016 + static int doc_guess_autoecc(struct mtd_oob_ops *ops) 1017 + { 1018 + int autoecc; 1019 + 1020 + switch (ops->mode) { 1021 + case MTD_OPS_PLACE_OOB: 1022 + case MTD_OPS_AUTO_OOB: 1023 + autoecc = 1; 1024 + break; 1025 + case MTD_OPS_RAW: 1026 + autoecc = 0; 1027 + break; 1028 + default: 1029 + autoecc = -EINVAL; 1030 + } 1031 + return autoecc; 1032 + } 1033 + 1034 + /** 1035 + * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes 1036 + * @dst: the target 16 bytes OOB buffer 1037 + * @oobsrc: the source 8 bytes non-ECC OOB buffer 1038 + * 1039 + */ 1040 + static void doc_fill_autooob(u8 *dst, u8 *oobsrc) 1041 + { 1042 + memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ); 1043 + dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ]; 1044 + } 1045 + 1046 + /** 1047 + * doc_backup_oob - Backup OOB into docg3 structure 1048 + * @docg3: the device 1049 + * @to: the page offset in the chip 1050 + * @ops: the OOB size and buffer 1051 + * 1052 + * As the docg3 should write a page with its OOB in one pass, and some userland 1053 + * applications do write_oob() to setup the OOB and then write(), store the OOB 1054 + * into a temporary storage. This is very dangerous, as 2 concurrent 1055 + * applications could store an OOB, and then write their pages (which will 1056 + * result into one having its OOB corrupted). 
1057 + * 1058 + * The only reliable way would be for userland to call doc_write_oob() with both 1059 + * the page data _and_ the OOB area. 1060 + * 1061 + * Returns 0 if success, -EINVAL if ops content invalid 1062 + */ 1063 + static int doc_backup_oob(struct docg3 *docg3, loff_t to, 1064 + struct mtd_oob_ops *ops) 1065 + { 1066 + int ooblen = ops->ooblen, autoecc; 1067 + 1068 + if (ooblen != DOC_LAYOUT_OOB_SIZE) 1069 + return -EINVAL; 1070 + autoecc = doc_guess_autoecc(ops); 1071 + if (autoecc < 0) 1072 + return autoecc; 1073 + 1074 + docg3->oob_write_ofs = to; 1075 + docg3->oob_autoecc = autoecc; 1076 + if (ops->mode == MTD_OPS_AUTO_OOB) { 1077 + doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf); 1078 + ops->oobretlen = 8; 1079 + } else { 1080 + memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE); 1081 + ops->oobretlen = DOC_LAYOUT_OOB_SIZE; 1082 + } 1083 + return 0; 1084 + } 1085 + 1086 + /** 1087 + * doc_write_oob - Write out of band bytes to flash 1088 + * @mtd: the device 1089 + * @ofs: the offset from first block and first page, in bytes, aligned on page 1090 + * size 1091 + * @ops: the mtd oob structure 1092 + * 1093 + * Either write OOB data into a temporary buffer, for the subsequent write 1094 + * page. The provided OOB should be 16 bytes long. If a data buffer is provided 1095 + * as well, issue the page write. 1096 + * Or provide data without OOB, and then a all zeroed OOB will be used (ECC will 1097 + * still be filled in if asked for). 
1098 + * 1099 + * Returns 0 is successfull, EINVAL if length is not 14 bytes 1100 + */ 1101 + static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, 1102 + struct mtd_oob_ops *ops) 1103 + { 1104 + struct docg3 *docg3 = mtd->priv; 1105 + int block0, block1, page, ret, pofs = 0, autoecc, oobdelta; 1106 + u8 *oobbuf = ops->oobbuf; 1107 + u8 *buf = ops->datbuf; 1108 + size_t len, ooblen; 1109 + u8 oob[DOC_LAYOUT_OOB_SIZE]; 1110 + 1111 + if (buf) 1112 + len = ops->len; 1113 + else 1114 + len = 0; 1115 + if (oobbuf) 1116 + ooblen = ops->ooblen; 1117 + else 1118 + ooblen = 0; 1119 + 1120 + if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB) 1121 + oobbuf += ops->ooboffs; 1122 + 1123 + doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", 1124 + ofs, ops->mode, buf, len, oobbuf, ooblen); 1125 + switch (ops->mode) { 1126 + case MTD_OPS_PLACE_OOB: 1127 + case MTD_OPS_RAW: 1128 + oobdelta = mtd->oobsize; 1129 + break; 1130 + case MTD_OPS_AUTO_OOB: 1131 + oobdelta = mtd->ecclayout->oobavail; 1132 + break; 1133 + default: 1134 + oobdelta = 0; 1135 + } 1136 + if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) || 1137 + (ofs % DOC_LAYOUT_PAGE_SIZE)) 1138 + return -EINVAL; 1139 + if (len && ooblen && 1140 + (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) 1141 + return -EINVAL; 1142 + 1143 + ret = -EINVAL; 1144 + calc_block_sector(ofs + len, &block0, &block1, &page, &pofs, 1145 + docg3->reliable); 1146 + if (block1 > docg3->max_block) 1147 + goto err; 1148 + 1149 + ops->oobretlen = 0; 1150 + ops->retlen = 0; 1151 + ret = 0; 1152 + if (len == 0 && ooblen == 0) 1153 + return -EINVAL; 1154 + if (len == 0 && ooblen > 0) 1155 + return doc_backup_oob(docg3, ofs, ops); 1156 + 1157 + autoecc = doc_guess_autoecc(ops); 1158 + if (autoecc < 0) 1159 + return autoecc; 1160 + 1161 + while (!ret && len > 0) { 1162 + memset(oob, 0, sizeof(oob)); 1163 + if (ofs == docg3->oob_write_ofs) 1164 + memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE); 1165 + else if 
(ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB) 1166 + doc_fill_autooob(oob, oobbuf); 1167 + else if (ooblen > 0) 1168 + memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE); 1169 + ret = doc_write_page(docg3, ofs, buf, oob, autoecc); 1170 + 1171 + ofs += DOC_LAYOUT_PAGE_SIZE; 1172 + len -= DOC_LAYOUT_PAGE_SIZE; 1173 + buf += DOC_LAYOUT_PAGE_SIZE; 1174 + if (ooblen) { 1175 + oobbuf += oobdelta; 1176 + ooblen -= oobdelta; 1177 + ops->oobretlen += oobdelta; 1178 + } 1179 + ops->retlen += DOC_LAYOUT_PAGE_SIZE; 1180 + } 1181 + err: 1182 + doc_set_device_id(docg3, 0); 1183 + return ret; 1184 + } 1185 + 1186 + /** 1187 + * doc_write - Write a buffer to the chip 1188 + * @mtd: the device 1189 + * @to: the offset from first block and first page, in bytes, aligned on page 1190 + * size 1191 + * @len: the number of bytes to write (must be a full page size, ie. 512) 1192 + * @retlen: the number of bytes actually written (0 or 512) 1193 + * @buf: the buffer to get bytes from 1194 + * 1195 + * Writes data to the chip. 
1196 + * 1197 + * Returns 0 if write successful, -EIO if write error 1198 + */ 1199 + static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 1200 + size_t *retlen, const u_char *buf) 1201 + { 1202 + struct docg3 *docg3 = mtd->priv; 1203 + int ret; 1204 + struct mtd_oob_ops ops; 1205 + 1206 + doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len); 1207 + ops.datbuf = (char *)buf; 1208 + ops.len = len; 1209 + ops.mode = MTD_OPS_PLACE_OOB; 1210 + ops.oobbuf = NULL; 1211 + ops.ooblen = 0; 1212 + ops.ooboffs = 0; 1213 + 1214 + ret = doc_write_oob(mtd, to, &ops); 1215 + *retlen = ops.retlen; 1216 + return ret; 1217 + } 1218 + 1219 + static struct docg3 *sysfs_dev2docg3(struct device *dev, 1220 + struct device_attribute *attr) 1221 + { 1222 + int floor; 1223 + struct platform_device *pdev = to_platform_device(dev); 1224 + struct mtd_info **docg3_floors = platform_get_drvdata(pdev); 1225 + 1226 + floor = attr->attr.name[1] - '0'; 1227 + if (floor < 0 || floor >= DOC_MAX_NBFLOORS) 1228 + return NULL; 1229 + else 1230 + return docg3_floors[floor]->priv; 1231 + } 1232 + 1233 + static ssize_t dps0_is_key_locked(struct device *dev, 1234 + struct device_attribute *attr, char *buf) 1235 + { 1236 + struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1237 + int dps0; 1238 + 1239 + doc_set_device_id(docg3, docg3->device_id); 1240 + dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1241 + doc_set_device_id(docg3, 0); 1242 + 1243 + return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK)); 1244 + } 1245 + 1246 + static ssize_t dps1_is_key_locked(struct device *dev, 1247 + struct device_attribute *attr, char *buf) 1248 + { 1249 + struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1250 + int dps1; 1251 + 1252 + doc_set_device_id(docg3, docg3->device_id); 1253 + dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1254 + doc_set_device_id(docg3, 0); 1255 + 1256 + return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK)); 1257 + } 1258 + 1259 + static ssize_t dps0_insert_key(struct device *dev, 
1260 + struct device_attribute *attr, 1261 + const char *buf, size_t count) 1262 + { 1263 + struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1264 + int i; 1265 + 1266 + if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1267 + return -EINVAL; 1268 + 1269 + doc_set_device_id(docg3, docg3->device_id); 1270 + for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1271 + doc_writeb(docg3, buf[i], DOC_DPS0_KEY); 1272 + doc_set_device_id(docg3, 0); 1273 + return count; 1274 + } 1275 + 1276 + static ssize_t dps1_insert_key(struct device *dev, 1277 + struct device_attribute *attr, 1278 + const char *buf, size_t count) 1279 + { 1280 + struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1281 + int i; 1282 + 1283 + if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1284 + return -EINVAL; 1285 + 1286 + doc_set_device_id(docg3, docg3->device_id); 1287 + for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1288 + doc_writeb(docg3, buf[i], DOC_DPS1_KEY); 1289 + doc_set_device_id(docg3, 0); 1290 + return count; 1291 + } 1292 + 1293 + #define FLOOR_SYSFS(id) { \ 1294 + __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \ 1295 + __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \ 1296 + __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \ 1297 + __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \ 1298 + } 1299 + 1300 + static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = { 1301 + FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3) 1302 + }; 1303 + 1304 + static int doc_register_sysfs(struct platform_device *pdev, 1305 + struct mtd_info **floors) 1306 + { 1307 + int ret = 0, floor, i = 0; 1308 + struct device *dev = &pdev->dev; 1309 + 1310 + for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor]; 1311 + floor++) 1312 + for (i = 0; !ret && i < 4; i++) 1313 + ret = device_create_file(dev, &doc_sys_attrs[floor][i]); 1314 + if (!ret) 1315 + return 0; 1316 + do { 1317 + while (--i >= 0) 1318 + 
device_remove_file(dev, &doc_sys_attrs[floor][i]); 1319 + i = 4; 1320 + } while (--floor >= 0); 1321 + return ret; 1322 + } 1323 + 1324 + static void doc_unregister_sysfs(struct platform_device *pdev, 1325 + struct mtd_info **floors) 1326 + { 1327 + struct device *dev = &pdev->dev; 1328 + int floor, i; 1329 + 1330 + for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor]; 1331 + floor++) 1332 + for (i = 0; i < 4; i++) 1333 + device_remove_file(dev, &doc_sys_attrs[floor][i]); 1081 1334 } 1082 1335 1083 1336 /* ··· 1703 852 { 1704 853 struct docg3 *docg3 = (struct docg3 *)s->private; 1705 854 int pos = 0; 1706 - int protect = doc_register_readb(docg3, DOC_PROTECTION); 1707 - int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1708 - int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW); 1709 - int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH); 1710 - int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1711 - int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW); 1712 - int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH); 855 + int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high; 856 + 857 + protect = doc_register_readb(docg3, DOC_PROTECTION); 858 + dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 859 + dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW); 860 + dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH); 861 + dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 862 + dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW); 863 + dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH); 1713 864 1714 865 pos += seq_printf(s, "Protection = 0x%02x (", 1715 866 protect); ··· 1800 947 1801 948 cfg = doc_register_readb(docg3, DOC_CONFIGURATION); 1802 949 docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 
1 : 0); 950 + docg3->reliable = reliable_mode; 1803 951 1804 952 switch (chip_id) { 1805 953 case DOC_CHIPID_G3: 1806 - mtd->name = "DiskOnChip G3"; 954 + mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d", 955 + docg3->device_id); 1807 956 docg3->max_block = 2047; 1808 957 break; 1809 958 } 1810 959 mtd->type = MTD_NANDFLASH; 1811 - /* 1812 - * Once write methods are added, the correct flags will be set. 1813 - * mtd->flags = MTD_CAP_NANDFLASH; 1814 - */ 1815 - mtd->flags = MTD_CAP_ROM; 960 + mtd->flags = MTD_CAP_NANDFLASH; 1816 961 mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE; 962 + if (docg3->reliable == 2) 963 + mtd->size /= 2; 1817 964 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; 965 + if (docg3->reliable == 2) 966 + mtd->erasesize /= 2; 1818 967 mtd->writesize = DOC_LAYOUT_PAGE_SIZE; 1819 968 mtd->oobsize = DOC_LAYOUT_OOB_SIZE; 1820 969 mtd->owner = THIS_MODULE; 1821 - mtd->erase = NULL; 1822 - mtd->point = NULL; 1823 - mtd->unpoint = NULL; 970 + mtd->erase = doc_erase; 1824 971 mtd->read = doc_read; 1825 - mtd->write = NULL; 972 + mtd->write = doc_write; 1826 973 mtd->read_oob = doc_read_oob; 1827 - mtd->write_oob = NULL; 1828 - mtd->sync = NULL; 974 + mtd->write_oob = doc_write_oob; 1829 975 mtd->block_isbad = doc_block_isbad; 976 + mtd->ecclayout = &docg3_oobinfo; 1830 977 } 1831 978 1832 979 /** 1833 - * doc_probe - Probe the IO space for a DiskOnChip G3 chip 1834 - * @pdev: platform device 980 + * doc_probe_device - Check if a device is available 981 + * @base: the io space where the device is probed 982 + * @floor: the floor of the probed device 983 + * @dev: the device 1835 984 * 1836 - * Probes for a G3 chip at the specified IO space in the platform data 1837 - * ressources. 985 + * Checks whether a device at the specified IO range, and floor is available. 
1838 986 * 1839 - * Returns 0 on success, -ENOMEM, -ENXIO on error 987 + * Returns a mtd_info struct if there is a device, ENODEV if none found, ENOMEM 988 + * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is 989 + * launched. 1840 990 */ 1841 - static int __init docg3_probe(struct platform_device *pdev) 991 + static struct mtd_info *doc_probe_device(void __iomem *base, int floor, 992 + struct device *dev) 1842 993 { 1843 - struct device *dev = &pdev->dev; 1844 - struct docg3 *docg3; 1845 - struct mtd_info *mtd; 1846 - struct resource *ress; 1847 994 int ret, bbt_nbpages; 1848 995 u16 chip_id, chip_id_inv; 996 + struct docg3 *docg3; 997 + struct mtd_info *mtd; 1849 998 1850 999 ret = -ENOMEM; 1851 1000 docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL); ··· 1857 1002 if (!mtd) 1858 1003 goto nomem2; 1859 1004 mtd->priv = docg3; 1005 + bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1, 1006 + 8 * DOC_LAYOUT_PAGE_SIZE); 1007 + docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL); 1008 + if (!docg3->bbt) 1009 + goto nomem3; 1010 + 1011 + docg3->dev = dev; 1012 + docg3->device_id = floor; 1013 + docg3->base = base; 1014 + doc_set_device_id(docg3, docg3->device_id); 1015 + if (!floor) 1016 + doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); 1017 + doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL); 1018 + 1019 + chip_id = doc_register_readw(docg3, DOC_CHIPID); 1020 + chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV); 1021 + 1022 + ret = 0; 1023 + if (chip_id != (u16)(~chip_id_inv)) { 1024 + goto nomem3; 1025 + } 1026 + 1027 + switch (chip_id) { 1028 + case DOC_CHIPID_G3: 1029 + doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", 1030 + base, floor); 1031 + break; 1032 + default: 1033 + doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); 1034 + goto nomem3; 1035 + } 1036 + 1037 + doc_set_driver_info(chip_id, mtd); 1038 + 1039 + doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ); 1040 + doc_reload_bbt(docg3); 
1041 + return mtd; 1042 + 1043 + nomem3: 1044 + kfree(mtd); 1045 + nomem2: 1046 + kfree(docg3); 1047 + nomem1: 1048 + return ERR_PTR(ret); 1049 + } 1050 + 1051 + /** 1052 + * doc_release_device - Release a docg3 floor 1053 + * @mtd: the device 1054 + */ 1055 + static void doc_release_device(struct mtd_info *mtd) 1056 + { 1057 + struct docg3 *docg3 = mtd->priv; 1058 + 1059 + mtd_device_unregister(mtd); 1060 + kfree(docg3->bbt); 1061 + kfree(docg3); 1062 + kfree(mtd->name); 1063 + kfree(mtd); 1064 + } 1065 + 1066 + /** 1067 + * docg3_resume - Awakens docg3 floor 1068 + * @pdev: platfrom device 1069 + * 1070 + * Returns 0 (always successfull) 1071 + */ 1072 + static int docg3_resume(struct platform_device *pdev) 1073 + { 1074 + int i; 1075 + struct mtd_info **docg3_floors, *mtd; 1076 + struct docg3 *docg3; 1077 + 1078 + docg3_floors = platform_get_drvdata(pdev); 1079 + mtd = docg3_floors[0]; 1080 + docg3 = mtd->priv; 1081 + 1082 + doc_dbg("docg3_resume()\n"); 1083 + for (i = 0; i < 12; i++) 1084 + doc_readb(docg3, DOC_IOSPACE_IPL); 1085 + return 0; 1086 + } 1087 + 1088 + /** 1089 + * docg3_suspend - Put in low power mode the docg3 floor 1090 + * @pdev: platform device 1091 + * @state: power state 1092 + * 1093 + * Shuts off most of docg3 circuitery to lower power consumption. 
1094 + * 1095 + * Returns 0 if suspend succeeded, -EIO if chip refused suspend 1096 + */ 1097 + static int docg3_suspend(struct platform_device *pdev, pm_message_t state) 1098 + { 1099 + int floor, i; 1100 + struct mtd_info **docg3_floors, *mtd; 1101 + struct docg3 *docg3; 1102 + u8 ctrl, pwr_down; 1103 + 1104 + docg3_floors = platform_get_drvdata(pdev); 1105 + for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 1106 + mtd = docg3_floors[floor]; 1107 + if (!mtd) 1108 + continue; 1109 + docg3 = mtd->priv; 1110 + 1111 + doc_writeb(docg3, floor, DOC_DEVICESELECT); 1112 + ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 1113 + ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE; 1114 + doc_writeb(docg3, ctrl, DOC_FLASHCONTROL); 1115 + 1116 + for (i = 0; i < 10; i++) { 1117 + usleep_range(3000, 4000); 1118 + pwr_down = doc_register_readb(docg3, DOC_POWERMODE); 1119 + if (pwr_down & DOC_POWERDOWN_READY) 1120 + break; 1121 + } 1122 + if (pwr_down & DOC_POWERDOWN_READY) { 1123 + doc_dbg("docg3_suspend(): floor %d powerdown ok\n", 1124 + floor); 1125 + } else { 1126 + doc_err("docg3_suspend(): floor %d powerdown failed\n", 1127 + floor); 1128 + return -EIO; 1129 + } 1130 + } 1131 + 1132 + mtd = docg3_floors[0]; 1133 + docg3 = mtd->priv; 1134 + doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN); 1135 + return 0; 1136 + } 1137 + 1138 + /** 1139 + * doc_probe - Probe the IO space for a DiskOnChip G3 chip 1140 + * @pdev: platform device 1141 + * 1142 + * Probes for a G3 chip at the specified IO space in the platform data 1143 + * ressources. The floor 0 must be available. 
1144 + * 1145 + * Returns 0 on success, -ENOMEM, -ENXIO on error 1146 + */ 1147 + static int __init docg3_probe(struct platform_device *pdev) 1148 + { 1149 + struct device *dev = &pdev->dev; 1150 + struct mtd_info *mtd; 1151 + struct resource *ress; 1152 + void __iomem *base; 1153 + int ret, floor, found = 0; 1154 + struct mtd_info **docg3_floors; 1860 1155 1861 1156 ret = -ENXIO; 1862 1157 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 2014 1009 dev_err(dev, "No I/O memory resource defined\n"); 2015 1010 goto noress; 2016 1011 } 2017 - docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE); 2018 - 2019 - docg3->dev = &pdev->dev; 2020 - docg3->device_id = 0; 2021 - doc_set_device_id(docg3, docg3->device_id); 2022 - doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); 2023 - doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL); 2024 - 2025 - chip_id = doc_register_readw(docg3, DOC_CHIPID); 2026 - chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV); 2027 - 2028 - ret = -ENODEV; 2029 - if (chip_id != (u16)(~chip_id_inv)) { 2030 - doc_info("No device found at IO addr %p\n", 2031 - (void *)ress->start); 2032 - goto nochipfound; 2033 - } 2034 - 2035 - switch (chip_id) { 2036 - case DOC_CHIPID_G3: 2037 - doc_info("Found a G3 DiskOnChip at addr %p\n", 2038 - (void *)ress->start); 2039 - break; 2040 - default: 2041 - doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); 2042 - goto nochipfound; 2043 - } 2044 - 2045 - doc_set_driver_info(chip_id, mtd); 2046 - platform_set_drvdata(pdev, mtd); 1012 + base = ioremap(ress->start, DOC_IOSPACE_SIZE); 2047 1013 2048 1014 ret = -ENOMEM; 2049 - bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1, 2050 - 8 * DOC_LAYOUT_PAGE_SIZE); 2051 - docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL); 2052 - if (!docg3->bbt) 2053 - goto nochipfound; 2054 - doc_reload_bbt(docg3); 1015 + docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS, 1016 + GFP_KERNEL); 1017 + if (!docg3_floors) 1018 + goto nomem1; 1019 + 
docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, 1020 + DOC_ECC_BCH_PRIMPOLY); 1021 + if (!docg3_bch) 1022 + goto nomem2; 2055 1023 2056 - ret = mtd_device_parse_register(mtd, part_probes, 2057 - NULL, NULL, 0); 1024 + for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 1025 + mtd = doc_probe_device(base, floor, dev); 1026 + if (IS_ERR(mtd)) { 1027 + ret = PTR_ERR(mtd); 1028 + goto err_probe; 1029 + } 1030 + if (!mtd) { 1031 + if (floor == 0) 1032 + goto notfound; 1033 + else 1034 + continue; 1035 + } 1036 + docg3_floors[floor] = mtd; 1037 + ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 1038 + 0); 1039 + if (ret) 1040 + goto err_probe; 1041 + found++; 1042 + } 1043 + 1044 + ret = doc_register_sysfs(pdev, docg3_floors); 2058 1045 if (ret) 2059 - goto register_error; 1046 + goto err_probe; 1047 + if (!found) 1048 + goto notfound; 2060 1049 2061 - doc_dbg_register(docg3); 1050 + platform_set_drvdata(pdev, docg3_floors); 1051 + doc_dbg_register(docg3_floors[0]->priv); 2062 1052 return 0; 2063 1053 2064 - register_error: 2065 - kfree(docg3->bbt); 2066 - nochipfound: 2067 - iounmap(docg3->base); 2068 - noress: 2069 - kfree(mtd); 1054 + notfound: 1055 + ret = -ENODEV; 1056 + dev_info(dev, "No supported DiskOnChip found\n"); 1057 + err_probe: 1058 + free_bch(docg3_bch); 1059 + for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 1060 + if (docg3_floors[floor]) 1061 + doc_release_device(docg3_floors[floor]); 2070 1062 nomem2: 2071 - kfree(docg3); 1063 + kfree(docg3_floors); 2072 1064 nomem1: 1065 + iounmap(base); 1066 + noress: 2073 1067 return ret; 2074 1068 } 2075 1069 ··· 2080 1076 */ 2081 1077 static int __exit docg3_release(struct platform_device *pdev) 2082 1078 { 2083 - struct mtd_info *mtd = platform_get_drvdata(pdev); 2084 - struct docg3 *docg3 = mtd->priv; 1079 + struct mtd_info **docg3_floors = platform_get_drvdata(pdev); 1080 + struct docg3 *docg3 = docg3_floors[0]->priv; 1081 + void __iomem *base = docg3->base; 1082 + int floor; 2085 1083 1084 
+ doc_unregister_sysfs(pdev, docg3_floors); 2086 1085 doc_dbg_unregister(docg3); 2087 - mtd_device_unregister(mtd); 2088 - iounmap(docg3->base); 2089 - kfree(docg3->bbt); 2090 - kfree(docg3); 2091 - kfree(mtd); 1086 + for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 1087 + if (docg3_floors[floor]) 1088 + doc_release_device(docg3_floors[floor]); 1089 + 1090 + kfree(docg3_floors); 1091 + free_bch(docg3_bch); 1092 + iounmap(base); 2092 1093 return 0; 2093 1094 } 2094 1095 ··· 2102 1093 .name = "docg3", 2103 1094 .owner = THIS_MODULE, 2104 1095 }, 1096 + .suspend = docg3_suspend, 1097 + .resume = docg3_resume, 2105 1098 .remove = __exit_p(docg3_release), 2106 1099 }; 2107 1100
+61 -4
drivers/mtd/devices/docg3.h
··· 51 51 #define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2) 52 52 #define DOC_LAYOUT_BLOCK_SIZE \ 53 53 (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE) 54 + 55 + /* 56 + * ECC related constants 57 + */ 58 + #define DOC_ECC_BCH_M 14 59 + #define DOC_ECC_BCH_T 4 60 + #define DOC_ECC_BCH_PRIMPOLY 0x4443 54 61 #define DOC_ECC_BCH_SIZE 7 55 62 #define DOC_ECC_BCH_COVERED_BYTES \ 56 63 (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \ 57 - DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ) 64 + DOC_LAYOUT_OOB_HAMMING_SZ) 65 + #define DOC_ECC_BCH_TOTAL_BYTES \ 66 + (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ) 58 67 59 68 /* 60 69 * Blocks distribution ··· 89 80 90 81 #define DOC_CHIPID_G3 0x200 91 82 #define DOC_ERASE_MARK 0xaa 83 + #define DOC_MAX_NBFLOORS 4 92 84 /* 93 85 * Flash registers 94 86 */ ··· 115 105 #define DOC_ECCCONF1 0x1042 116 106 #define DOC_ECCPRESET 0x1044 117 107 #define DOC_HAMMINGPARITY 0x1046 118 - #define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1)) 108 + #define DOC_BCH_HW_ECC(idx) (0x1048 + idx) 119 109 120 110 #define DOC_PROTECTION 0x1056 111 + #define DOC_DPS0_KEY 0x105c 112 + #define DOC_DPS1_KEY 0x105e 121 113 #define DOC_DPS0_ADDRLOW 0x1060 122 114 #define DOC_DPS0_ADDRHIGH 0x1062 123 115 #define DOC_DPS1_ADDRLOW 0x1064 ··· 129 117 130 118 #define DOC_ASICMODECONFIRM 0x1072 131 119 #define DOC_CHIPID_INV 0x1074 120 + #define DOC_POWERMODE 0x107c 132 121 133 122 /* 134 123 * Flash sequences ··· 137 124 */ 138 125 #define DOC_SEQ_RESET 0x00 139 126 #define DOC_SEQ_PAGE_SIZE_532 0x03 140 - #define DOC_SEQ_SET_MODE 0x09 127 + #define DOC_SEQ_SET_FASTMODE 0x05 128 + #define DOC_SEQ_SET_RELIABLEMODE 0x09 141 129 #define DOC_SEQ_READ 0x12 142 130 #define DOC_SEQ_SET_PLANE1 0x0e 143 131 #define DOC_SEQ_SET_PLANE2 0x10 144 132 #define DOC_SEQ_PAGE_SETUP 0x1d 133 + #define DOC_SEQ_ERASE 0x27 134 + #define DOC_SEQ_PLANES_STATUS 0x31 145 135 146 136 /* 147 137 * Flash commands ··· 159 143 #define DOC_CMD_PROG_BLOCK_ADDR 0x60 
160 144 #define DOC_CMD_PROG_CYCLE1 0x80 161 145 #define DOC_CMD_PROG_CYCLE2 0x10 146 + #define DOC_CMD_PROG_CYCLE3 0x11 162 147 #define DOC_CMD_ERASECYCLE2 0xd0 148 + #define DOC_CMD_READ_STATUS 0x70 149 + #define DOC_CMD_PLANES_STATUS 0x71 163 150 164 151 #define DOC_CMD_RELIABLE_MODE 0x22 165 152 #define DOC_CMD_FAST_MODE 0xa2 ··· 193 174 /* 194 175 * Flash register : DOC_ECCCONF0 195 176 */ 177 + #define DOC_ECCCONF0_WRITE_MODE 0x0000 196 178 #define DOC_ECCCONF0_READ_MODE 0x8000 197 179 #define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000 198 180 #define DOC_ECCCONF0_HAMMING_ENABLE 0x1000 ··· 205 185 */ 206 186 #define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80 207 187 #define DOC_ECCCONF1_UNKOWN1 0x40 208 - #define DOC_ECCCONF1_UNKOWN2 0x20 188 + #define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20 209 189 #define DOC_ECCCONF1_UNKOWN3 0x10 210 190 #define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f 211 191 ··· 243 223 #define DOC_READADDR_ONE_BYTE 0x4000 244 224 #define DOC_READADDR_ADDR_MASK 0x1fff 245 225 226 + /* 227 + * Flash register : DOC_POWERMODE 228 + */ 229 + #define DOC_POWERDOWN_READY 0x80 230 + 231 + /* 232 + * Status of erase and write operation 233 + */ 234 + #define DOC_PLANES_STATUS_FAIL 0x01 235 + #define DOC_PLANES_STATUS_PLANE0_KO 0x02 236 + #define DOC_PLANES_STATUS_PLANE1_KO 0x04 237 + 238 + /* 239 + * DPS key management 240 + * 241 + * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span 242 + * across block boundaries, and define whether these blocks can be read or 243 + * written. 244 + * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and 245 + * page 0 of blocks (4,5) for DPS1. 
246 + */ 247 + #define DOC_LAYOUT_DPS_KEY_LENGTH 8 248 + 246 249 /** 247 250 * struct docg3 - DiskOnChip driver private data 248 251 * @dev: the device currently under control 249 252 * @base: mapped IO space 250 253 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) 251 254 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits 255 + 256 + * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in 257 + * reliable mode 258 + * Fast mode implies more errors than normal mode. 259 + * Reliable mode implies that page 2*n and 2*n+1 are clones. 252 260 * @bbt: bad block table cache 261 + * @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next 262 + * page_write) 263 + * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC 264 + * if 0, use all the 16 bytes. 265 + * @oob_write_buf: prepared OOB for next page_write 253 266 * @debugfs_root: debugfs root node 254 267 */ 255 268 struct docg3 { ··· 290 237 void __iomem *base; 291 238 unsigned int device_id:4; 292 239 unsigned int if_cfg:1; 240 + unsigned int reliable:2; 293 241 int max_block; 294 242 u8 *bbt; 243 + loff_t oob_write_ofs; 244 + int oob_autoecc; 245 + u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE]; 295 246 struct dentry *debugfs_root; 296 247 }; 297 248
+1 -6
drivers/mtd/devices/docprobe.c
··· 241 241 return; 242 242 } 243 243 docfound = 1; 244 - mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL); 245 - 244 + mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL); 246 245 if (!mtd) { 247 246 printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n"); 248 247 iounmap(docptr); ··· 249 250 } 250 251 251 252 this = (struct DiskOnChip *)(&mtd[1]); 252 - 253 - memset((char *)mtd,0, sizeof(struct mtd_info)); 254 - memset((char *)this, 0, sizeof(struct DiskOnChip)); 255 - 256 253 mtd->priv = this; 257 254 this->virtadr = docptr; 258 255 this->physadr = physadr;
-1
drivers/mtd/devices/m25p80.c
··· 992 992 static struct spi_driver m25p80_driver = { 993 993 .driver = { 994 994 .name = "m25p80", 995 - .bus = &spi_bus_type, 996 995 .owner = THIS_MODULE, 997 996 }, 998 997 .id_table = m25p_ids,
-1
drivers/mtd/devices/mtd_dataflash.c
··· 936 936 static struct spi_driver dataflash_driver = { 937 937 .driver = { 938 938 .name = "mtd_dataflash", 939 - .bus = &spi_bus_type, 940 939 .owner = THIS_MODULE, 941 940 .of_match_table = dataflash_dt_ids, 942 941 },
+1 -2
drivers/mtd/devices/sst25l.c
··· 378 378 struct flash_info *flash_info; 379 379 struct sst25l_flash *flash; 380 380 struct flash_platform_data *data; 381 - int ret, i; 381 + int ret; 382 382 383 383 flash_info = sst25l_match_device(spi); 384 384 if (!flash_info) ··· 444 444 static struct spi_driver sst25l_driver = { 445 445 .driver = { 446 446 .name = "sst25l", 447 - .bus = &spi_bus_type, 448 447 .owner = THIS_MODULE, 449 448 }, 450 449 .probe = sst25l_probe,
+41 -40
drivers/mtd/ftl.c
··· 168 168 (offset + sizeof(header)) < max_offset; 169 169 offset += part->mbd.mtd->erasesize ? : 0x2000) { 170 170 171 - err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret, 172 - (unsigned char *)&header); 171 + err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret, 172 + (unsigned char *)&header); 173 173 174 174 if (err) 175 175 return err; ··· 224 224 for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) { 225 225 offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN)) 226 226 << part->header.EraseUnitSize); 227 - ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval, 228 - (unsigned char *)&header); 227 + ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval, 228 + (unsigned char *)&header); 229 229 230 230 if (ret) 231 231 goto out_XferInfo; ··· 289 289 part->EUNInfo[i].Deleted = 0; 290 290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); 291 291 292 - ret = part->mbd.mtd->read(part->mbd.mtd, offset, 293 - part->BlocksPerUnit * sizeof(uint32_t), &retval, 294 - (unsigned char *)part->bam_cache); 292 + ret = mtd_read(part->mbd.mtd, offset, 293 + part->BlocksPerUnit * sizeof(uint32_t), &retval, 294 + (unsigned char *)part->bam_cache); 295 295 296 296 if (ret) 297 297 goto out_bam_cache; ··· 355 355 erase->len = 1 << part->header.EraseUnitSize; 356 356 erase->priv = (u_long)part; 357 357 358 - ret = part->mbd.mtd->erase(part->mbd.mtd, erase); 358 + ret = mtd_erase(part->mbd.mtd, erase); 359 359 360 360 if (!ret) 361 361 xfer->EraseCount++; ··· 422 422 header.LogicalEUN = cpu_to_le16(0xffff); 423 423 header.EraseCount = cpu_to_le32(xfer->EraseCount); 424 424 425 - ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header), 426 - &retlen, (u_char *)&header); 425 + ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen, 426 + (u_char *)&header); 427 427 428 428 if (ret) { 429 429 return ret; ··· 438 438 439 439 for (i = 0; i < nbam; i++, offset += 
sizeof(uint32_t)) { 440 440 441 - ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t), 442 - &retlen, (u_char *)&ctl); 441 + ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 442 + (u_char *)&ctl); 443 443 444 444 if (ret) 445 445 return ret; ··· 485 485 486 486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); 487 487 488 - ret = part->mbd.mtd->read(part->mbd.mtd, offset, 489 - part->BlocksPerUnit * sizeof(uint32_t), 490 - &retlen, (u_char *) (part->bam_cache)); 488 + ret = mtd_read(part->mbd.mtd, offset, 489 + part->BlocksPerUnit * sizeof(uint32_t), &retlen, 490 + (u_char *)(part->bam_cache)); 491 491 492 492 /* mark the cache bad, in case we get an error later */ 493 493 part->bam_index = 0xffff; ··· 503 503 offset = xfer->Offset + 20; /* Bad! */ 504 504 unit = cpu_to_le16(0x7fff); 505 505 506 - ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t), 507 - &retlen, (u_char *) &unit); 506 + ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen, 507 + (u_char *)&unit); 508 508 509 509 if (ret) { 510 510 printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n"); ··· 523 523 break; 524 524 case BLOCK_DATA: 525 525 case BLOCK_REPLACEMENT: 526 - ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE, 527 - &retlen, (u_char *) buf); 526 + ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen, 527 + (u_char *)buf); 528 528 if (ret) { 529 529 printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n"); 530 530 return ret; 531 531 } 532 532 533 533 534 - ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE, 535 - &retlen, (u_char *) buf); 534 + ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen, 535 + (u_char *)buf); 536 536 if (ret) { 537 537 printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n"); 538 538 return ret; ··· 550 550 } 551 551 552 552 /* Write the BAM to the transfer unit */ 553 - ret = 
part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset), 554 - part->BlocksPerUnit * sizeof(int32_t), &retlen, 555 - (u_char *)part->bam_cache); 553 + ret = mtd_write(part->mbd.mtd, 554 + xfer->Offset + le32_to_cpu(part->header.BAMOffset), 555 + part->BlocksPerUnit * sizeof(int32_t), 556 + &retlen, 557 + (u_char *)part->bam_cache); 556 558 if (ret) { 557 559 printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n"); 558 560 return ret; ··· 562 560 563 561 564 562 /* All clear? Then update the LogicalEUN again */ 565 - ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t), 566 - &retlen, (u_char *)&srcunitswap); 563 + ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t), 564 + &retlen, (u_char *)&srcunitswap); 567 565 568 566 if (ret) { 569 567 printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n"); ··· 650 648 if (queued) { 651 649 pr_debug("ftl_cs: waiting for transfer " 652 650 "unit to be prepared...\n"); 653 - if (part->mbd.mtd->sync) 654 - part->mbd.mtd->sync(part->mbd.mtd); 651 + mtd_sync(part->mbd.mtd); 655 652 } else { 656 653 static int ne = 0; 657 654 if (++ne < 5) ··· 748 747 /* Invalidate cache */ 749 748 part->bam_index = 0xffff; 750 749 751 - ret = part->mbd.mtd->read(part->mbd.mtd, 752 - part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), 753 - part->BlocksPerUnit * sizeof(uint32_t), 754 - &retlen, (u_char *) (part->bam_cache)); 750 + ret = mtd_read(part->mbd.mtd, 751 + part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), 752 + part->BlocksPerUnit * sizeof(uint32_t), 753 + &retlen, 754 + (u_char *)(part->bam_cache)); 755 755 756 756 if (ret) { 757 757 printk(KERN_WARNING"ftl: Error reading BAM in find_free\n"); ··· 812 810 else { 813 811 offset = (part->EUNInfo[log_addr / bsize].Offset 814 812 + (log_addr % bsize)); 815 - ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE, 816 - &retlen, (u_char *) buffer); 813 + 
ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, 814 + (u_char *)buffer); 817 815 818 816 if (ret) { 819 817 printk(KERN_WARNING "Error reading MTD device in ftl_read()\n"); ··· 851 849 le32_to_cpu(part->header.BAMOffset)); 852 850 853 851 #ifdef PSYCHO_DEBUG 854 - ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t), 855 - &retlen, (u_char *)&old_addr); 852 + ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 853 + (u_char *)&old_addr); 856 854 if (ret) { 857 855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); 858 856 return ret; ··· 888 886 #endif 889 887 part->bam_cache[blk] = le_virt_addr; 890 888 } 891 - ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t), 892 - &retlen, (u_char *)&le_virt_addr); 889 + ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 890 + (u_char *)&le_virt_addr); 893 891 894 892 if (ret) { 895 893 printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n"); ··· 948 946 part->EUNInfo[part->bam_index].Deleted++; 949 947 offset = (part->EUNInfo[part->bam_index].Offset + 950 948 blk * SECTOR_SIZE); 951 - ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, 952 - buffer); 949 + ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer); 953 950 954 951 if (ret) { 955 952 printk(KERN_NOTICE "ftl_cs: block write failed!\n");
+14 -11
drivers/mtd/inftlcore.c
··· 158 158 ops.oobbuf = buf; 159 159 ops.datbuf = NULL; 160 160 161 - res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 161 + res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 162 162 *retlen = ops.oobretlen; 163 163 return res; 164 164 } ··· 178 178 ops.oobbuf = buf; 179 179 ops.datbuf = NULL; 180 180 181 - res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 181 + res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 182 182 *retlen = ops.oobretlen; 183 183 return res; 184 184 } ··· 199 199 ops.datbuf = buf; 200 200 ops.len = len; 201 201 202 - res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 202 + res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 203 203 *retlen = ops.retlen; 204 204 return res; 205 205 } ··· 343 343 if (BlockMap[block] == BLOCK_NIL) 344 344 continue; 345 345 346 - ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) + 347 - (block * SECTORSIZE), SECTORSIZE, &retlen, 348 - movebuf); 346 + ret = mtd_read(mtd, 347 + (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), 348 + SECTORSIZE, 349 + &retlen, 350 + movebuf); 349 351 if (ret < 0 && !mtd_is_bitflip(ret)) { 350 - ret = mtd->read(mtd, 351 - (inftl->EraseSize * BlockMap[block]) + 352 - (block * SECTORSIZE), SECTORSIZE, 353 - &retlen, movebuf); 352 + ret = mtd_read(mtd, 353 + (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), 354 + SECTORSIZE, 355 + &retlen, 356 + movebuf); 354 357 if (ret != -EIO) 355 358 pr_debug("INFTL: error went away on retry?\n"); 356 359 } ··· 917 914 } else { 918 915 size_t retlen; 919 916 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; 920 - int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer); 917 + int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer); 921 918 922 919 /* Handle corrected bit flips gracefully */ 923 920 if (ret < 0 && !mtd_is_bitflip(ret))
+10 -9
drivers/mtd/inftlmount.c
··· 73 73 * Check for BNAND header first. Then whinge if it's found 74 74 * but later checks fail. 75 75 */ 76 - ret = mtd->read(mtd, block * inftl->EraseSize, 77 - SECTORSIZE, &retlen, buf); 76 + ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE, 77 + &retlen, buf); 78 78 /* We ignore ret in case the ECC of the MediaHeader is invalid 79 79 (which is apparently acceptable) */ 80 80 if (retlen != SECTORSIZE) { ··· 118 118 memcpy(mh, buf, sizeof(struct INFTLMediaHeader)); 119 119 120 120 /* Read the spare media header at offset 4096 */ 121 - mtd->read(mtd, block * inftl->EraseSize + 4096, 122 - SECTORSIZE, &retlen, buf); 121 + mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE, 122 + &retlen, buf); 123 123 if (retlen != SECTORSIZE) { 124 124 printk(KERN_WARNING "INFTL: Unable to read spare " 125 125 "Media Header\n"); ··· 220 220 */ 221 221 instr->addr = ip->Reserved0 * inftl->EraseSize; 222 222 instr->len = inftl->EraseSize; 223 - mtd->erase(mtd, instr); 223 + mtd_erase(mtd, instr); 224 224 } 225 225 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) { 226 226 printk(KERN_WARNING "INFTL: Media Header " ··· 306 306 /* If any of the physical eraseblocks are bad, don't 307 307 use the unit. */ 308 308 for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) { 309 - if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock)) 309 + if (mtd_block_isbad(inftl->mbd.mtd, 310 + i * inftl->EraseSize + physblock)) 310 311 inftl->PUtable[i] = BLOCK_RESERVED; 311 312 } 312 313 } ··· 343 342 int i; 344 343 345 344 for (i = 0; i < len; i += SECTORSIZE) { 346 - if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf)) 345 + if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf)) 347 346 return -1; 348 347 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 349 348 return -1; ··· 394 393 mark only the failed block in the bbt. 
*/ 395 394 for (physblock = 0; physblock < inftl->EraseSize; 396 395 physblock += instr->len, instr->addr += instr->len) { 397 - mtd->erase(inftl->mbd.mtd, instr); 396 + mtd_erase(inftl->mbd.mtd, instr); 398 397 399 398 if (instr->state == MTD_ERASE_FAILED) { 400 399 printk(KERN_WARNING "INFTL: error while formatting block %d\n", ··· 424 423 fail: 425 424 /* could not format, update the bad block table (caller is responsible 426 425 for setting the PUtable to BLOCK_RESERVED on failure) */ 427 - inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr); 426 + mtd_block_markbad(inftl->mbd.mtd, instr->addr); 428 427 return -1; 429 428 } 430 429
-7
drivers/mtd/lpddr/lpddr_cmds.c
··· 70 70 mtd->erase = lpddr_erase; 71 71 mtd->write = lpddr_write_buffers; 72 72 mtd->writev = lpddr_writev; 73 - mtd->read_oob = NULL; 74 - mtd->write_oob = NULL; 75 - mtd->sync = NULL; 76 73 mtd->lock = lpddr_lock; 77 74 mtd->unlock = lpddr_unlock; 78 - mtd->suspend = NULL; 79 - mtd->resume = NULL; 80 75 if (map_is_linear(map)) { 81 76 mtd->point = lpddr_point; 82 77 mtd->unpoint = lpddr_unpoint; 83 78 } 84 - mtd->block_isbad = NULL; 85 - mtd->block_markbad = NULL; 86 79 mtd->size = 1 << lpddr->qinfo->DevSizeShift; 87 80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; 88 81 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
-9
drivers/mtd/maps/Kconfig
··· 242 242 help 243 243 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 244 244 245 - config MTD_BCM963XX 246 - tristate "Map driver for Broadcom BCM963xx boards" 247 - depends on BCM63XX 248 - select MTD_MAP_BANK_WIDTH_2 249 - select MTD_CFI_I1 250 - help 251 - Support for parsing CFE image tag and creating MTD partitions on 252 - Broadcom BCM63xx boards. 253 - 254 245 config MTD_LANTIQ 255 246 tristate "Lantiq SoC NOR support" 256 247 depends on LANTIQ
-1
drivers/mtd/maps/Makefile
··· 55 55 obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o 56 56 obj-$(CONFIG_MTD_VMU) += vmu-flash.o 57 57 obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o 58 - obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o 59 58 obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o 60 59 obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
-277
drivers/mtd/maps/bcm963xx-flash.c
··· 1 - /* 2 - * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> 3 - * Mike Albon <malbon@openwrt.org> 4 - * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 - * 16 - * You should have received a copy of the GNU General Public License 17 - * along with this program; if not, write to the Free Software 18 - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 - */ 20 - 21 - #include <linux/init.h> 22 - #include <linux/kernel.h> 23 - #include <linux/slab.h> 24 - #include <linux/module.h> 25 - #include <linux/mtd/map.h> 26 - #include <linux/mtd/mtd.h> 27 - #include <linux/mtd/partitions.h> 28 - #include <linux/vmalloc.h> 29 - #include <linux/platform_device.h> 30 - #include <linux/io.h> 31 - 32 - #include <asm/mach-bcm63xx/bcm963xx_tag.h> 33 - 34 - #define BCM63XX_BUSWIDTH 2 /* Buswidth */ 35 - #define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ 36 - 37 - #define PFX KBUILD_MODNAME ": " 38 - 39 - static struct mtd_partition *parsed_parts; 40 - 41 - static struct mtd_info *bcm963xx_mtd_info; 42 - 43 - static struct map_info bcm963xx_map = { 44 - .name = "bcm963xx", 45 - .bankwidth = BCM63XX_BUSWIDTH, 46 - }; 47 - 48 - static int parse_cfe_partitions(struct mtd_info *master, 49 - struct mtd_partition **pparts) 50 - { 51 - /* CFE, NVRAM and global Linux are always present */ 52 - int nrparts = 3, curpart = 0; 53 - struct bcm_tag *buf; 54 - struct mtd_partition 
*parts; 55 - int ret; 56 - size_t retlen; 57 - unsigned int rootfsaddr, kerneladdr, spareaddr; 58 - unsigned int rootfslen, kernellen, sparelen, totallen; 59 - int namelen = 0; 60 - int i; 61 - char *boardid; 62 - char *tagversion; 63 - 64 - /* Allocate memory for buffer */ 65 - buf = vmalloc(sizeof(struct bcm_tag)); 66 - if (!buf) 67 - return -ENOMEM; 68 - 69 - /* Get the tag */ 70 - ret = master->read(master, master->erasesize, sizeof(struct bcm_tag), 71 - &retlen, (void *)buf); 72 - if (retlen != sizeof(struct bcm_tag)) { 73 - vfree(buf); 74 - return -EIO; 75 - } 76 - 77 - sscanf(buf->kernel_address, "%u", &kerneladdr); 78 - sscanf(buf->kernel_length, "%u", &kernellen); 79 - sscanf(buf->total_length, "%u", &totallen); 80 - tagversion = &(buf->tag_version[0]); 81 - boardid = &(buf->board_id[0]); 82 - 83 - printk(KERN_INFO PFX "CFE boot tag found with version %s " 84 - "and board type %s\n", tagversion, boardid); 85 - 86 - kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 87 - rootfsaddr = kerneladdr + kernellen; 88 - spareaddr = roundup(totallen, master->erasesize) + master->erasesize; 89 - sparelen = master->size - spareaddr - master->erasesize; 90 - rootfslen = spareaddr - rootfsaddr; 91 - 92 - /* Determine number of partitions */ 93 - namelen = 8; 94 - if (rootfslen > 0) { 95 - nrparts++; 96 - namelen += 6; 97 - }; 98 - if (kernellen > 0) { 99 - nrparts++; 100 - namelen += 6; 101 - }; 102 - 103 - /* Ask kernel for more memory */ 104 - parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); 105 - if (!parts) { 106 - vfree(buf); 107 - return -ENOMEM; 108 - }; 109 - 110 - /* Start building partition list */ 111 - parts[curpart].name = "CFE"; 112 - parts[curpart].offset = 0; 113 - parts[curpart].size = master->erasesize; 114 - curpart++; 115 - 116 - if (kernellen > 0) { 117 - parts[curpart].name = "kernel"; 118 - parts[curpart].offset = kerneladdr; 119 - parts[curpart].size = kernellen; 120 - curpart++; 121 - }; 122 - 123 - if (rootfslen > 0) { 124 - 
parts[curpart].name = "rootfs"; 125 - parts[curpart].offset = rootfsaddr; 126 - parts[curpart].size = rootfslen; 127 - if (sparelen > 0) 128 - parts[curpart].size += sparelen; 129 - curpart++; 130 - }; 131 - 132 - parts[curpart].name = "nvram"; 133 - parts[curpart].offset = master->size - master->erasesize; 134 - parts[curpart].size = master->erasesize; 135 - 136 - /* Global partition "linux" to make easy firmware upgrade */ 137 - curpart++; 138 - parts[curpart].name = "linux"; 139 - parts[curpart].offset = parts[0].size; 140 - parts[curpart].size = master->size - parts[0].size - parts[3].size; 141 - 142 - for (i = 0; i < nrparts; i++) 143 - printk(KERN_INFO PFX "Partition %d is %s offset %lx and " 144 - "length %lx\n", i, parts[i].name, 145 - (long unsigned int)(parts[i].offset), 146 - (long unsigned int)(parts[i].size)); 147 - 148 - printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n", 149 - spareaddr, sparelen); 150 - *pparts = parts; 151 - vfree(buf); 152 - 153 - return nrparts; 154 - }; 155 - 156 - static int bcm963xx_detect_cfe(struct mtd_info *master) 157 - { 158 - int idoffset = 0x4e0; 159 - static char idstring[8] = "CFE1CFE1"; 160 - char buf[9]; 161 - int ret; 162 - size_t retlen; 163 - 164 - ret = master->read(master, idoffset, 8, &retlen, (void *)buf); 165 - buf[retlen] = 0; 166 - printk(KERN_INFO PFX "Read Signature value of %s\n", buf); 167 - 168 - return strncmp(idstring, buf, 8); 169 - } 170 - 171 - static int bcm963xx_probe(struct platform_device *pdev) 172 - { 173 - int err = 0; 174 - int parsed_nr_parts = 0; 175 - char *part_type; 176 - struct resource *r; 177 - 178 - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 179 - if (!r) { 180 - dev_err(&pdev->dev, "no resource supplied\n"); 181 - return -ENODEV; 182 - } 183 - 184 - bcm963xx_map.phys = r->start; 185 - bcm963xx_map.size = resource_size(r); 186 - bcm963xx_map.virt = ioremap(r->start, resource_size(r)); 187 - if (!bcm963xx_map.virt) { 188 - dev_err(&pdev->dev, "failed to 
ioremap\n"); 189 - return -EIO; 190 - } 191 - 192 - dev_info(&pdev->dev, "0x%08lx at 0x%08x\n", 193 - bcm963xx_map.size, bcm963xx_map.phys); 194 - 195 - simple_map_init(&bcm963xx_map); 196 - 197 - bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map); 198 - if (!bcm963xx_mtd_info) { 199 - dev_err(&pdev->dev, "failed to probe using CFI\n"); 200 - bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map); 201 - if (bcm963xx_mtd_info) 202 - goto probe_ok; 203 - dev_err(&pdev->dev, "failed to probe using JEDEC\n"); 204 - err = -EIO; 205 - goto err_probe; 206 - } 207 - 208 - probe_ok: 209 - bcm963xx_mtd_info->owner = THIS_MODULE; 210 - 211 - /* This is mutually exclusive */ 212 - if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) { 213 - dev_info(&pdev->dev, "CFE bootloader detected\n"); 214 - if (parsed_nr_parts == 0) { 215 - int ret = parse_cfe_partitions(bcm963xx_mtd_info, 216 - &parsed_parts); 217 - if (ret > 0) { 218 - part_type = "CFE"; 219 - parsed_nr_parts = ret; 220 - } 221 - } 222 - } else { 223 - dev_info(&pdev->dev, "unsupported bootloader\n"); 224 - err = -ENODEV; 225 - goto err_probe; 226 - } 227 - 228 - return mtd_device_register(bcm963xx_mtd_info, parsed_parts, 229 - parsed_nr_parts); 230 - 231 - err_probe: 232 - iounmap(bcm963xx_map.virt); 233 - return err; 234 - } 235 - 236 - static int bcm963xx_remove(struct platform_device *pdev) 237 - { 238 - if (bcm963xx_mtd_info) { 239 - mtd_device_unregister(bcm963xx_mtd_info); 240 - map_destroy(bcm963xx_mtd_info); 241 - } 242 - 243 - if (bcm963xx_map.virt) { 244 - iounmap(bcm963xx_map.virt); 245 - bcm963xx_map.virt = 0; 246 - } 247 - 248 - return 0; 249 - } 250 - 251 - static struct platform_driver bcm63xx_mtd_dev = { 252 - .probe = bcm963xx_probe, 253 - .remove = bcm963xx_remove, 254 - .driver = { 255 - .name = "bcm963xx-flash", 256 - .owner = THIS_MODULE, 257 - }, 258 - }; 259 - 260 - static int __init bcm963xx_mtd_init(void) 261 - { 262 - return platform_driver_register(&bcm63xx_mtd_dev); 263 - } 
264 - 265 - static void __exit bcm963xx_mtd_exit(void) 266 - { 267 - platform_driver_unregister(&bcm63xx_mtd_dev); 268 - } 269 - 270 - module_init(bcm963xx_mtd_init); 271 - module_exit(bcm963xx_mtd_exit); 272 - 273 - MODULE_LICENSE("GPL"); 274 - MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot"); 275 - MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>"); 276 - MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); 277 - MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+1 -11
drivers/mtd/maps/bfin-async-flash.c
··· 190 190 }, 191 191 }; 192 192 193 - static int __init bfin_flash_init(void) 194 - { 195 - return platform_driver_register(&bfin_flash_driver); 196 - } 197 - module_init(bfin_flash_init); 198 - 199 - static void __exit bfin_flash_exit(void) 200 - { 201 - platform_driver_unregister(&bfin_flash_driver); 202 - } 203 - module_exit(bfin_flash_exit); 193 + module_platform_driver(bfin_flash_driver); 204 194 205 195 MODULE_LICENSE("GPL"); 206 196 MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
+1 -11
drivers/mtd/maps/gpio-addr-flash.c
··· 279 279 }, 280 280 }; 281 281 282 - static int __init gpio_flash_init(void) 283 - { 284 - return platform_driver_register(&gpio_flash_driver); 285 - } 286 - module_init(gpio_flash_init); 287 - 288 - static void __exit gpio_flash_exit(void) 289 - { 290 - platform_driver_unregister(&gpio_flash_driver); 291 - } 292 - module_exit(gpio_flash_exit); 282 + module_platform_driver(gpio_flash_driver); 293 283 294 284 MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); 295 285 MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
+1 -11
drivers/mtd/maps/ixp2000.c
··· 246 246 }, 247 247 }; 248 248 249 - static int __init ixp2000_flash_init(void) 250 - { 251 - return platform_driver_register(&ixp2000_flash_driver); 252 - } 249 + module_platform_driver(ixp2000_flash_driver); 253 250 254 - static void __exit ixp2000_flash_exit(void) 255 - { 256 - platform_driver_unregister(&ixp2000_flash_driver); 257 - } 258 - 259 - module_init(ixp2000_flash_init); 260 - module_exit(ixp2000_flash_exit); 261 251 MODULE_LICENSE("GPL"); 262 252 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); 263 253 MODULE_ALIAS("platform:IXP2000-Flash");
+1 -13
drivers/mtd/maps/ixp4xx.c
··· 270 270 }, 271 271 }; 272 272 273 - static int __init ixp4xx_flash_init(void) 274 - { 275 - return platform_driver_register(&ixp4xx_flash_driver); 276 - } 277 - 278 - static void __exit ixp4xx_flash_exit(void) 279 - { 280 - platform_driver_unregister(&ixp4xx_flash_driver); 281 - } 282 - 283 - 284 - module_init(ixp4xx_flash_init); 285 - module_exit(ixp4xx_flash_exit); 273 + module_platform_driver(ixp4xx_flash_driver); 286 274 287 275 MODULE_LICENSE("GPL"); 288 276 MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
+1 -5
drivers/mtd/maps/lantiq-flash.c
··· 159 159 if (!ltq_mtd->mtd) { 160 160 dev_err(&pdev->dev, "probing failed\n"); 161 161 err = -ENXIO; 162 - goto err_unmap; 162 + goto err_free; 163 163 } 164 164 165 165 ltq_mtd->mtd->owner = THIS_MODULE; ··· 179 179 180 180 err_destroy: 181 181 map_destroy(ltq_mtd->mtd); 182 - err_unmap: 183 - iounmap(ltq_mtd->map->virt); 184 182 err_free: 185 183 kfree(ltq_mtd->map); 186 184 err_out: ··· 196 198 mtd_device_unregister(ltq_mtd->mtd); 197 199 map_destroy(ltq_mtd->mtd); 198 200 } 199 - if (ltq_mtd->map->virt) 200 - iounmap(ltq_mtd->map->virt); 201 201 kfree(ltq_mtd->map); 202 202 kfree(ltq_mtd); 203 203 }
+1 -11
drivers/mtd/maps/latch-addr-flash.c
··· 223 223 }, 224 224 }; 225 225 226 - static int __init latch_addr_flash_init(void) 227 - { 228 - return platform_driver_register(&latch_addr_flash_driver); 229 - } 230 - module_init(latch_addr_flash_init); 231 - 232 - static void __exit latch_addr_flash_exit(void) 233 - { 234 - platform_driver_unregister(&latch_addr_flash_driver); 235 - } 236 - module_exit(latch_addr_flash_exit); 226 + module_platform_driver(latch_addr_flash_driver); 237 227 238 228 MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); 239 229 MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
+6 -4
drivers/mtd/maps/physmap.c
··· 85 85 struct physmap_flash_data *physmap_data; 86 86 struct physmap_flash_info *info; 87 87 const char **probe_type; 88 + const char **part_types; 88 89 int err = 0; 89 90 int i; 90 91 int devices_found = 0; ··· 172 171 if (err) 173 172 goto err_out; 174 173 175 - mtd_device_parse_register(info->cmtd, part_probe_types, 0, 174 + part_types = physmap_data->part_probe_types ? : part_probe_types; 175 + 176 + mtd_device_parse_register(info->cmtd, part_types, 0, 176 177 physmap_data->parts, physmap_data->nr_parts); 177 178 return 0; 178 179 ··· 190 187 int i; 191 188 192 189 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) 193 - if (info->mtd[i]->suspend && info->mtd[i]->resume) 194 - if (info->mtd[i]->suspend(info->mtd[i]) == 0) 195 - info->mtd[i]->resume(info->mtd[i]); 190 + if (mtd_suspend(info->mtd[i]) == 0) 191 + mtd_resume(info->mtd[i]); 196 192 } 197 193 #else 198 194 #define physmap_flash_shutdown NULL
+1 -12
drivers/mtd/maps/physmap_of.c
··· 338 338 .remove = of_flash_remove, 339 339 }; 340 340 341 - static int __init of_flash_init(void) 342 - { 343 - return platform_driver_register(&of_flash_driver); 344 - } 345 - 346 - static void __exit of_flash_exit(void) 347 - { 348 - platform_driver_unregister(&of_flash_driver); 349 - } 350 - 351 - module_init(of_flash_init); 352 - module_exit(of_flash_exit); 341 + module_platform_driver(of_flash_driver); 353 342 354 343 MODULE_LICENSE("GPL"); 355 344 MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
+3 -14
drivers/mtd/maps/pxa2xx-flash.c
··· 125 125 { 126 126 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 127 127 128 - if (info && info->mtd->suspend(info->mtd) == 0) 129 - info->mtd->resume(info->mtd); 128 + if (info && mtd_suspend(info->mtd) == 0) 129 + mtd_resume(info->mtd); 130 130 } 131 131 #else 132 132 #define pxa2xx_flash_shutdown NULL ··· 142 142 .shutdown = pxa2xx_flash_shutdown, 143 143 }; 144 144 145 - static int __init init_pxa2xx_flash(void) 146 - { 147 - return platform_driver_register(&pxa2xx_flash_driver); 148 - } 149 - 150 - static void __exit cleanup_pxa2xx_flash(void) 151 - { 152 - platform_driver_unregister(&pxa2xx_flash_driver); 153 - } 154 - 155 - module_init(init_pxa2xx_flash); 156 - module_exit(cleanup_pxa2xx_flash); 145 + module_platform_driver(pxa2xx_flash_driver); 157 146 158 147 MODULE_LICENSE("GPL"); 159 148 MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
+3 -15
drivers/mtd/maps/rbtx4939-flash.c
··· 119 119 { 120 120 struct rbtx4939_flash_info *info = platform_get_drvdata(dev); 121 121 122 - if (info->mtd->suspend && info->mtd->resume) 123 - if (info->mtd->suspend(info->mtd) == 0) 124 - info->mtd->resume(info->mtd); 122 + if (mtd_suspend(info->mtd) == 0) 123 + mtd_resume(info->mtd); 125 124 } 126 125 #else 127 126 #define rbtx4939_flash_shutdown NULL ··· 136 137 }, 137 138 }; 138 139 139 - static int __init rbtx4939_flash_init(void) 140 - { 141 - return platform_driver_register(&rbtx4939_flash_driver); 142 - } 143 - 144 - static void __exit rbtx4939_flash_exit(void) 145 - { 146 - platform_driver_unregister(&rbtx4939_flash_driver); 147 - } 148 - 149 - module_init(rbtx4939_flash_init); 150 - module_exit(rbtx4939_flash_exit); 140 + module_platform_driver(rbtx4939_flash_driver); 151 141 152 142 MODULE_LICENSE("GPL"); 153 143 MODULE_DESCRIPTION("RBTX4939 MTD map driver");
+3 -14
drivers/mtd/maps/sa1100-flash.c
··· 377 377 static void sa1100_mtd_shutdown(struct platform_device *dev) 378 378 { 379 379 struct sa_info *info = platform_get_drvdata(dev); 380 - if (info && info->mtd->suspend(info->mtd) == 0) 381 - info->mtd->resume(info->mtd); 380 + if (info && mtd_suspend(info->mtd) == 0) 381 + mtd_resume(info->mtd); 382 382 } 383 383 #else 384 384 #define sa1100_mtd_shutdown NULL ··· 394 394 }, 395 395 }; 396 396 397 - static int __init sa1100_mtd_init(void) 398 - { 399 - return platform_driver_register(&sa1100_mtd_driver); 400 - } 401 - 402 - static void __exit sa1100_mtd_exit(void) 403 - { 404 - platform_driver_unregister(&sa1100_mtd_driver); 405 - } 406 - 407 - module_init(sa1100_mtd_init); 408 - module_exit(sa1100_mtd_exit); 397 + module_platform_driver(sa1100_mtd_driver); 409 398 410 399 MODULE_AUTHOR("Nicolas Pitre"); 411 400 MODULE_DESCRIPTION("SA1100 CFI map driver");
+1 -2
drivers/mtd/maps/scb2_flash.c
··· 204 204 return; 205 205 206 206 /* disable flash writes */ 207 - if (scb2_mtd->lock) 208 - scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size); 207 + mtd_lock(scb2_mtd, 0, scb2_mtd->size); 209 208 210 209 mtd_device_unregister(scb2_mtd); 211 210 map_destroy(scb2_mtd);
+1 -12
drivers/mtd/maps/sun_uflash.c
··· 158 158 .remove = __devexit_p(uflash_remove), 159 159 }; 160 160 161 - static int __init uflash_init(void) 162 - { 163 - return platform_driver_register(&uflash_driver); 164 - } 165 - 166 - static void __exit uflash_exit(void) 167 - { 168 - platform_driver_unregister(&uflash_driver); 169 - } 170 - 171 - module_init(uflash_init); 172 - module_exit(uflash_exit); 161 + module_platform_driver(uflash_driver);
+2 -1
drivers/mtd/mtd_blkdevs.c
··· 215 215 216 216 mutex_lock(&dev->lock); 217 217 218 - if (dev->open++) 218 + if (dev->open) 219 219 goto unlock; 220 220 221 221 kref_get(&dev->ref); ··· 235 235 goto error_release; 236 236 237 237 unlock: 238 + dev->open++; 238 239 mutex_unlock(&dev->lock); 239 240 blktrans_dev_put(dev); 240 241 return ret;
+9 -12
drivers/mtd/mtdblock.c
··· 85 85 set_current_state(TASK_INTERRUPTIBLE); 86 86 add_wait_queue(&wait_q, &wait); 87 87 88 - ret = mtd->erase(mtd, &erase); 88 + ret = mtd_erase(mtd, &erase); 89 89 if (ret) { 90 90 set_current_state(TASK_RUNNING); 91 91 remove_wait_queue(&wait_q, &wait); ··· 102 102 * Next, write the data to flash. 103 103 */ 104 104 105 - ret = mtd->write(mtd, pos, len, &retlen, buf); 105 + ret = mtd_write(mtd, pos, len, &retlen, buf); 106 106 if (ret) 107 107 return ret; 108 108 if (retlen != len) ··· 152 152 mtd->name, pos, len); 153 153 154 154 if (!sect_size) 155 - return mtd->write(mtd, pos, len, &retlen, buf); 155 + return mtd_write(mtd, pos, len, &retlen, buf); 156 156 157 157 while (len > 0) { 158 158 unsigned long sect_start = (pos/sect_size)*sect_size; ··· 184 184 mtdblk->cache_offset != sect_start) { 185 185 /* fill the cache with the current sector */ 186 186 mtdblk->cache_state = STATE_EMPTY; 187 - ret = mtd->read(mtd, sect_start, sect_size, 188 - &retlen, mtdblk->cache_data); 187 + ret = mtd_read(mtd, sect_start, sect_size, 188 + &retlen, mtdblk->cache_data); 189 189 if (ret) 190 190 return ret; 191 191 if (retlen != sect_size) ··· 222 222 mtd->name, pos, len); 223 223 224 224 if (!sect_size) 225 - return mtd->read(mtd, pos, len, &retlen, buf); 225 + return mtd_read(mtd, pos, len, &retlen, buf); 226 226 227 227 while (len > 0) { 228 228 unsigned long sect_start = (pos/sect_size)*sect_size; ··· 241 241 mtdblk->cache_offset == sect_start) { 242 242 memcpy (buf, mtdblk->cache_data + offset, size); 243 243 } else { 244 - ret = mtd->read(mtd, pos, size, &retlen, buf); 244 + ret = mtd_read(mtd, pos, size, &retlen, buf); 245 245 if (ret) 246 246 return ret; 247 247 if (retlen != size) ··· 322 322 323 323 if (!--mtdblk->count) { 324 324 /* It was the last usage. 
Free the cache */ 325 - if (mbd->mtd->sync) 326 - mbd->mtd->sync(mbd->mtd); 325 + mtd_sync(mbd->mtd); 327 326 vfree(mtdblk->cache_data); 328 327 } 329 328 ··· 340 341 mutex_lock(&mtdblk->cache_mutex); 341 342 write_cached_data(mtdblk); 342 343 mutex_unlock(&mtdblk->cache_mutex); 343 - 344 - if (dev->mtd->sync) 345 - dev->mtd->sync(dev->mtd); 344 + mtd_sync(dev->mtd); 346 345 return 0; 347 346 } 348 347
+2 -2
drivers/mtd/mtdblock_ro.c
··· 30 30 { 31 31 size_t retlen; 32 32 33 - if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf)) 33 + if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf)) 34 34 return 1; 35 35 return 0; 36 36 } ··· 40 40 { 41 41 size_t retlen; 42 42 43 - if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf)) 43 + if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf)) 44 44 return 1; 45 45 return 0; 46 46 }
+89 -114
drivers/mtd/mtdchar.c
··· 51 51 enum mtd_file_modes mode; 52 52 }; 53 53 54 - static loff_t mtd_lseek (struct file *file, loff_t offset, int orig) 54 + static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig) 55 55 { 56 56 struct mtd_file_info *mfi = file->private_data; 57 57 struct mtd_info *mtd = mfi->mtd; ··· 77 77 78 78 79 79 80 - static int mtd_open(struct inode *inode, struct file *file) 80 + static int mtdchar_open(struct inode *inode, struct file *file) 81 81 { 82 82 int minor = iminor(inode); 83 83 int devnum = minor >> 1; ··· 142 142 out: 143 143 mutex_unlock(&mtd_mutex); 144 144 return ret; 145 - } /* mtd_open */ 145 + } /* mtdchar_open */ 146 146 147 147 /*====================================================================*/ 148 148 149 - static int mtd_close(struct inode *inode, struct file *file) 149 + static int mtdchar_close(struct inode *inode, struct file *file) 150 150 { 151 151 struct mtd_file_info *mfi = file->private_data; 152 152 struct mtd_info *mtd = mfi->mtd; ··· 154 154 pr_debug("MTD_close\n"); 155 155 156 156 /* Only sync if opened RW */ 157 - if ((file->f_mode & FMODE_WRITE) && mtd->sync) 158 - mtd->sync(mtd); 157 + if ((file->f_mode & FMODE_WRITE)) 158 + mtd_sync(mtd); 159 159 160 160 iput(mfi->ino); 161 161 ··· 164 164 kfree(mfi); 165 165 166 166 return 0; 167 - } /* mtd_close */ 167 + } /* mtdchar_close */ 168 168 169 169 /* Back in June 2001, dwmw2 wrote: 170 170 * ··· 184 184 * alignment requirements are not met in the NAND subdriver. 
185 185 */ 186 186 187 - static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) 187 + static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, 188 + loff_t *ppos) 188 189 { 189 190 struct mtd_file_info *mfi = file->private_data; 190 191 struct mtd_info *mtd = mfi->mtd; 191 - size_t retlen=0; 192 + size_t retlen; 192 193 size_t total_retlen=0; 193 194 int ret=0; 194 195 int len; ··· 213 212 214 213 switch (mfi->mode) { 215 214 case MTD_FILE_MODE_OTP_FACTORY: 216 - ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); 215 + ret = mtd_read_fact_prot_reg(mtd, *ppos, len, 216 + &retlen, kbuf); 217 217 break; 218 218 case MTD_FILE_MODE_OTP_USER: 219 - ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 219 + ret = mtd_read_user_prot_reg(mtd, *ppos, len, 220 + &retlen, kbuf); 220 221 break; 221 222 case MTD_FILE_MODE_RAW: 222 223 { ··· 229 226 ops.oobbuf = NULL; 230 227 ops.len = len; 231 228 232 - ret = mtd->read_oob(mtd, *ppos, &ops); 229 + ret = mtd_read_oob(mtd, *ppos, &ops); 233 230 retlen = ops.retlen; 234 231 break; 235 232 } 236 233 default: 237 - ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); 234 + ret = mtd_read(mtd, *ppos, len, &retlen, kbuf); 238 235 } 239 236 /* Nand returns -EBADMSG on ECC errors, but it returns 240 237 * the data. 
For our userspace tools it is important ··· 268 265 269 266 kfree(kbuf); 270 267 return total_retlen; 271 - } /* mtd_read */ 268 + } /* mtdchar_read */ 272 269 273 - static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos) 270 + static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count, 271 + loff_t *ppos) 274 272 { 275 273 struct mtd_file_info *mfi = file->private_data; 276 274 struct mtd_info *mtd = mfi->mtd; ··· 310 306 ret = -EROFS; 311 307 break; 312 308 case MTD_FILE_MODE_OTP_USER: 313 - if (!mtd->write_user_prot_reg) { 314 - ret = -EOPNOTSUPP; 315 - break; 316 - } 317 - ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 309 + ret = mtd_write_user_prot_reg(mtd, *ppos, len, 310 + &retlen, kbuf); 318 311 break; 319 312 320 313 case MTD_FILE_MODE_RAW: ··· 324 323 ops.ooboffs = 0; 325 324 ops.len = len; 326 325 327 - ret = mtd->write_oob(mtd, *ppos, &ops); 326 + ret = mtd_write_oob(mtd, *ppos, &ops); 328 327 retlen = ops.retlen; 329 328 break; 330 329 } 331 330 332 331 default: 333 - ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf); 332 + ret = mtd_write(mtd, *ppos, len, &retlen, kbuf); 334 333 } 335 334 if (!ret) { 336 335 *ppos += retlen; ··· 346 345 347 346 kfree(kbuf); 348 347 return total_retlen; 349 - } /* mtd_write */ 348 + } /* mtdchar_write */ 350 349 351 350 /*====================================================================== 352 351 ··· 362 361 static int otp_select_filemode(struct mtd_file_info *mfi, int mode) 363 362 { 364 363 struct mtd_info *mtd = mfi->mtd; 364 + size_t retlen; 365 365 int ret = 0; 366 + 367 + /* 368 + * Make a fake call to mtd_read_fact_prot_reg() to check if OTP 369 + * operations are supported. 
370 + */ 371 + if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP) 372 + return -EOPNOTSUPP; 366 373 367 374 switch (mode) { 368 375 case MTD_OTP_FACTORY: 369 - if (!mtd->read_fact_prot_reg) 370 - ret = -EOPNOTSUPP; 371 - else 372 - mfi->mode = MTD_FILE_MODE_OTP_FACTORY; 376 + mfi->mode = MTD_FILE_MODE_OTP_FACTORY; 373 377 break; 374 378 case MTD_OTP_USER: 375 - if (!mtd->read_fact_prot_reg) 376 - ret = -EOPNOTSUPP; 377 - else 378 - mfi->mode = MTD_FILE_MODE_OTP_USER; 379 + mfi->mode = MTD_FILE_MODE_OTP_USER; 379 380 break; 380 381 default: 381 382 ret = -EINVAL; ··· 390 387 # define otp_select_filemode(f,m) -EOPNOTSUPP 391 388 #endif 392 389 393 - static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, 390 + static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, 394 391 uint64_t start, uint32_t length, void __user *ptr, 395 392 uint32_t __user *retp) 396 393 { ··· 427 424 return PTR_ERR(ops.oobbuf); 428 425 429 426 start &= ~((uint64_t)mtd->writesize - 1); 430 - ret = mtd->write_oob(mtd, start, &ops); 427 + ret = mtd_write_oob(mtd, start, &ops); 431 428 432 429 if (ops.oobretlen > 0xFFFFFFFFU) 433 430 ret = -EOVERFLOW; ··· 439 436 return ret; 440 437 } 441 438 442 - static int mtd_do_readoob(struct file *file, struct mtd_info *mtd, 439 + static int mtdchar_readoob(struct file *file, struct mtd_info *mtd, 443 440 uint64_t start, uint32_t length, void __user *ptr, 444 441 uint32_t __user *retp) 445 442 { ··· 450 447 if (length > 4096) 451 448 return -EINVAL; 452 449 453 - if (!mtd->read_oob) 454 - ret = -EOPNOTSUPP; 455 - else 456 - ret = access_ok(VERIFY_WRITE, ptr, 457 - length) ? 
0 : -EFAULT; 458 - if (ret) 459 - return ret; 450 + if (!access_ok(VERIFY_WRITE, ptr, length)) 451 + return -EFAULT; 460 452 461 453 ops.ooblen = length; 462 454 ops.ooboffs = start & (mtd->writesize - 1); ··· 467 469 return -ENOMEM; 468 470 469 471 start &= ~((uint64_t)mtd->writesize - 1); 470 - ret = mtd->read_oob(mtd, start, &ops); 472 + ret = mtd_read_oob(mtd, start, &ops); 471 473 472 474 if (put_user(ops.oobretlen, retp)) 473 475 ret = -EFAULT; ··· 528 530 return 0; 529 531 } 530 532 531 - static int mtd_blkpg_ioctl(struct mtd_info *mtd, 533 + static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, 532 534 struct blkpg_ioctl_arg __user *arg) 533 535 { 534 536 struct blkpg_ioctl_arg a; ··· 564 566 } 565 567 } 566 568 567 - static int mtd_write_ioctl(struct mtd_info *mtd, 569 + static int mtdchar_write_ioctl(struct mtd_info *mtd, 568 570 struct mtd_write_req __user *argp) 569 571 { 570 572 struct mtd_write_req req; ··· 605 607 ops.oobbuf = NULL; 606 608 } 607 609 608 - ret = mtd->write_oob(mtd, (loff_t)req.start, &ops); 610 + ret = mtd_write_oob(mtd, (loff_t)req.start, &ops); 609 611 610 612 kfree(ops.datbuf); 611 613 kfree(ops.oobbuf); ··· 613 615 return ret; 614 616 } 615 617 616 - static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 618 + static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) 617 619 { 618 620 struct mtd_file_info *mfi = file->private_data; 619 621 struct mtd_info *mtd = mfi->mtd; ··· 727 729 wq_head is no longer there when the 728 730 callback routine tries to wake us up. 
729 731 */ 730 - ret = mtd->erase(mtd, erase); 732 + ret = mtd_erase(mtd, erase); 731 733 if (!ret) { 732 734 set_current_state(TASK_UNINTERRUPTIBLE); 733 735 add_wait_queue(&waitq, &wait); ··· 753 755 if (copy_from_user(&buf, argp, sizeof(buf))) 754 756 ret = -EFAULT; 755 757 else 756 - ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, 758 + ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, 757 759 buf.ptr, &buf_user->length); 758 760 break; 759 761 } ··· 767 769 if (copy_from_user(&buf, argp, sizeof(buf))) 768 770 ret = -EFAULT; 769 771 else 770 - ret = mtd_do_readoob(file, mtd, buf.start, buf.length, 772 + ret = mtdchar_readoob(file, mtd, buf.start, buf.length, 771 773 buf.ptr, &buf_user->start); 772 774 break; 773 775 } ··· 780 782 if (copy_from_user(&buf, argp, sizeof(buf))) 781 783 ret = -EFAULT; 782 784 else 783 - ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, 785 + ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, 784 786 (void __user *)(uintptr_t)buf.usr_ptr, 785 787 &buf_user->length); 786 788 break; ··· 794 796 if (copy_from_user(&buf, argp, sizeof(buf))) 795 797 ret = -EFAULT; 796 798 else 797 - ret = mtd_do_readoob(file, mtd, buf.start, buf.length, 799 + ret = mtdchar_readoob(file, mtd, buf.start, buf.length, 798 800 (void __user *)(uintptr_t)buf.usr_ptr, 799 801 &buf_user->length); 800 802 break; ··· 802 804 803 805 case MEMWRITE: 804 806 { 805 - ret = mtd_write_ioctl(mtd, 807 + ret = mtdchar_write_ioctl(mtd, 806 808 (struct mtd_write_req __user *)arg); 807 809 break; 808 810 } ··· 814 816 if (copy_from_user(&einfo, argp, sizeof(einfo))) 815 817 return -EFAULT; 816 818 817 - if (!mtd->lock) 818 - ret = -EOPNOTSUPP; 819 - else 820 - ret = mtd->lock(mtd, einfo.start, einfo.length); 819 + ret = mtd_lock(mtd, einfo.start, einfo.length); 821 820 break; 822 821 } 823 822 ··· 825 830 if (copy_from_user(&einfo, argp, sizeof(einfo))) 826 831 return -EFAULT; 827 832 828 - if (!mtd->unlock) 829 - ret = -EOPNOTSUPP; 830 - else 831 - 
ret = mtd->unlock(mtd, einfo.start, einfo.length); 833 + ret = mtd_unlock(mtd, einfo.start, einfo.length); 832 834 break; 833 835 } 834 836 ··· 836 844 if (copy_from_user(&einfo, argp, sizeof(einfo))) 837 845 return -EFAULT; 838 846 839 - if (!mtd->is_locked) 840 - ret = -EOPNOTSUPP; 841 - else 842 - ret = mtd->is_locked(mtd, einfo.start, einfo.length); 847 + ret = mtd_is_locked(mtd, einfo.start, einfo.length); 843 848 break; 844 849 } 845 850 ··· 867 878 868 879 if (copy_from_user(&offs, argp, sizeof(loff_t))) 869 880 return -EFAULT; 870 - if (!mtd->block_isbad) 871 - ret = -EOPNOTSUPP; 872 - else 873 - return mtd->block_isbad(mtd, offs); 881 + return mtd_block_isbad(mtd, offs); 874 882 break; 875 883 } 876 884 ··· 877 891 878 892 if (copy_from_user(&offs, argp, sizeof(loff_t))) 879 893 return -EFAULT; 880 - if (!mtd->block_markbad) 881 - ret = -EOPNOTSUPP; 882 - else 883 - return mtd->block_markbad(mtd, offs); 894 + return mtd_block_markbad(mtd, offs); 884 895 break; 885 896 } 886 897 ··· 902 919 struct otp_info *buf = kmalloc(4096, GFP_KERNEL); 903 920 if (!buf) 904 921 return -ENOMEM; 905 - ret = -EOPNOTSUPP; 906 922 switch (mfi->mode) { 907 923 case MTD_FILE_MODE_OTP_FACTORY: 908 - if (mtd->get_fact_prot_info) 909 - ret = mtd->get_fact_prot_info(mtd, buf, 4096); 924 + ret = mtd_get_fact_prot_info(mtd, buf, 4096); 910 925 break; 911 926 case MTD_FILE_MODE_OTP_USER: 912 - if (mtd->get_user_prot_info) 913 - ret = mtd->get_user_prot_info(mtd, buf, 4096); 927 + ret = mtd_get_user_prot_info(mtd, buf, 4096); 914 928 break; 915 929 default: 930 + ret = -EINVAL; 916 931 break; 917 932 } 918 933 if (ret >= 0) { ··· 934 953 return -EINVAL; 935 954 if (copy_from_user(&oinfo, argp, sizeof(oinfo))) 936 955 return -EFAULT; 937 - if (!mtd->lock_user_prot_reg) 938 - return -EOPNOTSUPP; 939 - ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length); 956 + ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length); 940 957 break; 941 958 } 942 959 #endif ··· 978 999 
break; 979 1000 980 1001 case MTD_FILE_MODE_RAW: 981 - if (!mtd->read_oob || !mtd->write_oob) 1002 + if (!mtd_has_oob(mtd)) 982 1003 return -EOPNOTSUPP; 983 1004 mfi->mode = arg; 984 1005 ··· 993 1014 994 1015 case BLKPG: 995 1016 { 996 - ret = mtd_blkpg_ioctl(mtd, 1017 + ret = mtdchar_blkpg_ioctl(mtd, 997 1018 (struct blkpg_ioctl_arg __user *)arg); 998 1019 break; 999 1020 } ··· 1012 1033 return ret; 1013 1034 } /* memory_ioctl */ 1014 1035 1015 - static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg) 1036 + static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg) 1016 1037 { 1017 1038 int ret; 1018 1039 1019 1040 mutex_lock(&mtd_mutex); 1020 - ret = mtd_ioctl(file, cmd, arg); 1041 + ret = mtdchar_ioctl(file, cmd, arg); 1021 1042 mutex_unlock(&mtd_mutex); 1022 1043 1023 1044 return ret; ··· 1034 1055 #define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32) 1035 1056 #define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32) 1036 1057 1037 - static long mtd_compat_ioctl(struct file *file, unsigned int cmd, 1058 + static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, 1038 1059 unsigned long arg) 1039 1060 { 1040 1061 struct mtd_file_info *mfi = file->private_data; ··· 1053 1074 if (copy_from_user(&buf, argp, sizeof(buf))) 1054 1075 ret = -EFAULT; 1055 1076 else 1056 - ret = mtd_do_writeoob(file, mtd, buf.start, 1077 + ret = mtdchar_writeoob(file, mtd, buf.start, 1057 1078 buf.length, compat_ptr(buf.ptr), 1058 1079 &buf_user->length); 1059 1080 break; ··· 1068 1089 if (copy_from_user(&buf, argp, sizeof(buf))) 1069 1090 ret = -EFAULT; 1070 1091 else 1071 - ret = mtd_do_readoob(file, mtd, buf.start, 1092 + ret = mtdchar_readoob(file, mtd, buf.start, 1072 1093 buf.length, compat_ptr(buf.ptr), 1073 1094 &buf_user->start); 1074 1095 break; 1075 1096 } 1076 1097 default: 1077 - ret = mtd_ioctl(file, cmd, (unsigned long)argp); 1098 + ret = mtdchar_ioctl(file, cmd, (unsigned long)argp); 1078 1099 } 1079 1100 1080 1101 
mutex_unlock(&mtd_mutex); ··· 1090 1111 * mappings) 1091 1112 */ 1092 1113 #ifndef CONFIG_MMU 1093 - static unsigned long mtd_get_unmapped_area(struct file *file, 1114 + static unsigned long mtdchar_get_unmapped_area(struct file *file, 1094 1115 unsigned long addr, 1095 1116 unsigned long len, 1096 1117 unsigned long pgoff, ··· 1098 1119 { 1099 1120 struct mtd_file_info *mfi = file->private_data; 1100 1121 struct mtd_info *mtd = mfi->mtd; 1122 + unsigned long offset; 1123 + int ret; 1101 1124 1102 - if (mtd->get_unmapped_area) { 1103 - unsigned long offset; 1125 + if (addr != 0) 1126 + return (unsigned long) -EINVAL; 1104 1127 1105 - if (addr != 0) 1106 - return (unsigned long) -EINVAL; 1128 + if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 1129 + return (unsigned long) -EINVAL; 1107 1130 1108 - if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 1109 - return (unsigned long) -EINVAL; 1131 + offset = pgoff << PAGE_SHIFT; 1132 + if (offset > mtd->size - len) 1133 + return (unsigned long) -EINVAL; 1110 1134 1111 - offset = pgoff << PAGE_SHIFT; 1112 - if (offset > mtd->size - len) 1113 - return (unsigned long) -EINVAL; 1114 - 1115 - return mtd->get_unmapped_area(mtd, len, offset, flags); 1116 - } 1117 - 1118 - /* can't map directly */ 1119 - return (unsigned long) -ENOSYS; 1135 + ret = mtd_get_unmapped_area(mtd, len, offset, flags); 1136 + return ret == -EOPNOTSUPP ? 
-ENOSYS : ret; 1120 1137 } 1121 1138 #endif 1122 1139 1123 1140 /* 1124 1141 * set up a mapping for shared memory segments 1125 1142 */ 1126 - static int mtd_mmap(struct file *file, struct vm_area_struct *vma) 1143 + static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) 1127 1144 { 1128 1145 #ifdef CONFIG_MMU 1129 1146 struct mtd_file_info *mfi = file->private_data; ··· 1160 1185 1161 1186 static const struct file_operations mtd_fops = { 1162 1187 .owner = THIS_MODULE, 1163 - .llseek = mtd_lseek, 1164 - .read = mtd_read, 1165 - .write = mtd_write, 1166 - .unlocked_ioctl = mtd_unlocked_ioctl, 1188 + .llseek = mtdchar_lseek, 1189 + .read = mtdchar_read, 1190 + .write = mtdchar_write, 1191 + .unlocked_ioctl = mtdchar_unlocked_ioctl, 1167 1192 #ifdef CONFIG_COMPAT 1168 - .compat_ioctl = mtd_compat_ioctl, 1193 + .compat_ioctl = mtdchar_compat_ioctl, 1169 1194 #endif 1170 - .open = mtd_open, 1171 - .release = mtd_close, 1172 - .mmap = mtd_mmap, 1195 + .open = mtdchar_open, 1196 + .release = mtdchar_close, 1197 + .mmap = mtdchar_mmap, 1173 1198 #ifndef CONFIG_MMU 1174 - .get_unmapped_area = mtd_get_unmapped_area, 1199 + .get_unmapped_area = mtdchar_get_unmapped_area, 1175 1200 #endif 1176 1201 }; 1177 1202
+22 -31
drivers/mtd/mtdconcat.c
··· 91 91 /* Entire transaction goes into this subdev */ 92 92 size = len; 93 93 94 - err = subdev->read(subdev, from, size, &retsize, buf); 94 + err = mtd_read(subdev, from, size, &retsize, buf); 95 95 96 96 /* Save information about bitflips! */ 97 97 if (unlikely(err)) { ··· 148 148 if (!(subdev->flags & MTD_WRITEABLE)) 149 149 err = -EROFS; 150 150 else 151 - err = subdev->write(subdev, to, size, &retsize, buf); 151 + err = mtd_write(subdev, to, size, &retsize, buf); 152 152 153 153 if (err) 154 154 break; ··· 227 227 if (!(subdev->flags & MTD_WRITEABLE)) 228 228 err = -EROFS; 229 229 else 230 - err = subdev->writev(subdev, &vecs_copy[entry_low], 231 - entry_high - entry_low + 1, to, &retsize); 230 + err = mtd_writev(subdev, &vecs_copy[entry_low], 231 + entry_high - entry_low + 1, to, 232 + &retsize); 232 233 233 234 vecs_copy[entry_high].iov_len = old_iov_len - size; 234 235 vecs_copy[entry_high].iov_base += size; ··· 274 273 if (from + devops.len > subdev->size) 275 274 devops.len = subdev->size - from; 276 275 277 - err = subdev->read_oob(subdev, from, &devops); 276 + err = mtd_read_oob(subdev, from, &devops); 278 277 ops->retlen += devops.retlen; 279 278 ops->oobretlen += devops.oobretlen; 280 279 ··· 334 333 if (to + devops.len > subdev->size) 335 334 devops.len = subdev->size - to; 336 335 337 - err = subdev->write_oob(subdev, to, &devops); 336 + err = mtd_write_oob(subdev, to, &devops); 338 337 ops->retlen += devops.oobretlen; 339 338 if (err) 340 339 return err; ··· 380 379 * FIXME: Allow INTERRUPTIBLE. Which means 381 380 * not having the wait_queue head on the stack. 
382 381 */ 383 - err = mtd->erase(mtd, erase); 382 + err = mtd_erase(mtd, erase); 384 383 if (!err) { 385 384 set_current_state(TASK_UNINTERRUPTIBLE); 386 385 add_wait_queue(&waitq, &wait); ··· 555 554 else 556 555 size = len; 557 556 558 - if (subdev->lock) { 559 - err = subdev->lock(subdev, ofs, size); 560 - if (err) 561 - break; 562 - } else 563 - err = -EOPNOTSUPP; 557 + err = mtd_lock(subdev, ofs, size); 558 + if (err) 559 + break; 564 560 565 561 len -= size; 566 562 if (len == 0) ··· 592 594 else 593 595 size = len; 594 596 595 - if (subdev->unlock) { 596 - err = subdev->unlock(subdev, ofs, size); 597 - if (err) 598 - break; 599 - } else 600 - err = -EOPNOTSUPP; 597 + err = mtd_unlock(subdev, ofs, size); 598 + if (err) 599 + break; 601 600 602 601 len -= size; 603 602 if (len == 0) ··· 614 619 615 620 for (i = 0; i < concat->num_subdev; i++) { 616 621 struct mtd_info *subdev = concat->subdev[i]; 617 - subdev->sync(subdev); 622 + mtd_sync(subdev); 618 623 } 619 624 } 620 625 ··· 625 630 626 631 for (i = 0; i < concat->num_subdev; i++) { 627 632 struct mtd_info *subdev = concat->subdev[i]; 628 - if ((rc = subdev->suspend(subdev)) < 0) 633 + if ((rc = mtd_suspend(subdev)) < 0) 629 634 return rc; 630 635 } 631 636 return rc; ··· 638 643 639 644 for (i = 0; i < concat->num_subdev; i++) { 640 645 struct mtd_info *subdev = concat->subdev[i]; 641 - subdev->resume(subdev); 646 + mtd_resume(subdev); 642 647 } 643 648 } 644 649 ··· 647 652 struct mtd_concat *concat = CONCAT(mtd); 648 653 int i, res = 0; 649 654 650 - if (!concat->subdev[0]->block_isbad) 655 + if (!mtd_can_have_bb(concat->subdev[0])) 651 656 return res; 652 657 653 658 if (ofs > mtd->size) ··· 661 666 continue; 662 667 } 663 668 664 - res = subdev->block_isbad(subdev, ofs); 669 + res = mtd_block_isbad(subdev, ofs); 665 670 break; 666 671 } 667 672 ··· 673 678 struct mtd_concat *concat = CONCAT(mtd); 674 679 int i, err = -EINVAL; 675 680 676 - if (!concat->subdev[0]->block_markbad) 681 + if 
(!mtd_can_have_bb(concat->subdev[0])) 677 682 return 0; 678 683 679 684 if (ofs > mtd->size) ··· 687 692 continue; 688 693 } 689 694 690 - err = subdev->block_markbad(subdev, ofs); 695 + err = mtd_block_markbad(subdev, ofs); 691 696 if (!err) 692 697 mtd->ecc_stats.badblocks++; 693 698 break; ··· 720 725 if (offset + len > subdev->size) 721 726 return (unsigned long) -EINVAL; 722 727 723 - if (subdev->get_unmapped_area) 724 - return subdev->get_unmapped_area(subdev, len, offset, 725 - flags); 726 - 727 - break; 728 + return mtd_get_unmapped_area(subdev, len, offset, flags); 728 729 } 729 730 730 731 return (unsigned long) -ENOSYS;
+71 -55
drivers/mtd/mtdcore.c
··· 107 107 */ 108 108 static void mtd_release(struct device *dev) 109 109 { 110 - dev_t index = MTD_DEVT(dev_to_mtd(dev)->index); 110 + struct mtd_info *mtd = dev_get_drvdata(dev); 111 + dev_t index = MTD_DEVT(mtd->index); 111 112 112 113 /* remove /dev/mtdXro node if needed */ 113 114 if (index) ··· 117 116 118 117 static int mtd_cls_suspend(struct device *dev, pm_message_t state) 119 118 { 120 - struct mtd_info *mtd = dev_to_mtd(dev); 119 + struct mtd_info *mtd = dev_get_drvdata(dev); 121 120 122 - if (mtd && mtd->suspend) 123 - return mtd->suspend(mtd); 124 - else 125 - return 0; 121 + return mtd_suspend(mtd); 126 122 } 127 123 128 124 static int mtd_cls_resume(struct device *dev) 129 125 { 130 - struct mtd_info *mtd = dev_to_mtd(dev); 131 - 126 + struct mtd_info *mtd = dev_get_drvdata(dev); 127 + 132 128 if (mtd && mtd->resume) 133 - mtd->resume(mtd); 129 + mtd_resume(mtd); 134 130 return 0; 135 131 } 136 132 137 133 static ssize_t mtd_type_show(struct device *dev, 138 134 struct device_attribute *attr, char *buf) 139 135 { 140 - struct mtd_info *mtd = dev_to_mtd(dev); 136 + struct mtd_info *mtd = dev_get_drvdata(dev); 141 137 char *type; 142 138 143 139 switch (mtd->type) { ··· 170 172 static ssize_t mtd_flags_show(struct device *dev, 171 173 struct device_attribute *attr, char *buf) 172 174 { 173 - struct mtd_info *mtd = dev_to_mtd(dev); 175 + struct mtd_info *mtd = dev_get_drvdata(dev); 174 176 175 177 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); 176 178 ··· 180 182 static ssize_t mtd_size_show(struct device *dev, 181 183 struct device_attribute *attr, char *buf) 182 184 { 183 - struct mtd_info *mtd = dev_to_mtd(dev); 185 + struct mtd_info *mtd = dev_get_drvdata(dev); 184 186 185 187 return snprintf(buf, PAGE_SIZE, "%llu\n", 186 188 (unsigned long long)mtd->size); ··· 191 193 static ssize_t mtd_erasesize_show(struct device *dev, 192 194 struct device_attribute *attr, char *buf) 193 195 { 194 - struct mtd_info *mtd = 
dev_to_mtd(dev); 196 + struct mtd_info *mtd = dev_get_drvdata(dev); 195 197 196 198 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize); 197 199 ··· 201 203 static ssize_t mtd_writesize_show(struct device *dev, 202 204 struct device_attribute *attr, char *buf) 203 205 { 204 - struct mtd_info *mtd = dev_to_mtd(dev); 206 + struct mtd_info *mtd = dev_get_drvdata(dev); 205 207 206 208 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); 207 209 ··· 211 213 static ssize_t mtd_subpagesize_show(struct device *dev, 212 214 struct device_attribute *attr, char *buf) 213 215 { 214 - struct mtd_info *mtd = dev_to_mtd(dev); 216 + struct mtd_info *mtd = dev_get_drvdata(dev); 215 217 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; 216 218 217 219 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); ··· 222 224 static ssize_t mtd_oobsize_show(struct device *dev, 223 225 struct device_attribute *attr, char *buf) 224 226 { 225 - struct mtd_info *mtd = dev_to_mtd(dev); 227 + struct mtd_info *mtd = dev_get_drvdata(dev); 226 228 227 229 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); 228 230 ··· 232 234 static ssize_t mtd_numeraseregions_show(struct device *dev, 233 235 struct device_attribute *attr, char *buf) 234 236 { 235 - struct mtd_info *mtd = dev_to_mtd(dev); 237 + struct mtd_info *mtd = dev_get_drvdata(dev); 236 238 237 239 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); 238 240 ··· 243 245 static ssize_t mtd_name_show(struct device *dev, 244 246 struct device_attribute *attr, char *buf) 245 247 { 246 - struct mtd_info *mtd = dev_to_mtd(dev); 248 + struct mtd_info *mtd = dev_get_drvdata(dev); 247 249 248 250 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); 249 251 ··· 336 338 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 337 339 338 340 /* Some chips always power up locked. 
Unlock them now */ 339 - if ((mtd->flags & MTD_WRITEABLE) 340 - && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 341 - if (mtd->unlock(mtd, 0, mtd->size)) 341 + if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { 342 + error = mtd_unlock(mtd, 0, mtd->size); 343 + if (error && error != -EOPNOTSUPP) 342 344 printk(KERN_WARNING 343 345 "%s: unlock failed, writes may not work\n", 344 346 mtd->name); ··· 514 516 * or removal of MTD devices. Causes the 'add' callback to be immediately 515 517 * invoked for each MTD device currently present in the system. 516 518 */ 517 - 518 519 void register_mtd_user (struct mtd_notifier *new) 519 520 { 520 521 struct mtd_info *mtd; ··· 529 532 530 533 mutex_unlock(&mtd_table_mutex); 531 534 } 535 + EXPORT_SYMBOL_GPL(register_mtd_user); 532 536 533 537 /** 534 538 * unregister_mtd_user - unregister a 'user' of MTD devices. ··· 540 542 * 'remove' callback to be immediately invoked for each MTD device 541 543 * currently present in the system. 542 544 */ 543 - 544 545 int unregister_mtd_user (struct mtd_notifier *old) 545 546 { 546 547 struct mtd_info *mtd; ··· 555 558 mutex_unlock(&mtd_table_mutex); 556 559 return 0; 557 560 } 558 - 561 + EXPORT_SYMBOL_GPL(unregister_mtd_user); 559 562 560 563 /** 561 564 * get_mtd_device - obtain a validated handle for an MTD device ··· 568 571 * both, return the num'th driver only if its address matches. Return 569 572 * error code if not. 
570 573 */ 571 - 572 574 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) 573 575 { 574 576 struct mtd_info *ret = NULL, *other; ··· 600 604 mutex_unlock(&mtd_table_mutex); 601 605 return ret; 602 606 } 607 + EXPORT_SYMBOL_GPL(get_mtd_device); 603 608 604 609 605 610 int __get_mtd_device(struct mtd_info *mtd) ··· 621 624 mtd->usecount++; 622 625 return 0; 623 626 } 627 + EXPORT_SYMBOL_GPL(__get_mtd_device); 624 628 625 629 /** 626 630 * get_mtd_device_nm - obtain a validated handle for an MTD device by ··· 631 633 * This function returns MTD device description structure in case of 632 634 * success and an error code in case of failure. 633 635 */ 634 - 635 636 struct mtd_info *get_mtd_device_nm(const char *name) 636 637 { 637 638 int err = -ENODEV; ··· 659 662 mutex_unlock(&mtd_table_mutex); 660 663 return ERR_PTR(err); 661 664 } 665 + EXPORT_SYMBOL_GPL(get_mtd_device_nm); 662 666 663 667 void put_mtd_device(struct mtd_info *mtd) 664 668 { ··· 668 670 mutex_unlock(&mtd_table_mutex); 669 671 670 672 } 673 + EXPORT_SYMBOL_GPL(put_mtd_device); 671 674 672 675 void __put_mtd_device(struct mtd_info *mtd) 673 676 { ··· 680 681 681 682 module_put(mtd->owner); 682 683 } 684 + EXPORT_SYMBOL_GPL(__put_mtd_device); 683 685 684 - /* default_mtd_writev - default mtd writev method for MTD devices that 685 - * don't implement their own 686 + /* 687 + * default_mtd_writev - the default writev method 688 + * @mtd: mtd device description object pointer 689 + * @vecs: the vectors to write 690 + * @count: count of vectors in @vecs 691 + * @to: the MTD device offset to write to 692 + * @retlen: on exit contains the count of bytes written to the MTD device. 693 + * 694 + * This function returns zero in case of success and a negative error code in 695 + * case of failure. 
686 696 */ 687 - 688 - int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 689 - unsigned long count, loff_t to, size_t *retlen) 697 + static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 698 + unsigned long count, loff_t to, size_t *retlen) 690 699 { 691 700 unsigned long i; 692 701 size_t totlen = 0, thislen; 693 702 int ret = 0; 694 703 695 - if(!mtd->write) { 696 - ret = -EROFS; 697 - } else { 698 - for (i=0; i<count; i++) { 699 - if (!vecs[i].iov_len) 700 - continue; 701 - ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base); 702 - totlen += thislen; 703 - if (ret || thislen != vecs[i].iov_len) 704 - break; 705 - to += vecs[i].iov_len; 706 - } 704 + for (i = 0; i < count; i++) { 705 + if (!vecs[i].iov_len) 706 + continue; 707 + ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, 708 + vecs[i].iov_base); 709 + totlen += thislen; 710 + if (ret || thislen != vecs[i].iov_len) 711 + break; 712 + to += vecs[i].iov_len; 707 713 } 708 - if (retlen) 709 - *retlen = totlen; 714 + *retlen = totlen; 710 715 return ret; 711 716 } 712 717 718 + /* 719 + * mtd_writev - the vector-based MTD write method 720 + * @mtd: mtd device description object pointer 721 + * @vecs: the vectors to write 722 + * @count: count of vectors in @vecs 723 + * @to: the MTD device offset to write to 724 + * @retlen: on exit contains the count of bytes written to the MTD device. 725 + * 726 + * This function returns zero in case of success and a negative error code in 727 + * case of failure. 
728 + */ 729 + int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 730 + unsigned long count, loff_t to, size_t *retlen) 731 + { 732 + *retlen = 0; 733 + if (!mtd->writev) 734 + return default_mtd_writev(mtd, vecs, count, to, retlen); 735 + return mtd->writev(mtd, vecs, count, to, retlen); 736 + } 737 + EXPORT_SYMBOL_GPL(mtd_writev); 738 + 713 739 /** 714 740 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size 715 - * @size: A pointer to the ideal or maximum size of the allocation. Points 741 + * @mtd: mtd device description object pointer 742 + * @size: a pointer to the ideal or maximum size of the allocation, points 716 743 * to the actual allocation size on success. 717 744 * 718 745 * This routine attempts to allocate a contiguous kernel buffer up to ··· 783 758 */ 784 759 return kmalloc(*size, GFP_KERNEL); 785 760 } 786 - 787 - EXPORT_SYMBOL_GPL(get_mtd_device); 788 - EXPORT_SYMBOL_GPL(get_mtd_device_nm); 789 - EXPORT_SYMBOL_GPL(__get_mtd_device); 790 - EXPORT_SYMBOL_GPL(put_mtd_device); 791 - EXPORT_SYMBOL_GPL(__put_mtd_device); 792 - EXPORT_SYMBOL_GPL(register_mtd_user); 793 - EXPORT_SYMBOL_GPL(unregister_mtd_user); 794 - EXPORT_SYMBOL_GPL(default_mtd_writev); 795 761 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); 796 762 797 763 #ifdef CONFIG_PROC_FS
+23 -21
drivers/mtd/mtdoops.c
··· 112 112 set_current_state(TASK_INTERRUPTIBLE); 113 113 add_wait_queue(&wait_q, &wait); 114 114 115 - ret = mtd->erase(mtd, &erase); 115 + ret = mtd_erase(mtd, &erase); 116 116 if (ret) { 117 117 set_current_state(TASK_RUNNING); 118 118 remove_wait_queue(&wait_q, &wait); ··· 169 169 cxt->nextpage = 0; 170 170 } 171 171 172 - while (mtd->block_isbad) { 173 - ret = mtd->block_isbad(mtd, cxt->nextpage * record_size); 172 + while (mtd_can_have_bb(mtd)) { 173 + ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); 174 174 if (!ret) 175 175 break; 176 176 if (ret < 0) { ··· 199 199 return; 200 200 } 201 201 202 - if (mtd->block_markbad && ret == -EIO) { 203 - ret = mtd->block_markbad(mtd, cxt->nextpage * record_size); 202 + if (mtd_can_have_bb(mtd) && ret == -EIO) { 203 + ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); 204 204 if (ret < 0) { 205 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); 206 206 return; ··· 221 221 hdr[0] = cxt->nextcount; 222 222 hdr[1] = MTDOOPS_KERNMSG_MAGIC; 223 223 224 - if (panic) 225 - ret = mtd->panic_write(mtd, cxt->nextpage * record_size, 226 - record_size, &retlen, cxt->oops_buf); 227 - else 228 - ret = mtd->write(mtd, cxt->nextpage * record_size, 229 - record_size, &retlen, cxt->oops_buf); 224 + if (panic) { 225 + ret = mtd_panic_write(mtd, cxt->nextpage * record_size, 226 + record_size, &retlen, cxt->oops_buf); 227 + if (ret == -EOPNOTSUPP) { 228 + printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); 229 + return; 230 + } 231 + } else 232 + ret = mtd_write(mtd, cxt->nextpage * record_size, 233 + record_size, &retlen, cxt->oops_buf); 230 234 231 235 if (retlen != record_size || ret < 0) 232 236 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", ··· 257 253 size_t retlen; 258 254 259 255 for (page = 0; page < cxt->oops_pages; page++) { 256 + if (mtd_can_have_bb(mtd) && 257 + mtd_block_isbad(mtd, page * record_size)) 258 + continue; 260 259 /* Assume 
the page is used */ 261 260 mark_page_used(cxt, page); 262 - ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, 263 - &retlen, (u_char *) &count[0]); 261 + ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, 262 + &retlen, (u_char *)&count[0]); 264 263 if (retlen != MTDOOPS_HEADER_SIZE || 265 264 (ret < 0 && !mtd_is_bitflip(ret))) { 266 265 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", ··· 334 327 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); 335 328 336 329 /* Panics must be written immediately */ 337 - if (reason != KMSG_DUMP_OOPS) { 338 - if (!cxt->mtd->panic_write) 339 - printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); 340 - else 341 - mtdoops_write(cxt, 1); 342 - return; 343 - } 330 + if (reason != KMSG_DUMP_OOPS) 331 + mtdoops_write(cxt, 1); 344 332 345 333 /* For other cases, schedule work to write it "nicely" */ 346 334 schedule_work(&cxt->work_write); ··· 371 369 372 370 /* oops_page_used is a bit field */ 373 371 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, 374 - BITS_PER_LONG)); 372 + BITS_PER_LONG) * sizeof(unsigned long)); 375 373 if (!cxt->oops_page_used) { 376 374 printk(KERN_ERR "mtdoops: could not allocate page array\n"); 377 375 return;
+28 -35
drivers/mtd/mtdpart.c
··· 70 70 len = 0; 71 71 else if (from + len > mtd->size) 72 72 len = mtd->size - from; 73 - res = part->master->read(part->master, from + part->offset, 74 - len, retlen, buf); 73 + res = mtd_read(part->master, from + part->offset, len, retlen, buf); 75 74 if (unlikely(res)) { 76 75 if (mtd_is_bitflip(res)) 77 76 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; ··· 88 89 len = 0; 89 90 else if (from + len > mtd->size) 90 91 len = mtd->size - from; 91 - return part->master->point (part->master, from + part->offset, 92 - len, retlen, virt, phys); 92 + return mtd_point(part->master, from + part->offset, len, retlen, 93 + virt, phys); 93 94 } 94 95 95 96 static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 96 97 { 97 98 struct mtd_part *part = PART(mtd); 98 99 99 - part->master->unpoint(part->master, from + part->offset, len); 100 + mtd_unpoint(part->master, from + part->offset, len); 100 101 } 101 102 102 103 static unsigned long part_get_unmapped_area(struct mtd_info *mtd, ··· 107 108 struct mtd_part *part = PART(mtd); 108 109 109 110 offset += part->offset; 110 - return part->master->get_unmapped_area(part->master, len, offset, 111 - flags); 111 + return mtd_get_unmapped_area(part->master, len, offset, flags); 112 112 } 113 113 114 114 static int part_read_oob(struct mtd_info *mtd, loff_t from, ··· 138 140 return -EINVAL; 139 141 } 140 142 141 - res = part->master->read_oob(part->master, from + part->offset, ops); 143 + res = mtd_read_oob(part->master, from + part->offset, ops); 142 144 if (unlikely(res)) { 143 145 if (mtd_is_bitflip(res)) 144 146 mtd->ecc_stats.corrected++; ··· 152 154 size_t len, size_t *retlen, u_char *buf) 153 155 { 154 156 struct mtd_part *part = PART(mtd); 155 - return part->master->read_user_prot_reg(part->master, from, 156 - len, retlen, buf); 157 + return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); 157 158 } 158 159 159 160 static int part_get_user_prot_info(struct 
mtd_info *mtd, 160 161 struct otp_info *buf, size_t len) 161 162 { 162 163 struct mtd_part *part = PART(mtd); 163 - return part->master->get_user_prot_info(part->master, buf, len); 164 + return mtd_get_user_prot_info(part->master, buf, len); 164 165 } 165 166 166 167 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 167 168 size_t len, size_t *retlen, u_char *buf) 168 169 { 169 170 struct mtd_part *part = PART(mtd); 170 - return part->master->read_fact_prot_reg(part->master, from, 171 - len, retlen, buf); 171 + return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); 172 172 } 173 173 174 174 static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, 175 175 size_t len) 176 176 { 177 177 struct mtd_part *part = PART(mtd); 178 - return part->master->get_fact_prot_info(part->master, buf, len); 178 + return mtd_get_fact_prot_info(part->master, buf, len); 179 179 } 180 180 181 181 static int part_write(struct mtd_info *mtd, loff_t to, size_t len, ··· 186 190 len = 0; 187 191 else if (to + len > mtd->size) 188 192 len = mtd->size - to; 189 - return part->master->write(part->master, to + part->offset, 190 - len, retlen, buf); 193 + return mtd_write(part->master, to + part->offset, len, retlen, buf); 191 194 } 192 195 193 196 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, ··· 199 204 len = 0; 200 205 else if (to + len > mtd->size) 201 206 len = mtd->size - to; 202 - return part->master->panic_write(part->master, to + part->offset, 203 - len, retlen, buf); 207 + return mtd_panic_write(part->master, to + part->offset, len, retlen, 208 + buf); 204 209 } 205 210 206 211 static int part_write_oob(struct mtd_info *mtd, loff_t to, ··· 215 220 return -EINVAL; 216 221 if (ops->datbuf && to + ops->len > mtd->size) 217 222 return -EINVAL; 218 - return part->master->write_oob(part->master, to + part->offset, ops); 223 + return mtd_write_oob(part->master, to + part->offset, ops); 219 224 } 220 225 221 226 
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 222 227 size_t len, size_t *retlen, u_char *buf) 223 228 { 224 229 struct mtd_part *part = PART(mtd); 225 - return part->master->write_user_prot_reg(part->master, from, 226 - len, retlen, buf); 230 + return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); 227 231 } 228 232 229 233 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 230 234 size_t len) 231 235 { 232 236 struct mtd_part *part = PART(mtd); 233 - return part->master->lock_user_prot_reg(part->master, from, len); 237 + return mtd_lock_user_prot_reg(part->master, from, len); 234 238 } 235 239 236 240 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, ··· 238 244 struct mtd_part *part = PART(mtd); 239 245 if (!(mtd->flags & MTD_WRITEABLE)) 240 246 return -EROFS; 241 - return part->master->writev(part->master, vecs, count, 242 - to + part->offset, retlen); 247 + return mtd_writev(part->master, vecs, count, to + part->offset, 248 + retlen); 243 249 } 244 250 245 251 static int part_erase(struct mtd_info *mtd, struct erase_info *instr) ··· 251 257 if (instr->addr >= mtd->size) 252 258 return -EINVAL; 253 259 instr->addr += part->offset; 254 - ret = part->master->erase(part->master, instr); 260 + ret = mtd_erase(part->master, instr); 255 261 if (ret) { 256 262 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 257 263 instr->fail_addr -= part->offset; ··· 279 285 struct mtd_part *part = PART(mtd); 280 286 if ((len + ofs) > mtd->size) 281 287 return -EINVAL; 282 - return part->master->lock(part->master, ofs + part->offset, len); 288 + return mtd_lock(part->master, ofs + part->offset, len); 283 289 } 284 290 285 291 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ··· 287 293 struct mtd_part *part = PART(mtd); 288 294 if ((len + ofs) > mtd->size) 289 295 return -EINVAL; 290 - return part->master->unlock(part->master, ofs + part->offset, len); 296 + return 
mtd_unlock(part->master, ofs + part->offset, len); 291 297 } 292 298 293 299 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) ··· 295 301 struct mtd_part *part = PART(mtd); 296 302 if ((len + ofs) > mtd->size) 297 303 return -EINVAL; 298 - return part->master->is_locked(part->master, ofs + part->offset, len); 304 + return mtd_is_locked(part->master, ofs + part->offset, len); 299 305 } 300 306 301 307 static void part_sync(struct mtd_info *mtd) 302 308 { 303 309 struct mtd_part *part = PART(mtd); 304 - part->master->sync(part->master); 310 + mtd_sync(part->master); 305 311 } 306 312 307 313 static int part_suspend(struct mtd_info *mtd) 308 314 { 309 315 struct mtd_part *part = PART(mtd); 310 - return part->master->suspend(part->master); 316 + return mtd_suspend(part->master); 311 317 } 312 318 313 319 static void part_resume(struct mtd_info *mtd) 314 320 { 315 321 struct mtd_part *part = PART(mtd); 316 - part->master->resume(part->master); 322 + mtd_resume(part->master); 317 323 } 318 324 319 325 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) ··· 322 328 if (ofs >= mtd->size) 323 329 return -EINVAL; 324 330 ofs += part->offset; 325 - return part->master->block_isbad(part->master, ofs); 331 + return mtd_block_isbad(part->master, ofs); 326 332 } 327 333 328 334 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) ··· 335 341 if (ofs >= mtd->size) 336 342 return -EINVAL; 337 343 ofs += part->offset; 338 - res = part->master->block_markbad(part->master, ofs); 344 + res = mtd_block_markbad(part->master, ofs); 339 345 if (!res) 340 346 mtd->ecc_stats.badblocks++; 341 347 return res; ··· 553 559 uint64_t offs = 0; 554 560 555 561 while (offs < slave->mtd.size) { 556 - if (master->block_isbad(master, 557 - offs + slave->offset)) 562 + if (mtd_block_isbad(master, offs + slave->offset)) 558 563 slave->mtd.ecc_stats.badblocks++; 559 564 offs += slave->mtd.erasesize; 560 565 }
+14 -15
drivers/mtd/mtdswap.c
··· 274 274 eb->root = NULL; 275 275 276 276 /* badblocks not supported */ 277 - if (!d->mtd->block_markbad) 277 + if (!mtd_can_have_bb(d->mtd)) 278 278 return 1; 279 279 280 280 offset = mtdswap_eb_offset(d, eb); 281 281 dev_warn(d->dev, "Marking bad block at %08llx\n", offset); 282 - ret = d->mtd->block_markbad(d->mtd, offset); 282 + ret = mtd_block_markbad(d->mtd, offset); 283 283 284 284 if (ret) { 285 285 dev_warn(d->dev, "Mark block bad failed for block at %08llx " ··· 312 312 static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, 313 313 struct mtd_oob_ops *ops) 314 314 { 315 - int ret = d->mtd->read_oob(d->mtd, from, ops); 315 + int ret = mtd_read_oob(d->mtd, from, ops); 316 316 317 317 if (mtd_is_bitflip(ret)) 318 318 return ret; ··· 343 343 offset = mtdswap_eb_offset(d, eb); 344 344 345 345 /* Check first if the block is bad. */ 346 - if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset)) 346 + if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) 347 347 return MTDSWAP_SCANNED_BAD; 348 348 349 349 ops.ooblen = 2 * d->mtd->ecclayout->oobavail; ··· 403 403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; 404 404 } 405 405 406 - ret = d->mtd->write_oob(d->mtd, offset , &ops); 406 + ret = mtd_write_oob(d->mtd, offset, &ops); 407 407 408 408 if (ret) { 409 409 dev_warn(d->dev, "Write OOB failed for block at %08llx " ··· 567 567 erase.len = mtd->erasesize; 568 568 erase.priv = (u_long)&wq; 569 569 570 - ret = mtd->erase(mtd, &erase); 570 + ret = mtd_erase(mtd, &erase); 571 571 if (ret) { 572 572 if (retries++ < MTDSWAP_ERASE_RETRIES) { 573 573 dev_warn(d->dev, ··· 689 689 return ret; 690 690 691 691 writepos = (loff_t)*bp << PAGE_SHIFT; 692 - ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); 692 + ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf); 693 693 if (ret == -EIO || mtd_is_eccerr(ret)) { 694 694 d->curr_write_pos--; 695 695 eb->active_count--; ··· 736 736 retries = 0; 737 737 738 738 retry: 739 - ret = 
mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); 739 + ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); 740 740 741 741 if (ret < 0 && !mtd_is_bitflip(ret)) { 742 742 oldeb = d->eb_data + oldblock / d->pages_per_eblk; ··· 946 946 patt = mtdswap_test_patt(test + i); 947 947 memset(d->page_buf, patt, mtd->writesize); 948 948 memset(d->oob_buf, patt, mtd->ecclayout->oobavail); 949 - ret = mtd->write_oob(mtd, pos, &ops); 949 + ret = mtd_write_oob(mtd, pos, &ops); 950 950 if (ret) 951 951 goto error; 952 952 ··· 955 955 956 956 pos = base; 957 957 for (i = 0; i < mtd_pages; i++) { 958 - ret = mtd->read_oob(mtd, pos, &ops); 958 + ret = mtd_read_oob(mtd, pos, &ops); 959 959 if (ret) 960 960 goto error; 961 961 ··· 1047 1047 { 1048 1048 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); 1049 1049 1050 - if (d->mtd->sync) 1051 - d->mtd->sync(d->mtd); 1050 + mtd_sync(d->mtd); 1052 1051 return 0; 1053 1052 } 1054 1053 ··· 1058 1059 1059 1060 badcnt = 0; 1060 1061 1061 - if (mtd->block_isbad) 1062 + if (mtd_can_have_bb(mtd)) 1062 1063 for (offset = 0; offset < size; offset += mtd->erasesize) 1063 - if (mtd->block_isbad(mtd, offset)) 1064 + if (mtd_block_isbad(mtd, offset)) 1064 1065 badcnt++; 1065 1066 1066 1067 return badcnt; ··· 1160 1161 retries = 0; 1161 1162 1162 1163 retry: 1163 - ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); 1164 + ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf); 1164 1165 1165 1166 d->mtd_read_count++; 1166 1167 if (mtd_is_bitflip(ret)) {
+1 -1
drivers/mtd/nand/Kconfig
··· 110 110 111 111 config MTD_NAND_OMAP2 112 112 tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4" 113 - depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4) 113 + depends on ARCH_OMAP2PLUS 114 114 help 115 115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 116 116 platforms.
+1 -11
drivers/mtd/nand/ams-delta.c
··· 280 280 }, 281 281 }; 282 282 283 - static int __init ams_delta_nand_init(void) 284 - { 285 - return platform_driver_register(&ams_delta_nand_driver); 286 - } 287 - module_init(ams_delta_nand_init); 288 - 289 - static void __exit ams_delta_nand_exit(void) 290 - { 291 - platform_driver_unregister(&ams_delta_nand_driver); 292 - } 293 - module_exit(ams_delta_nand_exit); 283 + module_platform_driver(ams_delta_nand_driver); 294 284 295 285 MODULE_LICENSE("GPL"); 296 286 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+1 -12
drivers/mtd/nand/bcm_umi_nand.c
··· 546 546 .resume = bcm_umi_nand_resume, 547 547 }; 548 548 549 - static int __init nand_init(void) 550 - { 551 - return platform_driver_register(&nand_driver); 552 - } 553 - 554 - static void __exit nand_exit(void) 555 - { 556 - platform_driver_unregister(&nand_driver); 557 - } 558 - 559 - module_init(nand_init); 560 - module_exit(nand_exit); 549 + module_platform_driver(nand_driver); 561 550 562 551 MODULE_LICENSE("GPL"); 563 552 MODULE_AUTHOR("Broadcom");
+3 -1
drivers/mtd/nand/davinci_nand.c
··· 675 675 676 676 davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val); 677 677 678 - ret = davinci_aemif_setup_timing(info->timing, info->base, 678 + ret = 0; 679 + if (info->timing) 680 + ret = davinci_aemif_setup_timing(info->timing, info->base, 679 681 info->core_chipsel); 680 682 if (ret < 0) { 681 683 dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
+2 -2
drivers/mtd/nand/diskonchip.c
··· 1072 1072 size_t retlen; 1073 1073 1074 1074 for (offs = 0; offs < mtd->size; offs += mtd->erasesize) { 1075 - ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf); 1075 + ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf); 1076 1076 if (retlen != mtd->writesize) 1077 1077 continue; 1078 1078 if (ret) { ··· 1097 1097 /* Only one mediaheader was found. We want buf to contain a 1098 1098 mediaheader on return, so we'll have to re-read the one we found. */ 1099 1099 offs = doc->mh0_page << this->page_shift; 1100 - ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf); 1100 + ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf); 1101 1101 if (retlen != mtd->writesize) { 1102 1102 /* Insanity. Give up. */ 1103 1103 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+39 -36
drivers/mtd/nand/fsl_elbc_nand.c
··· 166 166 167 167 elbc_fcm_ctrl->page = page_addr; 168 168 169 - out_be32(&lbc->fbar, 170 - page_addr >> (chip->phys_erase_shift - chip->page_shift)); 171 - 172 169 if (priv->page_size) { 170 + /* 171 + * large page size chip : FPAR[PI] save the lowest 6 bits, 172 + * FBAR[BLK] save the other bits. 173 + */ 174 + out_be32(&lbc->fbar, page_addr >> 6); 173 175 out_be32(&lbc->fpar, 174 176 ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | 175 177 (oob ? FPAR_LP_MS : 0) | column); 176 178 buf_num = (page_addr & 1) << 2; 177 179 } else { 180 + /* 181 + * small page size chip : FPAR[PI] save the lowest 5 bits, 182 + * FBAR[BLK] save the other bits. 183 + */ 184 + out_be32(&lbc->fbar, page_addr >> 5); 178 185 out_be32(&lbc->fpar, 179 186 ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | 180 187 (oob ? FPAR_SP_MS : 0) | column); ··· 356 349 fsl_elbc_run_command(mtd); 357 350 return; 358 351 359 - /* READID must read all 5 possible bytes while CEB is active */ 360 352 case NAND_CMD_READID: 361 - dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 353 + case NAND_CMD_PARAM: 354 + dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command); 362 355 363 356 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | 364 357 (FIR_OP_UA << FIR_OP1_SHIFT) | 365 358 (FIR_OP_RBW << FIR_OP2_SHIFT)); 366 - out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 367 - /* nand_get_flash_type() reads 8 bytes of entire ID string */ 368 - out_be32(&lbc->fbcr, 8); 369 - elbc_fcm_ctrl->read_bytes = 8; 359 + out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT); 360 + /* 361 + * although currently it's 8 bytes for READID, we always read 362 + * the maximum 256 bytes(for PARAM) 363 + */ 364 + out_be32(&lbc->fbcr, 256); 365 + elbc_fcm_ctrl->read_bytes = 256; 370 366 elbc_fcm_ctrl->use_mdr = 1; 371 - elbc_fcm_ctrl->mdr = 0; 372 - 367 + elbc_fcm_ctrl->mdr = column; 373 368 set_addr(mtd, 0, 0, 0); 374 369 fsl_elbc_run_command(mtd); 375 370 return; ··· 416 407 page_addr, column); 417 408 418 409 
elbc_fcm_ctrl->column = column; 419 - elbc_fcm_ctrl->oob = 0; 420 410 elbc_fcm_ctrl->use_mdr = 1; 411 + 412 + if (column >= mtd->writesize) { 413 + /* OOB area */ 414 + column -= mtd->writesize; 415 + elbc_fcm_ctrl->oob = 1; 416 + } else { 417 + WARN_ON(column != 0); 418 + elbc_fcm_ctrl->oob = 0; 419 + } 421 420 422 421 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | 423 422 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) | ··· 451 434 (FIR_OP_CW1 << FIR_OP6_SHIFT) | 452 435 (FIR_OP_RS << FIR_OP7_SHIFT)); 453 436 454 - if (column >= mtd->writesize) { 437 + if (elbc_fcm_ctrl->oob) 455 438 /* OOB area --> READOOB */ 456 - column -= mtd->writesize; 457 439 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; 458 - elbc_fcm_ctrl->oob = 1; 459 - } else { 460 - WARN_ON(column != 0); 440 + else 461 441 /* First 256 bytes --> READ0 */ 462 442 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 463 - } 464 443 } 465 444 466 445 out_be32(&lbc->fcr, fcr); ··· 476 463 */ 477 464 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || 478 465 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) 479 - out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); 466 + out_be32(&lbc->fbcr, 467 + elbc_fcm_ctrl->index - elbc_fcm_ctrl->column); 480 468 else 481 469 out_be32(&lbc->fbcr, 0); 482 470 ··· 673 659 if (chip->pagemask & 0xff000000) 674 660 al++; 675 661 676 - /* add to ECCM mode set in fsl_elbc_init */ 677 - priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */ 678 - (al << FMR_AL_SHIFT); 662 + priv->fmr |= al << FMR_AL_SHIFT; 679 663 680 664 dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n", 681 665 chip->numchips); ··· 776 764 priv->mtd.priv = chip; 777 765 priv->mtd.owner = THIS_MODULE; 778 766 779 - /* Set the ECCM according to the settings in bootloader.*/ 780 - priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM; 767 + /* set timeout to maximum */ 768 + priv->fmr = 15 << FMR_CWTO_SHIFT; 769 + if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS) 770 + priv->fmr |= FMR_ECCM; 781 771 782 772 /* fill in nand_chip 
structure */ 783 773 /* set up function call table */ ··· 985 971 .remove = fsl_elbc_nand_remove, 986 972 }; 987 973 988 - static int __init fsl_elbc_nand_init(void) 989 - { 990 - return platform_driver_register(&fsl_elbc_nand_driver); 991 - } 992 - 993 - static void __exit fsl_elbc_nand_exit(void) 994 - { 995 - platform_driver_unregister(&fsl_elbc_nand_driver); 996 - } 997 - 998 - module_init(fsl_elbc_nand_init); 999 - module_exit(fsl_elbc_nand_exit); 974 + module_platform_driver(fsl_elbc_nand_driver); 1000 975 1001 976 MODULE_LICENSE("GPL"); 1002 977 MODULE_AUTHOR("Freescale");
+1 -11
drivers/mtd/nand/fsl_upm.c
··· 353 353 .remove = __devexit_p(fun_remove), 354 354 }; 355 355 356 - static int __init fun_module_init(void) 357 - { 358 - return platform_driver_register(&of_fun_driver); 359 - } 360 - module_init(fun_module_init); 361 - 362 - static void __exit fun_module_exit(void) 363 - { 364 - platform_driver_unregister(&of_fun_driver); 365 - } 366 - module_exit(fun_module_exit); 356 + module_platform_driver(of_fun_driver); 367 357 368 358 MODULE_LICENSE("GPL"); 369 359 MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+108 -7
drivers/mtd/nand/gpio.c
··· 27 27 #include <linux/mtd/nand.h> 28 28 #include <linux/mtd/partitions.h> 29 29 #include <linux/mtd/nand-gpio.h> 30 + #include <linux/of.h> 31 + #include <linux/of_address.h> 32 + #include <linux/of_gpio.h> 30 33 31 34 struct gpiomtd { 32 35 void __iomem *io_sync; ··· 174 171 return gpio_get_value(gpiomtd->plat.gpio_rdy); 175 172 } 176 173 174 + #ifdef CONFIG_OF 175 + static const struct of_device_id gpio_nand_id_table[] = { 176 + { .compatible = "gpio-control-nand" }, 177 + {} 178 + }; 179 + MODULE_DEVICE_TABLE(of, gpio_nand_id_table); 180 + 181 + static int gpio_nand_get_config_of(const struct device *dev, 182 + struct gpio_nand_platdata *plat) 183 + { 184 + u32 val; 185 + 186 + if (!of_property_read_u32(dev->of_node, "bank-width", &val)) { 187 + if (val == 2) { 188 + plat->options |= NAND_BUSWIDTH_16; 189 + } else if (val != 1) { 190 + dev_err(dev, "invalid bank-width %u\n", val); 191 + return -EINVAL; 192 + } 193 + } 194 + 195 + plat->gpio_rdy = of_get_gpio(dev->of_node, 0); 196 + plat->gpio_nce = of_get_gpio(dev->of_node, 1); 197 + plat->gpio_ale = of_get_gpio(dev->of_node, 2); 198 + plat->gpio_cle = of_get_gpio(dev->of_node, 3); 199 + plat->gpio_nwp = of_get_gpio(dev->of_node, 4); 200 + 201 + if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) 202 + plat->chip_delay = val; 203 + 204 + return 0; 205 + } 206 + 207 + static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev) 208 + { 209 + struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL); 210 + u64 addr; 211 + 212 + if (!r || of_property_read_u64(pdev->dev.of_node, 213 + "gpio-control-nand,io-sync-reg", &addr)) 214 + return NULL; 215 + 216 + r->start = addr; 217 + r->end = r->start + 0x3; 218 + r->flags = IORESOURCE_MEM; 219 + 220 + return r; 221 + } 222 + #else /* CONFIG_OF */ 223 + #define gpio_nand_id_table NULL 224 + static inline int gpio_nand_get_config_of(const struct device *dev, 225 + struct gpio_nand_platdata *plat) 226 + { 227 + return -ENOSYS; 228 
+ } 229 + 230 + static inline struct resource * 231 + gpio_nand_get_io_sync_of(struct platform_device *pdev) 232 + { 233 + return NULL; 234 + } 235 + #endif /* CONFIG_OF */ 236 + 237 + static inline int gpio_nand_get_config(const struct device *dev, 238 + struct gpio_nand_platdata *plat) 239 + { 240 + int ret = gpio_nand_get_config_of(dev, plat); 241 + 242 + if (!ret) 243 + return ret; 244 + 245 + if (dev->platform_data) { 246 + memcpy(plat, dev->platform_data, sizeof(*plat)); 247 + return 0; 248 + } 249 + 250 + return -EINVAL; 251 + } 252 + 253 + static inline struct resource * 254 + gpio_nand_get_io_sync(struct platform_device *pdev) 255 + { 256 + struct resource *r = gpio_nand_get_io_sync_of(pdev); 257 + 258 + if (r) 259 + return r; 260 + 261 + return platform_get_resource(pdev, IORESOURCE_MEM, 1); 262 + } 263 + 177 264 static int __devexit gpio_nand_remove(struct platform_device *dev) 178 265 { 179 266 struct gpiomtd *gpiomtd = platform_get_drvdata(dev); ··· 271 178 272 179 nand_release(&gpiomtd->mtd_info); 273 180 274 - res = platform_get_resource(dev, IORESOURCE_MEM, 1); 181 + res = gpio_nand_get_io_sync(dev); 275 182 iounmap(gpiomtd->io_sync); 276 183 if (res) 277 184 release_mem_region(res->start, resource_size(res)); ··· 319 226 struct gpiomtd *gpiomtd; 320 227 struct nand_chip *this; 321 228 struct resource *res0, *res1; 322 - int ret; 229 + struct mtd_part_parser_data ppdata = {}; 230 + int ret = 0; 323 231 324 - if (!dev->dev.platform_data) 232 + if (!dev->dev.of_node && !dev->dev.platform_data) 325 233 return -EINVAL; 326 234 327 235 res0 = platform_get_resource(dev, IORESOURCE_MEM, 0); ··· 342 248 goto err_map; 343 249 } 344 250 345 - res1 = platform_get_resource(dev, IORESOURCE_MEM, 1); 251 + res1 = gpio_nand_get_io_sync(dev); 346 252 if (res1) { 347 253 gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret); 348 254 if (!gpiomtd->io_sync) { ··· 351 257 } 352 258 } 353 259 354 - memcpy(&gpiomtd->plat, dev->dev.platform_data, 
sizeof(gpiomtd->plat)); 260 + ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat); 261 + if (ret) 262 + goto err_nce; 355 263 356 264 ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE"); 357 265 if (ret) ··· 412 316 gpiomtd->plat.adjust_parts(&gpiomtd->plat, 413 317 gpiomtd->mtd_info.size); 414 318 415 - mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts, 416 - gpiomtd->plat.num_parts); 319 + ppdata.of_node = dev->dev.of_node; 320 + ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata, 321 + gpiomtd->plat.parts, 322 + gpiomtd->plat.num_parts); 323 + if (ret) 324 + goto err_wp; 417 325 platform_set_drvdata(dev, gpiomtd); 418 326 419 327 return 0; ··· 452 352 .remove = gpio_nand_remove, 453 353 .driver = { 454 354 .name = "gpio-nand", 355 + .of_match_table = gpio_nand_id_table, 455 356 }, 456 357 }; 457 358
+1 -11
drivers/mtd/nand/jz4740_nand.c
··· 423 423 }, 424 424 }; 425 425 426 - static int __init jz_nand_init(void) 427 - { 428 - return platform_driver_register(&jz_nand_driver); 429 - } 430 - module_init(jz_nand_init); 431 - 432 - static void __exit jz_nand_exit(void) 433 - { 434 - platform_driver_unregister(&jz_nand_driver); 435 - } 436 - module_exit(jz_nand_exit); 426 + module_platform_driver(jz_nand_driver); 437 427 438 428 MODULE_LICENSE("GPL"); 439 429 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+1 -13
drivers/mtd/nand/mpc5121_nfc.c
··· 879 879 }, 880 880 }; 881 881 882 - static int __init mpc5121_nfc_init(void) 883 - { 884 - return platform_driver_register(&mpc5121_nfc_driver); 885 - } 886 - 887 - module_init(mpc5121_nfc_init); 888 - 889 - static void __exit mpc5121_nfc_cleanup(void) 890 - { 891 - platform_driver_unregister(&mpc5121_nfc_driver); 892 - } 893 - 894 - module_exit(mpc5121_nfc_cleanup); 882 + module_platform_driver(mpc5121_nfc_driver); 895 883 896 884 MODULE_AUTHOR("Freescale Semiconductor, Inc."); 897 885 MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+4 -3
drivers/mtd/nand/nand_base.c
··· 3132 3132 * Bad block marker is stored in the last page of each block 3133 3133 * on Samsung and Hynix MLC devices; stored in first two pages 3134 3134 * of each block on Micron devices with 2KiB pages and on 3135 - * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan 3136 - * only the first page. 3135 + * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix. 3136 + * All others scan only the first page. 3137 3137 */ 3138 3138 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3139 3139 (*maf_id == NAND_MFR_SAMSUNG || ··· 3143 3143 (*maf_id == NAND_MFR_SAMSUNG || 3144 3144 *maf_id == NAND_MFR_HYNIX || 3145 3145 *maf_id == NAND_MFR_TOSHIBA || 3146 - *maf_id == NAND_MFR_AMD)) || 3146 + *maf_id == NAND_MFR_AMD || 3147 + *maf_id == NAND_MFR_MACRONIX)) || 3147 3148 (mtd->writesize == 2048 && 3148 3149 *maf_id == NAND_MFR_MICRON)) 3149 3150 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+7 -7
drivers/mtd/nand/nand_bbt.c
··· 201 201 from += marker_len; 202 202 marker_len = 0; 203 203 } 204 - res = mtd->read(mtd, from, len, &retlen, buf); 204 + res = mtd_read(mtd, from, len, &retlen, buf); 205 205 if (res < 0) { 206 206 if (mtd_is_eccerr(res)) { 207 207 pr_info("nand_bbt: ECC error in BBT at " ··· 298 298 if (td->options & NAND_BBT_VERSION) 299 299 len++; 300 300 301 - return mtd->read(mtd, offs, len, &retlen, buf); 301 + return mtd_read(mtd, offs, len, &retlen, buf); 302 302 } 303 303 304 304 /* Scan read raw data from flash */ ··· 317 317 ops.len = min(len, (size_t)mtd->writesize); 318 318 ops.oobbuf = buf + ops.len; 319 319 320 - res = mtd->read_oob(mtd, offs, &ops); 320 + res = mtd_read_oob(mtd, offs, &ops); 321 321 322 322 if (res) 323 323 return res; ··· 350 350 ops.oobbuf = oob; 351 351 ops.len = len; 352 352 353 - return mtd->write_oob(mtd, offs, &ops); 353 + return mtd_write_oob(mtd, offs, &ops); 354 354 } 355 355 356 356 static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td) ··· 434 434 * Read the full oob until read_oob is fixed to handle single 435 435 * byte reads for 16 bit buswidth. 
436 436 */ 437 - ret = mtd->read_oob(mtd, offs, &ops); 437 + ret = mtd_read_oob(mtd, offs, &ops); 438 438 /* Ignore ECC errors when checking for BBM */ 439 439 if (ret && !mtd_is_bitflip_or_eccerr(ret)) 440 440 return ret; ··· 756 756 /* Make it block aligned */ 757 757 to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1)); 758 758 len = 1 << this->bbt_erase_shift; 759 - res = mtd->read(mtd, to, len, &retlen, buf); 759 + res = mtd_read(mtd, to, len, &retlen, buf); 760 760 if (res < 0) { 761 761 if (retlen != len) { 762 762 pr_info("nand_bbt: error reading block " ··· 769 769 /* Read oob data */ 770 770 ops.ooblen = (len >> this->page_shift) * mtd->oobsize; 771 771 ops.oobbuf = &buf[len]; 772 - res = mtd->read_oob(mtd, to + mtd->writesize, &ops); 772 + res = mtd_read_oob(mtd, to + mtd->writesize, &ops); 773 773 if (res < 0 || ops.oobretlen != ops.ooblen) 774 774 goto outerr; 775 775
+3 -1
drivers/mtd/nand/nand_ids.c
··· 73 73 #define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR) 74 74 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 75 75 76 - /*512 Megabit */ 76 + /* 512 Megabit */ 77 77 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, 78 78 {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS}, 79 79 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, 80 80 {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS}, 81 + {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS}, 81 82 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, 82 83 {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16}, 83 84 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, ··· 177 176 {NAND_MFR_HYNIX, "Hynix"}, 178 177 {NAND_MFR_MICRON, "Micron"}, 179 178 {NAND_MFR_AMD, "AMD"}, 179 + {NAND_MFR_MACRONIX, "Macronix"}, 180 180 {0x0, "Unknown"} 181 181 }; 182 182
+1 -1
drivers/mtd/nand/nandsim.c
··· 737 737 return -EINVAL; 738 738 } 739 739 offset = erase_block_no * ns->geom.secsz; 740 - if (mtd->block_markbad(mtd, offset)) { 740 + if (mtd_block_markbad(mtd, offset)) { 741 741 NS_ERR("invalid badblocks.\n"); 742 742 return -EINVAL; 743 743 }
+1 -12
drivers/mtd/nand/ndfc.c
··· 294 294 .remove = __devexit_p(ndfc_remove), 295 295 }; 296 296 297 - static int __init ndfc_nand_init(void) 298 - { 299 - return platform_driver_register(&ndfc_driver); 300 - } 301 - 302 - static void __exit ndfc_nand_exit(void) 303 - { 304 - platform_driver_unregister(&ndfc_driver); 305 - } 306 - 307 - module_init(ndfc_nand_init); 308 - module_exit(ndfc_nand_exit); 297 + module_platform_driver(ndfc_driver); 309 298 310 299 MODULE_LICENSE("GPL"); 311 300 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+3 -15
drivers/mtd/nand/nomadik_nand.c
··· 201 201 struct nomadik_nand_host *host = dev_get_drvdata(dev); 202 202 int ret = 0; 203 203 if (host) 204 - ret = host->mtd.suspend(&host->mtd); 204 + ret = mtd_suspend(&host->mtd); 205 205 return ret; 206 206 } 207 207 ··· 209 209 { 210 210 struct nomadik_nand_host *host = dev_get_drvdata(dev); 211 211 if (host) 212 - host->mtd.resume(&host->mtd); 212 + mtd_resume(&host->mtd); 213 213 return 0; 214 214 } 215 215 ··· 228 228 }, 229 229 }; 230 230 231 - static int __init nand_nomadik_init(void) 232 - { 233 - pr_info("Nomadik NAND driver\n"); 234 - return platform_driver_register(&nomadik_nand_driver); 235 - } 236 - 237 - static void __exit nand_nomadik_exit(void) 238 - { 239 - platform_driver_unregister(&nomadik_nand_driver); 240 - } 241 - 242 - module_init(nand_nomadik_init); 243 - module_exit(nand_nomadik_exit); 231 + module_platform_driver(nomadik_nand_driver); 244 232 245 233 MODULE_LICENSE("GPL"); 246 234 MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
+1 -12
drivers/mtd/nand/nuc900_nand.c
··· 364 364 }, 365 365 }; 366 366 367 - static int __init nuc900_nand_init(void) 368 - { 369 - return platform_driver_register(&nuc900_nand_driver); 370 - } 371 - 372 - static void __exit nuc900_nand_exit(void) 373 - { 374 - platform_driver_unregister(&nuc900_nand_driver); 375 - } 376 - 377 - module_init(nuc900_nand_init); 378 - module_exit(nuc900_nand_exit); 367 + module_platform_driver(nuc900_nand_driver); 379 368 380 369 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 381 370 MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
+1 -14
drivers/mtd/nand/omap2.c
··· 1145 1145 }, 1146 1146 }; 1147 1147 1148 - static int __init omap_nand_init(void) 1149 - { 1150 - pr_info("%s driver initializing\n", DRIVER_NAME); 1151 - 1152 - return platform_driver_register(&omap_nand_driver); 1153 - } 1154 - 1155 - static void __exit omap_nand_exit(void) 1156 - { 1157 - platform_driver_unregister(&omap_nand_driver); 1158 - } 1159 - 1160 - module_init(omap_nand_init); 1161 - module_exit(omap_nand_exit); 1148 + module_platform_driver(omap_nand_driver); 1162 1149 1163 1150 MODULE_ALIAS("platform:" DRIVER_NAME); 1164 1151 MODULE_LICENSE("GPL");
+1 -11
drivers/mtd/nand/pasemi_nand.c
··· 230 230 .remove = pasemi_nand_remove, 231 231 }; 232 232 233 - static int __init pasemi_nand_init(void) 234 - { 235 - return platform_driver_register(&pasemi_nand_driver); 236 - } 237 - module_init(pasemi_nand_init); 238 - 239 - static void __exit pasemi_nand_exit(void) 240 - { 241 - platform_driver_unregister(&pasemi_nand_driver); 242 - } 243 - module_exit(pasemi_nand_exit); 233 + module_platform_driver(pasemi_nand_driver); 244 234 245 235 MODULE_LICENSE("GPL"); 246 236 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+1 -12
drivers/mtd/nand/plat_nand.c
··· 148 148 }, 149 149 }; 150 150 151 - static int __init plat_nand_init(void) 152 - { 153 - return platform_driver_register(&plat_nand_driver); 154 - } 155 - 156 - static void __exit plat_nand_exit(void) 157 - { 158 - platform_driver_unregister(&plat_nand_driver); 159 - } 160 - 161 - module_init(plat_nand_init); 162 - module_exit(plat_nand_exit); 151 + module_platform_driver(plat_nand_driver); 163 152 164 153 MODULE_LICENSE("GPL"); 165 154 MODULE_AUTHOR("Vitaly Wool");
+3 -13
drivers/mtd/nand/pxa3xx_nand.c
··· 1258 1258 1259 1259 for (cs = 0; cs < pdata->num_cs; cs++) { 1260 1260 mtd = info->host[cs]->mtd; 1261 - mtd->suspend(mtd); 1261 + mtd_suspend(mtd); 1262 1262 } 1263 1263 1264 1264 return 0; ··· 1291 1291 nand_writel(info, NDSR, NDSR_MASK); 1292 1292 for (cs = 0; cs < pdata->num_cs; cs++) { 1293 1293 mtd = info->host[cs]->mtd; 1294 - mtd->resume(mtd); 1294 + mtd_resume(mtd); 1295 1295 } 1296 1296 1297 1297 return 0; ··· 1311 1311 .resume = pxa3xx_nand_resume, 1312 1312 }; 1313 1313 1314 - static int __init pxa3xx_nand_init(void) 1315 - { 1316 - return platform_driver_register(&pxa3xx_nand_driver); 1317 - } 1318 - module_init(pxa3xx_nand_init); 1319 - 1320 - static void __exit pxa3xx_nand_exit(void) 1321 - { 1322 - platform_driver_unregister(&pxa3xx_nand_driver); 1323 - } 1324 - module_exit(pxa3xx_nand_exit); 1314 + module_platform_driver(pxa3xx_nand_driver); 1325 1315 1326 1316 MODULE_LICENSE("GPL"); 1327 1317 MODULE_DESCRIPTION("PXA3xx NAND controller driver");
+1 -11
drivers/mtd/nand/sharpsl.c
··· 230 230 .remove = __devexit_p(sharpsl_nand_remove), 231 231 }; 232 232 233 - static int __init sharpsl_nand_init(void) 234 - { 235 - return platform_driver_register(&sharpsl_nand_driver); 236 - } 237 - module_init(sharpsl_nand_init); 238 - 239 - static void __exit sharpsl_nand_exit(void) 240 - { 241 - platform_driver_unregister(&sharpsl_nand_driver); 242 - } 243 - module_exit(sharpsl_nand_exit); 233 + module_platform_driver(sharpsl_nand_driver); 244 234 245 235 MODULE_LICENSE("GPL"); 246 236 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+1 -1
drivers/mtd/nand/sm_common.c
··· 55 55 ops.datbuf = NULL; 56 56 57 57 58 - ret = mtd->write_oob(mtd, ofs, &ops); 58 + ret = mtd_write_oob(mtd, ofs, &ops); 59 59 if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { 60 60 printk(KERN_NOTICE 61 61 "sm_common: can't mark sector at %i as bad\n",
+1 -12
drivers/mtd/nand/socrates_nand.c
··· 273 273 .remove = __devexit_p(socrates_nand_remove), 274 274 }; 275 275 276 - static int __init socrates_nand_init(void) 277 - { 278 - return platform_driver_register(&socrates_nand_driver); 279 - } 280 - 281 - static void __exit socrates_nand_exit(void) 282 - { 283 - platform_driver_unregister(&socrates_nand_driver); 284 - } 285 - 286 - module_init(socrates_nand_init); 287 - module_exit(socrates_nand_exit); 276 + module_platform_driver(socrates_nand_driver); 288 277 289 278 MODULE_LICENSE("GPL"); 290 279 MODULE_AUTHOR("Ilya Yanok");
+1 -12
drivers/mtd/nand/tmio_nand.c
··· 533 533 .resume = tmio_resume, 534 534 }; 535 535 536 - static int __init tmio_init(void) 537 - { 538 - return platform_driver_register(&tmio_driver); 539 - } 540 - 541 - static void __exit tmio_exit(void) 542 - { 543 - platform_driver_unregister(&tmio_driver); 544 - } 545 - 546 - module_init(tmio_init); 547 - module_exit(tmio_exit); 536 + module_platform_driver(tmio_driver); 548 537 549 538 MODULE_LICENSE("GPL v2"); 550 539 MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
+1 -5
drivers/mtd/nand/txx9ndfmc.c
··· 298 298 drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL); 299 299 if (!drvdata) 300 300 return -ENOMEM; 301 - if (!devm_request_mem_region(&dev->dev, res->start, 302 - resource_size(res), dev_name(&dev->dev))) 303 - return -EBUSY; 304 - drvdata->base = devm_ioremap(&dev->dev, res->start, 305 - resource_size(res)); 301 + drvdata->base = devm_request_and_ioremap(&dev->dev, res); 306 302 if (!drvdata->base) 307 303 return -EBUSY; 308 304
+15 -10
drivers/mtd/nftlcore.c
··· 56 56 if (memcmp(mtd->name, "DiskOnChip", 10)) 57 57 return; 58 58 59 - if (!mtd->block_isbad) { 59 + if (!mtd_can_have_bb(mtd)) { 60 60 printk(KERN_ERR 61 61 "NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" 62 62 "Please use the new diskonchip driver under the NAND subsystem.\n"); ··· 153 153 ops.oobbuf = buf; 154 154 ops.datbuf = NULL; 155 155 156 - res = mtd->read_oob(mtd, offs & ~mask, &ops); 156 + res = mtd_read_oob(mtd, offs & ~mask, &ops); 157 157 *retlen = ops.oobretlen; 158 158 return res; 159 159 } ··· 174 174 ops.oobbuf = buf; 175 175 ops.datbuf = NULL; 176 176 177 - res = mtd->write_oob(mtd, offs & ~mask, &ops); 177 + res = mtd_write_oob(mtd, offs & ~mask, &ops); 178 178 *retlen = ops.oobretlen; 179 179 return res; 180 180 } ··· 198 198 ops.datbuf = buf; 199 199 ops.len = len; 200 200 201 - res = mtd->write_oob(mtd, offs & ~mask, &ops); 201 + res = mtd_write_oob(mtd, offs & ~mask, &ops); 202 202 *retlen = ops.retlen; 203 203 return res; 204 204 } ··· 423 423 if (BlockMap[block] == BLOCK_NIL) 424 424 continue; 425 425 426 - ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 427 - 512, &retlen, movebuf); 426 + ret = mtd_read(mtd, 427 + (nftl->EraseSize * BlockMap[block]) + (block * 512), 428 + 512, 429 + &retlen, 430 + movebuf); 428 431 if (ret < 0 && !mtd_is_bitflip(ret)) { 429 - ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) 430 - + (block * 512), 512, &retlen, 431 - movebuf); 432 + ret = mtd_read(mtd, 433 + (nftl->EraseSize * BlockMap[block]) + (block * 512), 434 + 512, 435 + &retlen, 436 + movebuf); 432 437 if (ret != -EIO) 433 438 printk("Error went away on retry.\n"); 434 439 } ··· 776 771 } else { 777 772 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs; 778 773 size_t retlen; 779 - int res = mtd->read(mtd, ptr, 512, &retlen, buffer); 774 + int res = mtd_read(mtd, ptr, 512, &retlen, buffer); 780 775 781 776 if (res < 0 && !mtd_is_bitflip(res)) 782 777 return -EIO;
+7 -6
drivers/mtd/nftlmount.c
··· 63 63 64 64 /* Check for ANAND header first. Then can whinge if it's found but later 65 65 checks fail */ 66 - ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, 67 - &retlen, buf); 66 + ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE, 67 + &retlen, buf); 68 68 /* We ignore ret in case the ECC of the MediaHeader is invalid 69 69 (which is apparently acceptable) */ 70 70 if (retlen != SECTORSIZE) { ··· 242 242 if (buf[i & (SECTORSIZE - 1)] != 0xff) 243 243 nftl->ReplUnitTable[i] = BLOCK_RESERVED; 244 244 #endif 245 - if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize)) 245 + if (mtd_block_isbad(nftl->mbd.mtd, 246 + i * nftl->EraseSize)) 246 247 nftl->ReplUnitTable[i] = BLOCK_RESERVED; 247 248 } 248 249 ··· 275 274 int i; 276 275 277 276 for (i = 0; i < len; i += SECTORSIZE) { 278 - if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf)) 277 + if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf)) 279 278 return -1; 280 279 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 281 280 return -1; ··· 327 326 instr->mtd = nftl->mbd.mtd; 328 327 instr->addr = block * nftl->EraseSize; 329 328 instr->len = nftl->EraseSize; 330 - mtd->erase(mtd, instr); 329 + mtd_erase(mtd, instr); 331 330 332 331 if (instr->state == MTD_ERASE_FAILED) { 333 332 printk("Error while formatting block %d\n", block); ··· 356 355 fail: 357 356 /* could not format, update the bad block table (caller is responsible 358 357 for setting the ReplUnitTable to BLOCK_RESERVED on failure) */ 359 - nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr); 358 + mtd_block_markbad(nftl->mbd.mtd, instr->addr); 360 359 return -1; 361 360 } 362 361
+2 -14
drivers/mtd/onenand/generic.c
··· 115 115 .remove = __devexit_p(generic_onenand_remove), 116 116 }; 117 117 118 - MODULE_ALIAS("platform:" DRIVER_NAME); 119 - 120 - static int __init generic_onenand_init(void) 121 - { 122 - return platform_driver_register(&generic_onenand_driver); 123 - } 124 - 125 - static void __exit generic_onenand_exit(void) 126 - { 127 - platform_driver_unregister(&generic_onenand_driver); 128 - } 129 - 130 - module_init(generic_onenand_init); 131 - module_exit(generic_onenand_exit); 118 + module_platform_driver(generic_onenand_driver); 132 119 133 120 MODULE_LICENSE("GPL"); 134 121 MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); 135 122 MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards"); 123 + MODULE_ALIAS("platform:" DRIVER_NAME);
+1 -2
drivers/mtd/onenand/onenand_base.c
··· 2633 2633 */ 2634 2634 static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2635 2635 { 2636 - struct onenand_chip *this = mtd->priv; 2637 2636 int ret; 2638 2637 2639 2638 ret = onenand_block_isbad(mtd, ofs); ··· 2644 2645 } 2645 2646 2646 2647 onenand_get_device(mtd, FL_WRITING); 2647 - ret = this->block_markbad(mtd, ofs); 2648 + ret = mtd_block_markbad(mtd, ofs); 2648 2649 onenand_release_device(mtd); 2649 2650 return ret; 2650 2651 }
+1 -12
drivers/mtd/onenand/samsung.c
··· 1133 1133 .remove = __devexit_p(s3c_onenand_remove), 1134 1134 }; 1135 1135 1136 - static int __init s3c_onenand_init(void) 1137 - { 1138 - return platform_driver_register(&s3c_onenand_driver); 1139 - } 1140 - 1141 - static void __exit s3c_onenand_exit(void) 1142 - { 1143 - platform_driver_unregister(&s3c_onenand_driver); 1144 - } 1145 - 1146 - module_init(s3c_onenand_init); 1147 - module_exit(s3c_onenand_exit); 1136 + module_platform_driver(s3c_onenand_driver); 1148 1137 1149 1138 MODULE_LICENSE("GPL"); 1150 1139 MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+6 -6
drivers/mtd/redboot.c
··· 78 78 79 79 if ( directory < 0 ) { 80 80 offset = master->size + directory * master->erasesize; 81 - while (master->block_isbad && 82 - master->block_isbad(master, offset)) { 81 + while (mtd_can_have_bb(master) && 82 + mtd_block_isbad(master, offset)) { 83 83 if (!offset) { 84 84 nogood: 85 85 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); ··· 89 89 } 90 90 } else { 91 91 offset = directory * master->erasesize; 92 - while (master->block_isbad && 93 - master->block_isbad(master, offset)) { 92 + while (mtd_can_have_bb(master) && 93 + mtd_block_isbad(master, offset)) { 94 94 offset += master->erasesize; 95 95 if (offset == master->size) 96 96 goto nogood; ··· 104 104 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 105 105 master->name, offset); 106 106 107 - ret = master->read(master, offset, 108 - master->erasesize, &retlen, (void *)buf); 107 + ret = mtd_read(master, offset, master->erasesize, &retlen, 108 + (void *)buf); 109 109 110 110 if (ret) 111 111 goto out;
+22 -24
drivers/mtd/rfd_ftl.c
··· 200 200 part->sector_map[i] = -1; 201 201 202 202 for (i=0, blocks_found=0; i<part->total_blocks; i++) { 203 - rc = part->mbd.mtd->read(part->mbd.mtd, 204 - i * part->block_size, part->header_size, 205 - &retlen, (u_char*)part->header_cache); 203 + rc = mtd_read(part->mbd.mtd, i * part->block_size, 204 + part->header_size, &retlen, 205 + (u_char *)part->header_cache); 206 206 207 207 if (!rc && retlen != part->header_size) 208 208 rc = -EIO; ··· 250 250 251 251 addr = part->sector_map[sector]; 252 252 if (addr != -1) { 253 - rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE, 254 - &retlen, (u_char*)buf); 253 + rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 254 + (u_char *)buf); 255 255 if (!rc && retlen != SECTOR_SIZE) 256 256 rc = -EIO; 257 257 ··· 304 304 part->blocks[i].used_sectors = 0; 305 305 part->blocks[i].erases++; 306 306 307 - rc = part->mbd.mtd->write(part->mbd.mtd, 308 - part->blocks[i].offset, sizeof(magic), &retlen, 309 - (u_char*)&magic); 307 + rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic), 308 + &retlen, (u_char *)&magic); 310 309 311 310 if (!rc && retlen != sizeof(magic)) 312 311 rc = -EIO; ··· 341 342 part->blocks[block].state = BLOCK_ERASING; 342 343 part->blocks[block].free_sectors = 0; 343 344 344 - rc = part->mbd.mtd->erase(part->mbd.mtd, erase); 345 + rc = mtd_erase(part->mbd.mtd, erase); 345 346 346 347 if (rc) { 347 348 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " ··· 371 372 if (!map) 372 373 goto err2; 373 374 374 - rc = part->mbd.mtd->read(part->mbd.mtd, 375 - part->blocks[block_no].offset, part->header_size, 376 - &retlen, (u_char*)map); 375 + rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset, 376 + part->header_size, &retlen, (u_char *)map); 377 377 378 378 if (!rc && retlen != part->header_size) 379 379 rc = -EIO; ··· 411 413 } 412 414 continue; 413 415 } 414 - rc = part->mbd.mtd->read(part->mbd.mtd, addr, 415 - SECTOR_SIZE, &retlen, sector_data); 416 + rc = 
mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 417 + sector_data); 416 418 417 419 if (!rc && retlen != SECTOR_SIZE) 418 420 rc = -EIO; ··· 448 450 int rc; 449 451 450 452 /* we have a race if sync doesn't exist */ 451 - if (part->mbd.mtd->sync) 452 - part->mbd.mtd->sync(part->mbd.mtd); 453 + mtd_sync(part->mbd.mtd); 453 454 454 455 score = 0x7fffffff; /* MAX_INT */ 455 456 best_block = -1; ··· 560 563 } 561 564 } 562 565 563 - rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset, 564 - part->header_size, &retlen, (u_char*)part->header_cache); 566 + rc = mtd_read(part->mbd.mtd, part->blocks[block].offset, 567 + part->header_size, &retlen, 568 + (u_char *)part->header_cache); 565 569 566 570 if (!rc && retlen != part->header_size) 567 571 rc = -EIO; ··· 593 595 594 596 addr = part->blocks[block].offset + 595 597 (HEADER_MAP_OFFSET + offset) * sizeof(u16); 596 - rc = part->mbd.mtd->write(part->mbd.mtd, addr, 597 - sizeof(del), &retlen, (u_char*)&del); 598 + rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen, 599 + (u_char *)&del); 598 600 599 601 if (!rc && retlen != sizeof(del)) 600 602 rc = -EIO; ··· 666 668 667 669 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE + 668 670 block->offset; 669 - rc = part->mbd.mtd->write(part->mbd.mtd, 670 - addr, SECTOR_SIZE, &retlen, (u_char*)buf); 671 + rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 672 + (u_char *)buf); 671 673 672 674 if (!rc && retlen != SECTOR_SIZE) 673 675 rc = -EIO; ··· 686 688 part->header_cache[i + HEADER_MAP_OFFSET] = entry; 687 689 688 690 addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16); 689 - rc = part->mbd.mtd->write(part->mbd.mtd, addr, 690 - sizeof(entry), &retlen, (u_char*)&entry); 691 + rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen, 692 + (u_char *)&entry); 691 693 692 694 if (!rc && retlen != sizeof(entry)) 693 695 rc = -EIO;
+6 -6
drivers/mtd/sm_ftl.c
··· 25 25 struct workqueue_struct *cache_flush_workqueue; 26 26 27 27 static int cache_timeout = 1000; 28 - module_param(cache_timeout, bool, S_IRUGO); 28 + module_param(cache_timeout, int, S_IRUGO); 29 29 MODULE_PARM_DESC(cache_timeout, 30 30 "Timeout (in ms) for cache flush (1000 ms default"); 31 31 ··· 278 278 279 279 /* Unfortunately, oob read will _always_ succeed, 280 280 despite card removal..... */ 281 - ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 281 + ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 282 282 283 283 /* Test for unknown errors */ 284 284 if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) { ··· 343 343 ops.ooblen = SM_OOB_SIZE; 344 344 ops.oobbuf = (void *)oob; 345 345 346 - ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 346 + ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 347 347 348 348 /* Now we assume that hardware will catch write bitflip errors */ 349 349 /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */ ··· 479 479 return -EIO; 480 480 } 481 481 482 - if (mtd->erase(mtd, &erase)) { 482 + if (mtd_erase(mtd, &erase)) { 483 483 sm_printk("erase of block %d in zone %d failed", 484 484 block, zone_num); 485 485 goto error; ··· 645 645 if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE) 646 646 return -ENODEV; 647 647 648 - /* We use these functions for IO */ 649 - if (!mtd->read_oob || !mtd->write_oob) 648 + /* We use OOB */ 649 + if (!mtd_has_oob(mtd)) 650 650 return -ENODEV; 651 651 652 652 /* Find geometry information */
+6 -6
drivers/mtd/ssfdc.c
··· 122 122 * is not SSFDC formatted 123 123 */ 124 124 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) { 125 - if (!mtd->block_isbad(mtd, offset)) { 126 - ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, 127 - sect_buf); 125 + if (mtd_block_isbad(mtd, offset)) { 126 + ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, 127 + sect_buf); 128 128 129 129 /* CIS pattern match on the sector buffer */ 130 130 if (ret < 0 || retlen != SECTOR_SIZE) { ··· 156 156 size_t retlen; 157 157 loff_t offset = (loff_t)sect_no << SECTOR_SHIFT; 158 158 159 - ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf); 159 + ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf); 160 160 if (ret < 0 || retlen != SECTOR_SIZE) 161 161 return -1; 162 162 ··· 175 175 ops.oobbuf = buf; 176 176 ops.datbuf = NULL; 177 177 178 - ret = mtd->read_oob(mtd, offs, &ops); 178 + ret = mtd_read_oob(mtd, offs, &ops); 179 179 if (ret < 0 || ops.oobretlen != OOB_SIZE) 180 180 return -1; 181 181 ··· 255 255 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len; 256 256 phys_block++) { 257 257 offset = (unsigned long)phys_block * ssfdc->erase_size; 258 - if (mtd->block_isbad(mtd, offset)) 258 + if (mtd_block_isbad(mtd, offset)) 259 259 continue; /* skip bad blocks */ 260 260 261 261 ret = read_raw_oob(mtd, offset, oob_buf);
+14 -14
drivers/mtd/tests/mtd_oobtest.c
··· 78 78 ei.addr = addr; 79 79 ei.len = mtd->erasesize; 80 80 81 - err = mtd->erase(mtd, &ei); 81 + err = mtd_erase(mtd, &ei); 82 82 if (err) { 83 83 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 84 84 return err; ··· 139 139 ops.ooboffs = use_offset; 140 140 ops.datbuf = NULL; 141 141 ops.oobbuf = writebuf; 142 - err = mtd->write_oob(mtd, addr, &ops); 142 + err = mtd_write_oob(mtd, addr, &ops); 143 143 if (err || ops.oobretlen != use_len) { 144 144 printk(PRINT_PREF "error: writeoob failed at %#llx\n", 145 145 (long long)addr); ··· 192 192 ops.ooboffs = use_offset; 193 193 ops.datbuf = NULL; 194 194 ops.oobbuf = readbuf; 195 - err = mtd->read_oob(mtd, addr, &ops); 195 + err = mtd_read_oob(mtd, addr, &ops); 196 196 if (err || ops.oobretlen != use_len) { 197 197 printk(PRINT_PREF "error: readoob failed at %#llx\n", 198 198 (long long)addr); ··· 219 219 ops.ooboffs = 0; 220 220 ops.datbuf = NULL; 221 221 ops.oobbuf = readbuf; 222 - err = mtd->read_oob(mtd, addr, &ops); 222 + err = mtd_read_oob(mtd, addr, &ops); 223 223 if (err || ops.oobretlen != mtd->ecclayout->oobavail) { 224 224 printk(PRINT_PREF "error: readoob failed at " 225 225 "%#llx\n", (long long)addr); ··· 284 284 ops.ooboffs = 0; 285 285 ops.datbuf = NULL; 286 286 ops.oobbuf = readbuf; 287 - err = mtd->read_oob(mtd, addr, &ops); 287 + err = mtd_read_oob(mtd, addr, &ops); 288 288 if (err || ops.oobretlen != len) { 289 289 printk(PRINT_PREF "error: readoob failed at %#llx\n", 290 290 (long long)addr); ··· 329 329 int ret; 330 330 loff_t addr = ebnum * mtd->erasesize; 331 331 332 - ret = mtd->block_isbad(mtd, addr); 332 + ret = mtd_block_isbad(mtd, addr); 333 333 if (ret) 334 334 printk(PRINT_PREF "block %d is bad\n", ebnum); 335 335 return ret; ··· 524 524 ops.oobbuf = writebuf; 525 525 printk(PRINT_PREF "attempting to start write past end of OOB\n"); 526 526 printk(PRINT_PREF "an error is expected...\n"); 527 - err = mtd->write_oob(mtd, addr0, &ops); 527 + err = mtd_write_oob(mtd, 
addr0, &ops); 528 528 if (err) { 529 529 printk(PRINT_PREF "error occurred as expected\n"); 530 530 err = 0; ··· 544 544 ops.oobbuf = readbuf; 545 545 printk(PRINT_PREF "attempting to start read past end of OOB\n"); 546 546 printk(PRINT_PREF "an error is expected...\n"); 547 - err = mtd->read_oob(mtd, addr0, &ops); 547 + err = mtd_read_oob(mtd, addr0, &ops); 548 548 if (err) { 549 549 printk(PRINT_PREF "error occurred as expected\n"); 550 550 err = 0; ··· 568 568 ops.oobbuf = writebuf; 569 569 printk(PRINT_PREF "attempting to write past end of device\n"); 570 570 printk(PRINT_PREF "an error is expected...\n"); 571 - err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); 571 + err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 572 572 if (err) { 573 573 printk(PRINT_PREF "error occurred as expected\n"); 574 574 err = 0; ··· 588 588 ops.oobbuf = readbuf; 589 589 printk(PRINT_PREF "attempting to read past end of device\n"); 590 590 printk(PRINT_PREF "an error is expected...\n"); 591 - err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); 591 + err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 592 592 if (err) { 593 593 printk(PRINT_PREF "error occurred as expected\n"); 594 594 err = 0; ··· 612 612 ops.oobbuf = writebuf; 613 613 printk(PRINT_PREF "attempting to write past end of device\n"); 614 614 printk(PRINT_PREF "an error is expected...\n"); 615 - err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); 615 + err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 616 616 if (err) { 617 617 printk(PRINT_PREF "error occurred as expected\n"); 618 618 err = 0; ··· 632 632 ops.oobbuf = readbuf; 633 633 printk(PRINT_PREF "attempting to read past end of device\n"); 634 634 printk(PRINT_PREF "an error is expected...\n"); 635 - err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); 635 + err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 636 636 if (err) { 637 637 printk(PRINT_PREF "error occurred as expected\n"); 638 638 
err = 0; ··· 670 670 ops.ooboffs = 0; 671 671 ops.datbuf = NULL; 672 672 ops.oobbuf = writebuf; 673 - err = mtd->write_oob(mtd, addr, &ops); 673 + err = mtd_write_oob(mtd, addr, &ops); 674 674 if (err) 675 675 goto out; 676 676 if (i % 256 == 0) ··· 698 698 ops.ooboffs = 0; 699 699 ops.datbuf = NULL; 700 700 ops.oobbuf = readbuf; 701 - err = mtd->read_oob(mtd, addr, &ops); 701 + err = mtd_read_oob(mtd, addr, &ops); 702 702 if (err) 703 703 goto out; 704 704 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
+25 -32
drivers/mtd/tests/mtd_pagetest.c
··· 77 77 ei.addr = addr; 78 78 ei.len = mtd->erasesize; 79 79 80 - err = mtd->erase(mtd, &ei); 80 + err = mtd_erase(mtd, &ei); 81 81 if (err) { 82 82 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 83 83 return err; ··· 95 95 static int write_eraseblock(int ebnum) 96 96 { 97 97 int err = 0; 98 - size_t written = 0; 98 + size_t written; 99 99 loff_t addr = ebnum * mtd->erasesize; 100 100 101 101 set_random_data(writebuf, mtd->erasesize); 102 102 cond_resched(); 103 - err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf); 103 + err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf); 104 104 if (err || written != mtd->erasesize) 105 105 printk(PRINT_PREF "error: write failed at %#llx\n", 106 106 (long long)addr); ··· 111 111 static int verify_eraseblock(int ebnum) 112 112 { 113 113 uint32_t j; 114 - size_t read = 0; 114 + size_t read; 115 115 int err = 0, i; 116 116 loff_t addr0, addrn; 117 117 loff_t addr = ebnum * mtd->erasesize; ··· 127 127 set_random_data(writebuf, mtd->erasesize); 128 128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { 129 129 /* Do a read to set the internal dataRAMs to different data */ 130 - err = mtd->read(mtd, addr0, bufsize, &read, twopages); 130 + err = mtd_read(mtd, addr0, bufsize, &read, twopages); 131 131 if (mtd_is_bitflip(err)) 132 132 err = 0; 133 133 if (err || read != bufsize) { ··· 135 135 (long long)addr0); 136 136 return err; 137 137 } 138 - err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 138 + err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages); 139 139 if (mtd_is_bitflip(err)) 140 140 err = 0; 141 141 if (err || read != bufsize) { ··· 144 144 return err; 145 145 } 146 146 memset(twopages, 0, bufsize); 147 - read = 0; 148 - err = mtd->read(mtd, addr, bufsize, &read, twopages); 147 + err = mtd_read(mtd, addr, bufsize, &read, twopages); 149 148 if (mtd_is_bitflip(err)) 150 149 err = 0; 151 150 if (err || read != bufsize) { ··· 162 163 if (addr <= addrn - pgsize - 
pgsize && !bbt[ebnum + 1]) { 163 164 unsigned long oldnext = next; 164 165 /* Do a read to set the internal dataRAMs to different data */ 165 - err = mtd->read(mtd, addr0, bufsize, &read, twopages); 166 + err = mtd_read(mtd, addr0, bufsize, &read, twopages); 166 167 if (mtd_is_bitflip(err)) 167 168 err = 0; 168 169 if (err || read != bufsize) { ··· 170 171 (long long)addr0); 171 172 return err; 172 173 } 173 - err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 174 + err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages); 174 175 if (mtd_is_bitflip(err)) 175 176 err = 0; 176 177 if (err || read != bufsize) { ··· 179 180 return err; 180 181 } 181 182 memset(twopages, 0, bufsize); 182 - read = 0; 183 - err = mtd->read(mtd, addr, bufsize, &read, twopages); 183 + err = mtd_read(mtd, addr, bufsize, &read, twopages); 184 184 if (mtd_is_bitflip(err)) 185 185 err = 0; 186 186 if (err || read != bufsize) { ··· 201 203 202 204 static int crosstest(void) 203 205 { 204 - size_t read = 0; 206 + size_t read; 205 207 int err = 0, i; 206 208 loff_t addr, addr0, addrn; 207 209 unsigned char *pp1, *pp2, *pp3, *pp4; ··· 226 228 addrn -= mtd->erasesize; 227 229 228 230 /* Read 2nd-to-last page to pp1 */ 229 - read = 0; 230 231 addr = addrn - pgsize - pgsize; 231 - err = mtd->read(mtd, addr, pgsize, &read, pp1); 232 + err = mtd_read(mtd, addr, pgsize, &read, pp1); 232 233 if (mtd_is_bitflip(err)) 233 234 err = 0; 234 235 if (err || read != pgsize) { ··· 238 241 } 239 242 240 243 /* Read 3rd-to-last page to pp1 */ 241 - read = 0; 242 244 addr = addrn - pgsize - pgsize - pgsize; 243 - err = mtd->read(mtd, addr, pgsize, &read, pp1); 245 + err = mtd_read(mtd, addr, pgsize, &read, pp1); 244 246 if (mtd_is_bitflip(err)) 245 247 err = 0; 246 248 if (err || read != pgsize) { ··· 250 254 } 251 255 252 256 /* Read first page to pp2 */ 253 - read = 0; 254 257 addr = addr0; 255 258 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 256 - err = mtd->read(mtd, addr, 
pgsize, &read, pp2); 259 + err = mtd_read(mtd, addr, pgsize, &read, pp2); 257 260 if (mtd_is_bitflip(err)) 258 261 err = 0; 259 262 if (err || read != pgsize) { ··· 263 268 } 264 269 265 270 /* Read last page to pp3 */ 266 - read = 0; 267 271 addr = addrn - pgsize; 268 272 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 269 - err = mtd->read(mtd, addr, pgsize, &read, pp3); 273 + err = mtd_read(mtd, addr, pgsize, &read, pp3); 270 274 if (mtd_is_bitflip(err)) 271 275 err = 0; 272 276 if (err || read != pgsize) { ··· 276 282 } 277 283 278 284 /* Read first page again to pp4 */ 279 - read = 0; 280 285 addr = addr0; 281 286 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 282 - err = mtd->read(mtd, addr, pgsize, &read, pp4); 287 + err = mtd_read(mtd, addr, pgsize, &read, pp4); 283 288 if (mtd_is_bitflip(err)) 284 289 err = 0; 285 290 if (err || read != pgsize) { ··· 302 309 303 310 static int erasecrosstest(void) 304 311 { 305 - size_t read = 0, written = 0; 312 + size_t read, written; 306 313 int err = 0, i, ebnum, ebnum2; 307 314 loff_t addr0; 308 315 char *readbuf = twopages; ··· 328 335 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 329 336 set_random_data(writebuf, pgsize); 330 337 strcpy(writebuf, "There is no data like this!"); 331 - err = mtd->write(mtd, addr0, pgsize, &written, writebuf); 338 + err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 332 339 if (err || written != pgsize) { 333 340 printk(PRINT_PREF "error: write failed at %#llx\n", 334 341 (long long)addr0); ··· 337 344 338 345 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 339 346 memset(readbuf, 0, pgsize); 340 - err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 347 + err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 341 348 if (mtd_is_bitflip(err)) 342 349 err = 0; 343 350 if (err || read != pgsize) { ··· 361 368 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 362 369 set_random_data(writebuf, pgsize); 363 370 
strcpy(writebuf, "There is no data like this!"); 364 - err = mtd->write(mtd, addr0, pgsize, &written, writebuf); 371 + err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 365 372 if (err || written != pgsize) { 366 373 printk(PRINT_PREF "error: write failed at %#llx\n", 367 374 (long long)addr0); ··· 375 382 376 383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 377 384 memset(readbuf, 0, pgsize); 378 - err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 385 + err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 379 386 if (mtd_is_bitflip(err)) 380 387 err = 0; 381 388 if (err || read != pgsize) { ··· 398 405 399 406 static int erasetest(void) 400 407 { 401 - size_t read = 0, written = 0; 408 + size_t read, written; 402 409 int err = 0, i, ebnum, ok = 1; 403 410 loff_t addr0; 404 411 ··· 418 425 419 426 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 420 427 set_random_data(writebuf, pgsize); 421 - err = mtd->write(mtd, addr0, pgsize, &written, writebuf); 428 + err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 422 429 if (err || written != pgsize) { 423 430 printk(PRINT_PREF "error: write failed at %#llx\n", 424 431 (long long)addr0); ··· 431 438 return err; 432 439 433 440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 434 - err = mtd->read(mtd, addr0, pgsize, &read, twopages); 441 + err = mtd_read(mtd, addr0, pgsize, &read, twopages); 435 442 if (mtd_is_bitflip(err)) 436 443 err = 0; 437 444 if (err || read != pgsize) { ··· 462 469 loff_t addr = ebnum * mtd->erasesize; 463 470 int ret; 464 471 465 - ret = mtd->block_isbad(mtd, addr); 472 + ret = mtd_block_isbad(mtd, addr); 466 473 if (ret) 467 474 printk(PRINT_PREF "block %d is bad\n", ebnum); 468 475 return ret;
+5 -6
drivers/mtd/tests/mtd_readtest.c
··· 44 44 45 45 static int read_eraseblock_by_page(int ebnum) 46 46 { 47 - size_t read = 0; 47 + size_t read; 48 48 int i, ret, err = 0; 49 49 loff_t addr = ebnum * mtd->erasesize; 50 50 void *buf = iobuf; ··· 52 52 53 53 for (i = 0; i < pgcnt; i++) { 54 54 memset(buf, 0 , pgcnt); 55 - ret = mtd->read(mtd, addr, pgsize, &read, buf); 55 + ret = mtd_read(mtd, addr, pgsize, &read, buf); 56 56 if (ret == -EUCLEAN) 57 57 ret = 0; 58 58 if (ret || read != pgsize) { ··· 74 74 ops.ooboffs = 0; 75 75 ops.datbuf = NULL; 76 76 ops.oobbuf = oobbuf; 77 - ret = mtd->read_oob(mtd, addr, &ops); 77 + ret = mtd_read_oob(mtd, addr, &ops); 78 78 if ((ret && !mtd_is_bitflip(ret)) || 79 79 ops.oobretlen != mtd->oobsize) { 80 80 printk(PRINT_PREF "error: read oob failed at " ··· 132 132 loff_t addr = ebnum * mtd->erasesize; 133 133 int ret; 134 134 135 - ret = mtd->block_isbad(mtd, addr); 135 + ret = mtd_block_isbad(mtd, addr); 136 136 if (ret) 137 137 printk(PRINT_PREF "block %d is bad\n", ebnum); 138 138 return ret; ··· 148 148 return -ENOMEM; 149 149 } 150 150 151 - /* NOR flash does not implement block_isbad */ 152 - if (mtd->block_isbad == NULL) 151 + if (!mtd_can_have_bb(mtd)) 153 152 return 0; 154 153 155 154 printk(PRINT_PREF "scanning for bad eraseblocks\n");
+18 -19
drivers/mtd/tests/mtd_speedtest.c
··· 79 79 ei.addr = addr; 80 80 ei.len = mtd->erasesize; 81 81 82 - err = mtd->erase(mtd, &ei); 82 + err = mtd_erase(mtd, &ei); 83 83 if (err) { 84 84 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 85 85 return err; ··· 105 105 ei.addr = addr; 106 106 ei.len = mtd->erasesize * blocks; 107 107 108 - err = mtd->erase(mtd, &ei); 108 + err = mtd_erase(mtd, &ei); 109 109 if (err) { 110 110 printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", 111 111 err, ebnum, blocks); ··· 139 139 140 140 static int write_eraseblock(int ebnum) 141 141 { 142 - size_t written = 0; 142 + size_t written; 143 143 int err = 0; 144 144 loff_t addr = ebnum * mtd->erasesize; 145 145 146 - err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf); 146 + err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf); 147 147 if (err || written != mtd->erasesize) { 148 148 printk(PRINT_PREF "error: write failed at %#llx\n", addr); 149 149 if (!err) ··· 155 155 156 156 static int write_eraseblock_by_page(int ebnum) 157 157 { 158 - size_t written = 0; 158 + size_t written; 159 159 int i, err = 0; 160 160 loff_t addr = ebnum * mtd->erasesize; 161 161 void *buf = iobuf; 162 162 163 163 for (i = 0; i < pgcnt; i++) { 164 - err = mtd->write(mtd, addr, pgsize, &written, buf); 164 + err = mtd_write(mtd, addr, pgsize, &written, buf); 165 165 if (err || written != pgsize) { 166 166 printk(PRINT_PREF "error: write failed at %#llx\n", 167 167 addr); ··· 178 178 179 179 static int write_eraseblock_by_2pages(int ebnum) 180 180 { 181 - size_t written = 0, sz = pgsize * 2; 181 + size_t written, sz = pgsize * 2; 182 182 int i, n = pgcnt / 2, err = 0; 183 183 loff_t addr = ebnum * mtd->erasesize; 184 184 void *buf = iobuf; 185 185 186 186 for (i = 0; i < n; i++) { 187 - err = mtd->write(mtd, addr, sz, &written, buf); 187 + err = mtd_write(mtd, addr, sz, &written, buf); 188 188 if (err || written != sz) { 189 189 printk(PRINT_PREF "error: write failed at %#llx\n", 190 190 addr); ··· 196 
196 buf += sz; 197 197 } 198 198 if (pgcnt % 2) { 199 - err = mtd->write(mtd, addr, pgsize, &written, buf); 199 + err = mtd_write(mtd, addr, pgsize, &written, buf); 200 200 if (err || written != pgsize) { 201 201 printk(PRINT_PREF "error: write failed at %#llx\n", 202 202 addr); ··· 210 210 211 211 static int read_eraseblock(int ebnum) 212 212 { 213 - size_t read = 0; 213 + size_t read; 214 214 int err = 0; 215 215 loff_t addr = ebnum * mtd->erasesize; 216 216 217 - err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); 217 + err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf); 218 218 /* Ignore corrected ECC errors */ 219 219 if (mtd_is_bitflip(err)) 220 220 err = 0; ··· 229 229 230 230 static int read_eraseblock_by_page(int ebnum) 231 231 { 232 - size_t read = 0; 232 + size_t read; 233 233 int i, err = 0; 234 234 loff_t addr = ebnum * mtd->erasesize; 235 235 void *buf = iobuf; 236 236 237 237 for (i = 0; i < pgcnt; i++) { 238 - err = mtd->read(mtd, addr, pgsize, &read, buf); 238 + err = mtd_read(mtd, addr, pgsize, &read, buf); 239 239 /* Ignore corrected ECC errors */ 240 240 if (mtd_is_bitflip(err)) 241 241 err = 0; ··· 255 255 256 256 static int read_eraseblock_by_2pages(int ebnum) 257 257 { 258 - size_t read = 0, sz = pgsize * 2; 258 + size_t read, sz = pgsize * 2; 259 259 int i, n = pgcnt / 2, err = 0; 260 260 loff_t addr = ebnum * mtd->erasesize; 261 261 void *buf = iobuf; 262 262 263 263 for (i = 0; i < n; i++) { 264 - err = mtd->read(mtd, addr, sz, &read, buf); 264 + err = mtd_read(mtd, addr, sz, &read, buf); 265 265 /* Ignore corrected ECC errors */ 266 266 if (mtd_is_bitflip(err)) 267 267 err = 0; ··· 276 276 buf += sz; 277 277 } 278 278 if (pgcnt % 2) { 279 - err = mtd->read(mtd, addr, pgsize, &read, buf); 279 + err = mtd_read(mtd, addr, pgsize, &read, buf); 280 280 /* Ignore corrected ECC errors */ 281 281 if (mtd_is_bitflip(err)) 282 282 err = 0; ··· 296 296 loff_t addr = ebnum * mtd->erasesize; 297 297 int ret; 298 298 299 - ret = 
mtd->block_isbad(mtd, addr); 299 + ret = mtd_block_isbad(mtd, addr); 300 300 if (ret) 301 301 printk(PRINT_PREF "block %d is bad\n", ebnum); 302 302 return ret; ··· 336 336 return -ENOMEM; 337 337 } 338 338 339 - /* NOR flash does not implement block_isbad */ 340 - if (mtd->block_isbad == NULL) 339 + if (!mtd_can_have_bb(mtd)) 341 340 goto out; 342 341 343 342 printk(PRINT_PREF "scanning for bad eraseblocks\n");
+14 -8
drivers/mtd/tests/mtd_stresstest.c
··· 112 112 ei.addr = addr; 113 113 ei.len = mtd->erasesize; 114 114 115 - err = mtd->erase(mtd, &ei); 115 + err = mtd_erase(mtd, &ei); 116 116 if (unlikely(err)) { 117 117 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 118 118 return err; ··· 132 132 loff_t addr = ebnum * mtd->erasesize; 133 133 int ret; 134 134 135 - ret = mtd->block_isbad(mtd, addr); 135 + ret = mtd_block_isbad(mtd, addr); 136 136 if (ret) 137 137 printk(PRINT_PREF "block %d is bad\n", ebnum); 138 138 return ret; ··· 140 140 141 141 static int do_read(void) 142 142 { 143 - size_t read = 0; 143 + size_t read; 144 144 int eb = rand_eb(); 145 145 int offs = rand_offs(); 146 146 int len = rand_len(offs), err; ··· 153 153 len = mtd->erasesize - offs; 154 154 } 155 155 addr = eb * mtd->erasesize + offs; 156 - err = mtd->read(mtd, addr, len, &read, readbuf); 156 + err = mtd_read(mtd, addr, len, &read, readbuf); 157 157 if (mtd_is_bitflip(err)) 158 158 err = 0; 159 159 if (unlikely(err || read != len)) { ··· 169 169 static int do_write(void) 170 170 { 171 171 int eb = rand_eb(), offs, err, len; 172 - size_t written = 0; 172 + size_t written; 173 173 loff_t addr; 174 174 175 175 offs = offsets[eb]; ··· 192 192 } 193 193 } 194 194 addr = eb * mtd->erasesize + offs; 195 - err = mtd->write(mtd, addr, len, &written, writebuf); 195 + err = mtd_write(mtd, addr, len, &written, writebuf); 196 196 if (unlikely(err || written != len)) { 197 197 printk(PRINT_PREF "error: write failed at 0x%llx\n", 198 198 (long long)addr); ··· 227 227 return -ENOMEM; 228 228 } 229 229 230 - /* NOR flash does not implement block_isbad */ 231 - if (mtd->block_isbad == NULL) 230 + if (!mtd_can_have_bb(mtd)) 232 231 return 0; 233 232 234 233 printk(PRINT_PREF "scanning for bad eraseblocks\n"); ··· 283 284 (unsigned long long)mtd->size, mtd->erasesize, 284 285 pgsize, ebcnt, pgcnt, mtd->oobsize); 285 286 287 + if (ebcnt < 2) { 288 + printk(PRINT_PREF "error: need at least 2 eraseblocks\n"); 289 + err = -ENOSPC; 290 + 
goto out_put_mtd; 291 + } 292 + 286 293 /* Read or write up 2 eraseblocks at a time */ 287 294 bufsize = mtd->erasesize * 2; 288 295 ··· 327 322 kfree(bbt); 328 323 vfree(writebuf); 329 324 vfree(readbuf); 325 + out_put_mtd: 330 326 put_mtd_device(mtd); 331 327 if (err) 332 328 printk(PRINT_PREF "error %d occurred\n", err);
+14 -18
drivers/mtd/tests/mtd_subpagetest.c
··· 80 80 ei.addr = addr; 81 81 ei.len = mtd->erasesize; 82 82 83 - err = mtd->erase(mtd, &ei); 83 + err = mtd_erase(mtd, &ei); 84 84 if (err) { 85 85 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 86 86 return err; ··· 115 115 116 116 static int write_eraseblock(int ebnum) 117 117 { 118 - size_t written = 0; 118 + size_t written; 119 119 int err = 0; 120 120 loff_t addr = ebnum * mtd->erasesize; 121 121 122 122 set_random_data(writebuf, subpgsize); 123 - err = mtd->write(mtd, addr, subpgsize, &written, writebuf); 123 + err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 124 124 if (unlikely(err || written != subpgsize)) { 125 125 printk(PRINT_PREF "error: write failed at %#llx\n", 126 126 (long long)addr); ··· 134 134 addr += subpgsize; 135 135 136 136 set_random_data(writebuf, subpgsize); 137 - err = mtd->write(mtd, addr, subpgsize, &written, writebuf); 137 + err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 138 138 if (unlikely(err || written != subpgsize)) { 139 139 printk(PRINT_PREF "error: write failed at %#llx\n", 140 140 (long long)addr); ··· 150 150 151 151 static int write_eraseblock2(int ebnum) 152 152 { 153 - size_t written = 0; 153 + size_t written; 154 154 int err = 0, k; 155 155 loff_t addr = ebnum * mtd->erasesize; 156 156 ··· 158 158 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) 159 159 break; 160 160 set_random_data(writebuf, subpgsize * k); 161 - err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf); 161 + err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf); 162 162 if (unlikely(err || written != subpgsize * k)) { 163 163 printk(PRINT_PREF "error: write failed at %#llx\n", 164 164 (long long)addr); ··· 189 189 190 190 static int verify_eraseblock(int ebnum) 191 191 { 192 - size_t read = 0; 192 + size_t read; 193 193 int err = 0; 194 194 loff_t addr = ebnum * mtd->erasesize; 195 195 196 196 set_random_data(writebuf, subpgsize); 197 197 clear_data(readbuf, subpgsize); 198 - read = 0; 
199 - err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 198 + err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 200 199 if (unlikely(err || read != subpgsize)) { 201 200 if (mtd_is_bitflip(err) && read == subpgsize) { 202 201 printk(PRINT_PREF "ECC correction at %#llx\n", ··· 222 223 223 224 set_random_data(writebuf, subpgsize); 224 225 clear_data(readbuf, subpgsize); 225 - read = 0; 226 - err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 226 + err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 227 227 if (unlikely(err || read != subpgsize)) { 228 228 if (mtd_is_bitflip(err) && read == subpgsize) { 229 229 printk(PRINT_PREF "ECC correction at %#llx\n", ··· 250 252 251 253 static int verify_eraseblock2(int ebnum) 252 254 { 253 - size_t read = 0; 255 + size_t read; 254 256 int err = 0, k; 255 257 loff_t addr = ebnum * mtd->erasesize; 256 258 ··· 259 261 break; 260 262 set_random_data(writebuf, subpgsize * k); 261 263 clear_data(readbuf, subpgsize * k); 262 - read = 0; 263 - err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); 264 + err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf); 264 265 if (unlikely(err || read != subpgsize * k)) { 265 266 if (mtd_is_bitflip(err) && read == subpgsize * k) { 266 267 printk(PRINT_PREF "ECC correction at %#llx\n", ··· 285 288 static int verify_eraseblock_ff(int ebnum) 286 289 { 287 290 uint32_t j; 288 - size_t read = 0; 291 + size_t read; 289 292 int err = 0; 290 293 loff_t addr = ebnum * mtd->erasesize; 291 294 292 295 memset(writebuf, 0xff, subpgsize); 293 296 for (j = 0; j < mtd->erasesize / subpgsize; ++j) { 294 297 clear_data(readbuf, subpgsize); 295 - read = 0; 296 - err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 298 + err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 297 299 if (unlikely(err || read != subpgsize)) { 298 300 if (mtd_is_bitflip(err) && read == subpgsize) { 299 301 printk(PRINT_PREF "ECC correction at %#llx\n", ··· 340 344 loff_t addr = ebnum * mtd->erasesize; 341 345 int 
ret; 342 346 343 - ret = mtd->block_isbad(mtd, addr); 347 + ret = mtd_block_isbad(mtd, addr); 344 348 if (ret) 345 349 printk(PRINT_PREF "block %d is bad\n", ebnum); 346 350 return ret;
+7 -8
drivers/mtd/tests/mtd_torturetest.c
··· 105 105 ei.addr = addr; 106 106 ei.len = mtd->erasesize; 107 107 108 - err = mtd->erase(mtd, &ei); 108 + err = mtd_erase(mtd, &ei); 109 109 if (err) { 110 110 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 111 111 return err; ··· 127 127 static inline int check_eraseblock(int ebnum, unsigned char *buf) 128 128 { 129 129 int err, retries = 0; 130 - size_t read = 0; 130 + size_t read; 131 131 loff_t addr = ebnum * mtd->erasesize; 132 132 size_t len = mtd->erasesize; 133 133 ··· 137 137 } 138 138 139 139 retry: 140 - err = mtd->read(mtd, addr, len, &read, check_buf); 140 + err = mtd_read(mtd, addr, len, &read, check_buf); 141 141 if (mtd_is_bitflip(err)) 142 142 printk(PRINT_PREF "single bit flip occurred at EB %d " 143 143 "MTD reported that it was fixed.\n", ebnum); ··· 181 181 static inline int write_pattern(int ebnum, void *buf) 182 182 { 183 183 int err; 184 - size_t written = 0; 184 + size_t written; 185 185 loff_t addr = ebnum * mtd->erasesize; 186 186 size_t len = mtd->erasesize; 187 187 ··· 189 189 addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; 190 190 len = pgcnt * pgsize; 191 191 } 192 - err = mtd->write(mtd, addr, len, &written, buf); 192 + err = mtd_write(mtd, addr, len, &written, buf); 193 193 if (err) { 194 194 printk(PRINT_PREF "error %d while writing EB %d, written %zd" 195 195 " bytes\n", err, ebnum, written); ··· 290 290 * Check if there is a bad eraseblock among those we are going to test. 291 291 */ 292 292 memset(&bad_ebs[0], 0, sizeof(int) * ebcnt); 293 - if (mtd->block_isbad) { 293 + if (mtd_can_have_bb(mtd)) { 294 294 for (i = eb; i < eb + ebcnt; i++) { 295 - err = mtd->block_isbad(mtd, 296 - (loff_t)i * mtd->erasesize); 295 + err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); 297 296 298 297 if (err < 0) { 299 298 printk(PRINT_PREF "block_isbad() returned %d "
+1 -1
drivers/mtd/ubi/build.c
··· 664 664 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); 665 665 ubi->flash_size = ubi->mtd->size; 666 666 667 - if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) 667 + if (mtd_can_have_bb(ubi->mtd)) 668 668 ubi->bad_allowed = 1; 669 669 670 670 if (ubi->mtd->type == MTD_NORFLASH) {
+1 -1
drivers/mtd/ubi/debug.c
··· 216 216 buf = vmalloc(len); 217 217 if (!buf) 218 218 return; 219 - err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 219 + err = mtd_read(ubi->mtd, addr, len, &read, buf); 220 220 if (err && err != -EUCLEAN) { 221 221 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 222 222 "read %zd bytes", err, len, pnum, offset, read);
+9 -10
drivers/mtd/ubi/io.c
··· 170 170 171 171 addr = (loff_t)pnum * ubi->peb_size + offset; 172 172 retry: 173 - err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 173 + err = mtd_read(ubi->mtd, addr, len, &read, buf); 174 174 if (err) { 175 175 const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : ""; 176 176 ··· 289 289 } 290 290 291 291 addr = (loff_t)pnum * ubi->peb_size + offset; 292 - err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf); 292 + err = mtd_write(ubi->mtd, addr, len, &written, buf); 293 293 if (err) { 294 294 ubi_err("error %d while writing %d bytes to PEB %d:%d, written " 295 295 "%zd bytes", err, len, pnum, offset, written); ··· 361 361 ei.callback = erase_callback; 362 362 ei.priv = (unsigned long)&wq; 363 363 364 - err = ubi->mtd->erase(ubi->mtd, &ei); 364 + err = mtd_erase(ubi->mtd, &ei); 365 365 if (err) { 366 366 if (retries++ < UBI_IO_RETRIES) { 367 367 dbg_io("error %d while erasing PEB %d, retry", ··· 525 525 * the header comment in scan.c for more information). 526 526 */ 527 527 addr = (loff_t)pnum * ubi->peb_size; 528 - err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); 528 + err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); 529 529 if (!err) { 530 530 addr += ubi->vid_hdr_aloffset; 531 - err = ubi->mtd->write(ubi->mtd, addr, 4, &written, 532 - (void *)&data); 531 + err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); 533 532 if (!err) 534 533 return 0; 535 534 } ··· 634 635 if (ubi->bad_allowed) { 635 636 int ret; 636 637 637 - ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size); 638 + ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); 638 639 if (ret < 0) 639 640 ubi_err("error %d while checking if PEB %d is bad", 640 641 ret, pnum); ··· 669 670 if (!ubi->bad_allowed) 670 671 return 0; 671 672 672 - err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size); 673 + err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); 673 674 if (err) 674 675 ubi_err("cannot mark PEB %d bad, error %d", 
pnum, err); 675 676 return err; ··· 1356 1357 return 0; 1357 1358 } 1358 1359 1359 - err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); 1360 + err = mtd_read(ubi->mtd, addr, len, &read, buf1); 1360 1361 if (err && !mtd_is_bitflip(err)) 1361 1362 goto out_free; 1362 1363 ··· 1420 1421 return 0; 1421 1422 } 1422 1423 1423 - err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 1424 + err = mtd_read(ubi->mtd, addr, len, &read, buf); 1424 1425 if (err && !mtd_is_bitflip(err)) { 1425 1426 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 1426 1427 "read %zd bytes", err, len, pnum, offset, read);
+1 -3
drivers/mtd/ubi/kapi.c
··· 714 714 if (!ubi) 715 715 return -ENODEV; 716 716 717 - if (ubi->mtd->sync) 718 - ubi->mtd->sync(ubi->mtd); 719 - 717 + mtd_sync(ubi->mtd); 720 718 ubi_put_device(ubi); 721 719 return 0; 722 720 }
+8 -9
fs/jffs2/erase.c
··· 74 74 ((struct erase_priv_struct *)instr->priv)->jeb = jeb; 75 75 ((struct erase_priv_struct *)instr->priv)->c = c; 76 76 77 - ret = c->mtd->erase(c->mtd, instr); 77 + ret = mtd_erase(c->mtd, instr); 78 78 if (!ret) 79 79 return; 80 80 ··· 336 336 uint32_t ofs; 337 337 size_t retlen; 338 338 int ret = -EIO; 339 + unsigned long *wordebuf; 339 340 340 - if (c->mtd->point) { 341 - unsigned long *wordebuf; 342 - 343 - ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size, 344 - &retlen, &ebuf, NULL); 341 + ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, 342 + &ebuf, NULL); 343 + if (ret != -EOPNOTSUPP) { 345 344 if (ret) { 346 345 D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); 347 346 goto do_flash_read; ··· 348 349 if (retlen < c->sector_size) { 349 350 /* Don't muck about if it won't let us point to the whole erase sector */ 350 351 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); 351 - c->mtd->unpoint(c->mtd, jeb->offset, retlen); 352 + mtd_unpoint(c->mtd, jeb->offset, retlen); 352 353 goto do_flash_read; 353 354 } 354 355 wordebuf = ebuf-sizeof(*wordebuf); ··· 357 358 if (*++wordebuf != ~0) 358 359 break; 359 360 } while(--retlen); 360 - c->mtd->unpoint(c->mtd, jeb->offset, c->sector_size); 361 + mtd_unpoint(c->mtd, jeb->offset, c->sector_size); 361 362 if (retlen) { 362 363 printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n", 363 364 *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf)); ··· 380 381 381 382 *bad_offset = ofs; 382 383 383 - ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf); 384 + ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); 384 385 if (ret) { 385 386 printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); 386 387 ret = -EIO;
-1
fs/jffs2/fs.c
··· 466 466 467 467 if (insert_inode_locked(inode) < 0) { 468 468 make_bad_inode(inode); 469 - unlock_new_inode(inode); 470 469 iput(inode); 471 470 return ERR_PTR(-EINVAL); 472 471 }
+10 -12
fs/jffs2/readinode.c
··· 62 62 #ifndef __ECOS 63 63 /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(), 64 64 * adding and jffs2_flash_read_end() interface. */ 65 - if (c->mtd->point) { 66 - err = c->mtd->point(c->mtd, ofs, len, &retlen, 67 - (void **)&buffer, NULL); 68 - if (!err && retlen < len) { 69 - JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 70 - c->mtd->unpoint(c->mtd, ofs, retlen); 71 - } else if (err) 65 + err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL); 66 + if (!err && retlen < len) { 67 + JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 68 + mtd_unpoint(c->mtd, ofs, retlen); 69 + } else if (err) { 70 + if (err != -EOPNOTSUPP) 72 71 JFFS2_WARNING("MTD point failed: error code %d.\n", err); 73 - else 74 - pointed = 1; /* succefully pointed to device */ 75 - } 72 + } else 73 + pointed = 1; /* succefully pointed to device */ 76 74 #endif 77 75 78 76 if (!pointed) { ··· 99 101 kfree(buffer); 100 102 #ifndef __ECOS 101 103 else 102 - c->mtd->unpoint(c->mtd, ofs, len); 104 + mtd_unpoint(c->mtd, ofs, len); 103 105 #endif 104 106 105 107 if (crc != tn->data_crc) { ··· 135 137 kfree(buffer); 136 138 #ifndef __ECOS 137 139 else 138 - c->mtd->unpoint(c->mtd, ofs, len); 140 + mtd_unpoint(c->mtd, ofs, len); 139 141 #endif 140 142 return err; 141 143 }
+6 -6
fs/jffs2/scan.c
··· 97 97 size_t pointlen, try_size; 98 98 99 99 if (c->mtd->point) { 100 - ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen, 101 - (void **)&flashbuf, NULL); 100 + ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, 101 + (void **)&flashbuf, NULL); 102 102 if (!ret && pointlen < c->mtd->size) { 103 103 /* Don't muck about if it won't let us point to the whole flash */ 104 104 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen)); 105 - c->mtd->unpoint(c->mtd, 0, pointlen); 105 + mtd_unpoint(c->mtd, 0, pointlen); 106 106 flashbuf = NULL; 107 107 } 108 - if (ret) 108 + if (ret && ret != -EOPNOTSUPP) 109 109 D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); 110 110 } 111 111 #endif ··· 273 273 kfree(flashbuf); 274 274 #ifndef __ECOS 275 275 else 276 - c->mtd->unpoint(c->mtd, 0, c->mtd->size); 276 + mtd_unpoint(c->mtd, 0, c->mtd->size); 277 277 #endif 278 278 kfree(s); 279 279 return ret; ··· 455 455 if (jffs2_cleanmarker_oob(c)) { 456 456 int ret; 457 457 458 - if (c->mtd->block_isbad(c->mtd, jeb->offset)) 458 + if (mtd_block_isbad(c->mtd, jeb->offset)) 459 459 return BLK_STATE_BADBLOCK; 460 460 461 461 ret = jffs2_check_nand_cleanmarker(c, jeb);
+1 -3
fs/jffs2/super.c
··· 335 335 jffs2_flash_cleanup(c); 336 336 kfree(c->inocache_list); 337 337 jffs2_clear_xattr_subsystem(c); 338 - if (c->mtd->sync) 339 - c->mtd->sync(c->mtd); 340 - 338 + mtd_sync(c->mtd); 341 339 D1(printk(KERN_DEBUG "jffs2_put_super returning\n")); 342 340 } 343 341
+18 -20
fs/jffs2/wbuf.c
··· 228 228 size_t retlen; 229 229 char *eccstr; 230 230 231 - ret = c->mtd->read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); 231 + ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); 232 232 if (ret && ret != -EUCLEAN && ret != -EBADMSG) { 233 233 printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret); 234 234 return ret; ··· 337 337 } 338 338 339 339 /* Do the read... */ 340 - ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); 340 + ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, 341 + buf); 341 342 342 343 /* ECC recovered ? */ 343 344 if ((ret == -EUCLEAN || ret == -EBADMSG) && ··· 414 413 if (breakme++ == 20) { 415 414 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); 416 415 breakme = 0; 417 - c->mtd->write(c->mtd, ofs, towrite, &retlen, 418 - brokenbuf); 416 + mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); 419 417 ret = -EIO; 420 418 } else 421 419 #endif 422 - ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, 423 - rewrite_buf); 420 + ret = mtd_write(c->mtd, ofs, towrite, &retlen, 421 + rewrite_buf); 424 422 425 423 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { 426 424 /* Argh. We tried. Really we did. 
*/ ··· 619 619 if (breakme++ == 20) { 620 620 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); 621 621 breakme = 0; 622 - c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, 623 - brokenbuf); 622 + mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, 623 + brokenbuf); 624 624 ret = -EIO; 625 625 } else 626 626 #endif 627 627 628 - ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); 628 + ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, 629 + &retlen, c->wbuf); 629 630 630 631 if (ret) { 631 632 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret); ··· 862 861 v += wbuf_retlen; 863 862 864 863 if (vlen >= c->wbuf_pagesize) { 865 - ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen), 866 - &wbuf_retlen, v); 864 + ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), 865 + &wbuf_retlen, v); 867 866 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) 868 867 goto outfile; 869 868 ··· 949 948 int ret; 950 949 951 950 if (!jffs2_is_writebuffered(c)) 952 - return c->mtd->read(c->mtd, ofs, len, retlen, buf); 951 + return mtd_read(c->mtd, ofs, len, retlen, buf); 953 952 954 953 /* Read flash */ 955 954 down_read(&c->wbuf_sem); 956 - ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); 955 + ret = mtd_read(c->mtd, ofs, len, retlen, buf); 957 956 958 957 if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { 959 958 if (ret == -EBADMSG) ··· 1032 1031 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1033 1032 ops.datbuf = NULL; 1034 1033 1035 - ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops); 1034 + ret = mtd_read_oob(c->mtd, jeb->offset, &ops); 1036 1035 if (ret || ops.oobretlen != ops.ooblen) { 1037 1036 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" 1038 1037 " bytes, read %zd bytes, error %d\n", ··· 1075 1074 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1076 1075 ops.datbuf = NULL; 1077 1076 1078 - ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops); 1077 
+ ret = mtd_read_oob(c->mtd, jeb->offset, &ops); 1079 1078 if (ret || ops.oobretlen != ops.ooblen) { 1080 1079 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" 1081 1080 " bytes, read %zd bytes, error %d\n", ··· 1101 1100 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1102 1101 ops.datbuf = NULL; 1103 1102 1104 - ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops); 1103 + ret = mtd_write_oob(c->mtd, jeb->offset, &ops); 1105 1104 if (ret || ops.oobretlen != ops.ooblen) { 1106 1105 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd" 1107 1106 " bytes, read %zd bytes, error %d\n", ··· 1130 1129 if( ++jeb->bad_count < MAX_ERASE_FAILURES) 1131 1130 return 0; 1132 1131 1133 - if (!c->mtd->block_markbad) 1134 - return 1; // What else can we do? 1135 - 1136 1132 printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset); 1137 - ret = c->mtd->block_markbad(c->mtd, bad_offset); 1133 + ret = mtd_block_markbad(c->mtd, bad_offset); 1138 1134 1139 1135 if (ret) { 1140 1136 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+2 -30
fs/jffs2/writev.c
··· 13 13 #include <linux/mtd/mtd.h> 14 14 #include "nodelist.h" 15 15 16 - /* This ought to be in core MTD code. All registered MTD devices 17 - without writev should have this put in place. Bug the MTD 18 - maintainer */ 19 - static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs, 20 - unsigned long count, loff_t to, size_t *retlen) 21 - { 22 - unsigned long i; 23 - size_t totlen = 0, thislen; 24 - int ret = 0; 25 - 26 - for (i=0; i<count; i++) { 27 - if (!vecs[i].iov_len) 28 - continue; 29 - ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base); 30 - totlen += thislen; 31 - if (ret || thislen != vecs[i].iov_len) 32 - break; 33 - to += vecs[i].iov_len; 34 - } 35 - if (retlen) 36 - *retlen = totlen; 37 - return ret; 38 - } 39 - 40 16 int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, 41 17 unsigned long count, loff_t to, size_t *retlen) 42 18 { ··· 26 50 } 27 51 } 28 52 29 - if (c->mtd->writev) 30 - return c->mtd->writev(c->mtd, vecs, count, to, retlen); 31 - else { 32 - return mtd_fake_writev(c->mtd, vecs, count, to, retlen); 33 - } 53 + return mtd_writev(c->mtd, vecs, count, to, retlen); 34 54 } 35 55 36 56 int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, 37 57 size_t *retlen, const u_char *buf) 38 58 { 39 59 int ret; 40 - ret = c->mtd->write(c->mtd, ofs, len, retlen, buf); 60 + ret = mtd_write(c->mtd, ofs, len, retlen, buf); 41 61 42 62 if (jffs2_sum_active()) { 43 63 struct kvec vecs[1];
+40 -38
fs/logfs/dev_mtd.c
··· 13 13 14 14 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) 15 15 16 - static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf) 16 + static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len, 17 + void *buf) 17 18 { 18 19 struct mtd_info *mtd = logfs_super(sb)->s_mtd; 19 20 size_t retlen; 20 21 int ret; 21 22 22 - ret = mtd->read(mtd, ofs, len, &retlen, buf); 23 + ret = mtd_read(mtd, ofs, len, &retlen, buf); 23 24 BUG_ON(ret == -EINVAL); 24 25 if (ret) 25 26 return ret; ··· 32 31 return 0; 33 32 } 34 33 35 - static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf) 34 + static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len, 35 + void *buf) 36 36 { 37 37 struct logfs_super *super = logfs_super(sb); 38 38 struct mtd_info *mtd = super->s_mtd; ··· 49 47 BUG_ON(len > PAGE_CACHE_SIZE); 50 48 page_start = ofs & PAGE_CACHE_MASK; 51 49 page_end = PAGE_CACHE_ALIGN(ofs + len) - 1; 52 - ret = mtd->write(mtd, ofs, len, &retlen, buf); 50 + ret = mtd_write(mtd, ofs, len, &retlen, buf); 53 51 if (ret || (retlen != len)) 54 52 return -EIO; 55 53 ··· 62 60 * asynchronous properties. So just to prevent the first implementor of such 63 61 * a thing from breaking logfs in 2350, we do the usual pointless dance to 64 62 * declare a completion variable and wait for completion before returning 65 - * from mtd_erase(). What an exercise in futility! 63 + * from logfs_mtd_erase(). What an exercise in futility! 
66 64 */ 67 65 static void logfs_erase_callback(struct erase_info *ei) 68 66 { 69 67 complete((struct completion *)ei->priv); 70 68 } 71 69 72 - static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len) 70 + static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs, 71 + size_t len) 73 72 { 74 73 struct logfs_super *super = logfs_super(sb); 75 74 struct address_space *mapping = super->s_mapping_inode->i_mapping; ··· 87 84 return 0; 88 85 } 89 86 90 - static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len, 87 + static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len, 91 88 int ensure_write) 92 89 { 93 90 struct mtd_info *mtd = logfs_super(sb)->s_mtd; ··· 105 102 ei.len = len; 106 103 ei.callback = logfs_erase_callback; 107 104 ei.priv = (long)&complete; 108 - ret = mtd->erase(mtd, &ei); 105 + ret = mtd_erase(mtd, &ei); 109 106 if (ret) 110 107 return -EIO; 111 108 112 109 wait_for_completion(&complete); 113 110 if (ei.state != MTD_ERASE_DONE) 114 111 return -EIO; 115 - return mtd_erase_mapping(sb, ofs, len); 112 + return logfs_mtd_erase_mapping(sb, ofs, len); 116 113 } 117 114 118 - static void mtd_sync(struct super_block *sb) 115 + static void logfs_mtd_sync(struct super_block *sb) 119 116 { 120 117 struct mtd_info *mtd = logfs_super(sb)->s_mtd; 121 118 122 - if (mtd->sync) 123 - mtd->sync(mtd); 119 + mtd_sync(mtd); 124 120 } 125 121 126 - static int mtd_readpage(void *_sb, struct page *page) 122 + static int logfs_mtd_readpage(void *_sb, struct page *page) 127 123 { 128 124 struct super_block *sb = _sb; 129 125 int err; 130 126 131 - err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, 127 + err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, 132 128 page_address(page)); 133 129 if (err == -EUCLEAN || err == -EBADMSG) { 134 130 /* -EBADMSG happens regularly on power failures */ ··· 145 143 return err; 146 144 } 147 145 148 - static struct page *mtd_find_first_sb(struct super_block *sb, 
u64 *ofs) 146 + static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs) 149 147 { 150 148 struct logfs_super *super = logfs_super(sb); 151 149 struct address_space *mapping = super->s_mapping_inode->i_mapping; 152 - filler_t *filler = mtd_readpage; 150 + filler_t *filler = logfs_mtd_readpage; 153 151 struct mtd_info *mtd = super->s_mtd; 154 152 155 - if (!mtd->block_isbad) 153 + if (!mtd_can_have_bb(mtd)) 156 154 return NULL; 157 155 158 156 *ofs = 0; 159 - while (mtd->block_isbad(mtd, *ofs)) { 157 + while (mtd_block_isbad(mtd, *ofs)) { 160 158 *ofs += mtd->erasesize; 161 159 if (*ofs >= mtd->size) 162 160 return NULL; ··· 165 163 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); 166 164 } 167 165 168 - static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs) 166 + static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs) 169 167 { 170 168 struct logfs_super *super = logfs_super(sb); 171 169 struct address_space *mapping = super->s_mapping_inode->i_mapping; 172 - filler_t *filler = mtd_readpage; 170 + filler_t *filler = logfs_mtd_readpage; 173 171 struct mtd_info *mtd = super->s_mtd; 174 172 175 - if (!mtd->block_isbad) 173 + if (!mtd_can_have_bb(mtd)) 176 174 return NULL; 177 175 178 176 *ofs = mtd->size - mtd->erasesize; 179 - while (mtd->block_isbad(mtd, *ofs)) { 177 + while (mtd_block_isbad(mtd, *ofs)) { 180 178 *ofs -= mtd->erasesize; 181 179 if (*ofs <= 0) 182 180 return NULL; ··· 186 184 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); 187 185 } 188 186 189 - static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, 187 + static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, 190 188 size_t nr_pages) 191 189 { 192 190 struct logfs_super *super = logfs_super(sb); ··· 198 196 page = find_lock_page(mapping, index + i); 199 197 BUG_ON(!page); 200 198 201 - err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, 202 - 
page_address(page)); 199 + err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, 200 + page_address(page)); 203 201 unlock_page(page); 204 202 page_cache_release(page); 205 203 if (err) ··· 208 206 return 0; 209 207 } 210 208 211 - static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len) 209 + static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len) 212 210 { 213 211 struct logfs_super *super = logfs_super(sb); 214 212 int head; ··· 229 227 len += head; 230 228 } 231 229 len = PAGE_ALIGN(len); 232 - __mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); 230 + __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); 233 231 } 234 232 235 - static void mtd_put_device(struct logfs_super *s) 233 + static void logfs_mtd_put_device(struct logfs_super *s) 236 234 { 237 235 put_mtd_device(s->s_mtd); 238 236 } 239 237 240 - static int mtd_can_write_buf(struct super_block *sb, u64 ofs) 238 + static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs) 241 239 { 242 240 struct logfs_super *super = logfs_super(sb); 243 241 void *buf; ··· 246 244 buf = kmalloc(super->s_writesize, GFP_KERNEL); 247 245 if (!buf) 248 246 return -ENOMEM; 249 - err = mtd_read(sb, ofs, super->s_writesize, buf); 247 + err = logfs_mtd_read(sb, ofs, super->s_writesize, buf); 250 248 if (err) 251 249 goto out; 252 250 if (memchr_inv(buf, 0xff, super->s_writesize)) ··· 257 255 } 258 256 259 257 static const struct logfs_device_ops mtd_devops = { 260 - .find_first_sb = mtd_find_first_sb, 261 - .find_last_sb = mtd_find_last_sb, 262 - .readpage = mtd_readpage, 263 - .writeseg = mtd_writeseg, 264 - .erase = mtd_erase, 265 - .can_write_buf = mtd_can_write_buf, 266 - .sync = mtd_sync, 267 - .put_device = mtd_put_device, 258 + .find_first_sb = logfs_mtd_find_first_sb, 259 + .find_last_sb = logfs_mtd_find_last_sb, 260 + .readpage = logfs_mtd_readpage, 261 + .writeseg = logfs_mtd_writeseg, 262 + .erase = logfs_mtd_erase, 263 + .can_write_buf = 
logfs_mtd_can_write_buf, 264 + .sync = logfs_mtd_sync, 265 + .put_device = logfs_mtd_put_device, 268 266 }; 269 267 270 268 int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
+13 -15
fs/romfs/mmap-nommu.c
··· 28 28 struct inode *inode = file->f_mapping->host; 29 29 struct mtd_info *mtd = inode->i_sb->s_mtd; 30 30 unsigned long isize, offset, maxpages, lpages; 31 + int ret; 31 32 32 33 if (!mtd) 33 - goto cant_map_directly; 34 + return (unsigned long) -ENOSYS; 34 35 35 36 /* the mapping mustn't extend beyond the EOF */ 36 37 lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 42 41 if ((pgoff >= maxpages) || (maxpages - pgoff < lpages)) 43 42 return (unsigned long) -EINVAL; 44 43 45 - /* we need to call down to the MTD layer to do the actual mapping */ 46 - if (mtd->get_unmapped_area) { 47 - if (addr != 0) 48 - return (unsigned long) -EINVAL; 44 + if (addr != 0) 45 + return (unsigned long) -EINVAL; 49 46 50 - if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 51 - return (unsigned long) -EINVAL; 47 + if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 48 + return (unsigned long) -EINVAL; 52 49 53 - offset += ROMFS_I(inode)->i_dataoffset; 54 - if (offset > mtd->size - len) 55 - return (unsigned long) -EINVAL; 50 + offset += ROMFS_I(inode)->i_dataoffset; 51 + if (offset > mtd->size - len) 52 + return (unsigned long) -EINVAL; 56 53 57 - return mtd->get_unmapped_area(mtd, len, offset, flags); 58 - } 59 - 60 - cant_map_directly: 61 - return (unsigned long) -ENOSYS; 54 + ret = mtd_get_unmapped_area(mtd, len, offset, flags); 55 + if (ret == -EOPNOTSUPP) 56 + ret = -ENOSYS; 57 + return (unsigned long) ret; 62 58 } 63 59 64 60 /*
+8 -8
include/linux/mtd/cfi.h
··· 354 354 onecmd = cmd; 355 355 break; 356 356 case 2: 357 - onecmd = cpu_to_cfi16(cmd); 357 + onecmd = cpu_to_cfi16(map, cmd); 358 358 break; 359 359 case 4: 360 - onecmd = cpu_to_cfi32(cmd); 360 + onecmd = cpu_to_cfi32(map, cmd); 361 361 break; 362 362 } 363 363 ··· 437 437 case 1: 438 438 break; 439 439 case 2: 440 - res = cfi16_to_cpu(res); 440 + res = cfi16_to_cpu(map, res); 441 441 break; 442 442 case 4: 443 - res = cfi32_to_cpu(res); 443 + res = cfi32_to_cpu(map, res); 444 444 break; 445 445 default: BUG(); 446 446 } ··· 480 480 if (map_bankwidth_is_1(map)) { 481 481 return val.x[0]; 482 482 } else if (map_bankwidth_is_2(map)) { 483 - return cfi16_to_cpu(val.x[0]); 483 + return cfi16_to_cpu(map, val.x[0]); 484 484 } else { 485 485 /* No point in a 64-bit byteswap since that would just be 486 486 swapping the responses from different chips, and we are 487 487 only interested in one chip (a representative sample) */ 488 - return cfi32_to_cpu(val.x[0]); 488 + return cfi32_to_cpu(map, val.x[0]); 489 489 } 490 490 } 491 491 ··· 496 496 if (map_bankwidth_is_1(map)) { 497 497 return val.x[0] & 0xff; 498 498 } else if (map_bankwidth_is_2(map)) { 499 - return cfi16_to_cpu(val.x[0]); 499 + return cfi16_to_cpu(map, val.x[0]); 500 500 } else { 501 501 /* No point in a 64-bit byteswap since that would just be 502 502 swapping the responses from different chips, and we are 503 503 only interested in one chip (a representative sample) */ 504 - return cfi32_to_cpu(val.x[0]); 504 + return cfi32_to_cpu(map, val.x[0]); 505 505 } 506 506 } 507 507
+28 -46
include/linux/mtd/cfi_endian.h
··· 19 19 20 20 #include <asm/byteorder.h> 21 21 22 - #ifndef CONFIG_MTD_CFI_ADV_OPTIONS 22 + #define CFI_HOST_ENDIAN 1 23 + #define CFI_LITTLE_ENDIAN 2 24 + #define CFI_BIG_ENDIAN 3 23 25 24 - #define CFI_HOST_ENDIAN 25 - 26 - #else 27 - 28 - #ifdef CONFIG_MTD_CFI_NOSWAP 29 - #define CFI_HOST_ENDIAN 30 - #endif 31 - 32 - #ifdef CONFIG_MTD_CFI_LE_BYTE_SWAP 33 - #define CFI_LITTLE_ENDIAN 34 - #endif 35 - 36 - #ifdef CONFIG_MTD_CFI_BE_BYTE_SWAP 37 - #define CFI_BIG_ENDIAN 38 - #endif 39 - 40 - #endif 41 - 42 - #if defined(CFI_LITTLE_ENDIAN) 43 - #define cpu_to_cfi8(x) (x) 44 - #define cfi8_to_cpu(x) (x) 45 - #define cpu_to_cfi16(x) cpu_to_le16(x) 46 - #define cpu_to_cfi32(x) cpu_to_le32(x) 47 - #define cpu_to_cfi64(x) cpu_to_le64(x) 48 - #define cfi16_to_cpu(x) le16_to_cpu(x) 49 - #define cfi32_to_cpu(x) le32_to_cpu(x) 50 - #define cfi64_to_cpu(x) le64_to_cpu(x) 51 - #elif defined (CFI_BIG_ENDIAN) 52 - #define cpu_to_cfi8(x) (x) 53 - #define cfi8_to_cpu(x) (x) 54 - #define cpu_to_cfi16(x) cpu_to_be16(x) 55 - #define cpu_to_cfi32(x) cpu_to_be32(x) 56 - #define cpu_to_cfi64(x) cpu_to_be64(x) 57 - #define cfi16_to_cpu(x) be16_to_cpu(x) 58 - #define cfi32_to_cpu(x) be32_to_cpu(x) 59 - #define cfi64_to_cpu(x) be64_to_cpu(x) 60 - #elif defined (CFI_HOST_ENDIAN) 61 - #define cpu_to_cfi8(x) (x) 62 - #define cfi8_to_cpu(x) (x) 63 - #define cpu_to_cfi16(x) (x) 64 - #define cpu_to_cfi32(x) (x) 65 - #define cpu_to_cfi64(x) (x) 66 - #define cfi16_to_cpu(x) (x) 67 - #define cfi32_to_cpu(x) (x) 68 - #define cfi64_to_cpu(x) (x) 26 + #if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP) 27 + #define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN 28 + #elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP) 29 + #define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN 30 + #elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP) 31 + #define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN 69 32 #else 70 33 #error No CFI endianness defined 71 34 #endif 35 + 36 + #define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN) 37 + #define 
cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN) 38 + #define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN) 39 + #define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN) 40 + 41 + #define cpu_to_cfi8(map, x) (x) 42 + #define cfi8_to_cpu(map, x) (x) 43 + #define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x)) 44 + #define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x)) 45 + #define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x)) 46 + #define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x)) 47 + #define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x)) 48 + #define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x)) 49 + 50 + #define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x)) 51 + #define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x)) 52 + #define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x)) 53 + #define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
+2 -1
include/linux/mtd/map.h
··· 26 26 #include <linux/list.h> 27 27 #include <linux/string.h> 28 28 #include <linux/bug.h> 29 - 29 + #include <linux/kernel.h> 30 30 31 31 #include <asm/unaligned.h> 32 32 #include <asm/system.h> ··· 214 214 void __iomem *virt; 215 215 void *cached; 216 216 217 + int swap; /* this mapping's byte-swapping requirement */ 217 218 int bankwidth; /* in octets. This isn't necessarily the width 218 219 of actual bus cycles -- it's the repeat interval 219 220 in bytes, before you are talking to the first chip again.
+260 -84
include/linux/mtd/mtd.h
··· 171 171 struct mtd_erase_region_info *eraseregions; 172 172 173 173 /* 174 - * Erase is an asynchronous operation. Device drivers are supposed 175 - * to call instr->callback() whenever the operation completes, even 176 - * if it completes with a failure. 177 - * Callers are supposed to pass a callback function and wait for it 178 - * to be called before writing to the block. 174 + * Do not call via these pointers, use corresponding mtd_*() 175 + * wrappers instead. 179 176 */ 180 177 int (*erase) (struct mtd_info *mtd, struct erase_info *instr); 181 - 182 - /* This stuff for eXecute-In-Place */ 183 - /* phys is optional and may be set to NULL */ 184 178 int (*point) (struct mtd_info *mtd, loff_t from, size_t len, 185 - size_t *retlen, void **virt, resource_size_t *phys); 186 - 187 - /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ 179 + size_t *retlen, void **virt, resource_size_t *phys); 188 180 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); 189 - 190 - /* Allow NOMMU mmap() to directly map the device (if not NULL) 191 - * - return the address to which the offset maps 192 - * - return -ENOSYS to indicate refusal to do the mapping 193 - */ 194 181 unsigned long (*get_unmapped_area) (struct mtd_info *mtd, 195 182 unsigned long len, 196 183 unsigned long offset, 197 184 unsigned long flags); 185 + int (*read) (struct mtd_info *mtd, loff_t from, size_t len, 186 + size_t *retlen, u_char *buf); 187 + int (*write) (struct mtd_info *mtd, loff_t to, size_t len, 188 + size_t *retlen, const u_char *buf); 189 + int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, 190 + size_t *retlen, const u_char *buf); 191 + int (*read_oob) (struct mtd_info *mtd, loff_t from, 192 + struct mtd_oob_ops *ops); 193 + int (*write_oob) (struct mtd_info *mtd, loff_t to, 194 + struct mtd_oob_ops *ops); 195 + int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, 196 + size_t len); 197 + int (*read_fact_prot_reg) (struct mtd_info 
*mtd, loff_t from, 198 + size_t len, size_t *retlen, u_char *buf); 199 + int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, 200 + size_t len); 201 + int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, 202 + size_t len, size_t *retlen, u_char *buf); 203 + int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len, 204 + size_t *retlen, u_char *buf); 205 + int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, 206 + size_t len); 207 + int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, 208 + unsigned long count, loff_t to, size_t *retlen); 209 + void (*sync) (struct mtd_info *mtd); 210 + int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 211 + int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 212 + int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 213 + int (*block_isbad) (struct mtd_info *mtd, loff_t ofs); 214 + int (*block_markbad) (struct mtd_info *mtd, loff_t ofs); 215 + int (*suspend) (struct mtd_info *mtd); 216 + void (*resume) (struct mtd_info *mtd); 217 + /* 218 + * If the driver is something smart, like UBI, it may need to maintain 219 + * its own reference counting. The below functions are only for driver. 220 + */ 221 + int (*get_device) (struct mtd_info *mtd); 222 + void (*put_device) (struct mtd_info *mtd); 198 223 199 224 /* Backing device capabilities for this device 200 225 * - provides mmap capabilities 201 226 */ 202 227 struct backing_dev_info *backing_dev_info; 203 - 204 - 205 - int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 206 - int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 207 - 208 - /* In blackbox flight recorder like scenarios we want to make successful 209 - writes in interrupt context. panic_write() is only intended to be 210 - called when its known the kernel is about to panic and we need the 211 - write to succeed. 
Since the kernel is not going to be running for much 212 - longer, this function can break locks and delay to ensure the write 213 - succeeds (but not sleep). */ 214 - 215 - int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 216 - 217 - int (*read_oob) (struct mtd_info *mtd, loff_t from, 218 - struct mtd_oob_ops *ops); 219 - int (*write_oob) (struct mtd_info *mtd, loff_t to, 220 - struct mtd_oob_ops *ops); 221 - 222 - /* 223 - * Methods to access the protection register area, present in some 224 - * flash devices. The user data is one time programmable but the 225 - * factory data is read only. 226 - */ 227 - int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len); 228 - int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 229 - int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len); 230 - int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 231 - int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 232 - int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len); 233 - 234 - /* kvec-based read/write methods. 235 - NB: The 'count' parameter is the number of _vectors_, each of 236 - which contains an (ofs, len) tuple. 
237 - */ 238 - int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); 239 - 240 - /* Sync */ 241 - void (*sync) (struct mtd_info *mtd); 242 - 243 - /* Chip-supported device locking */ 244 - int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 245 - int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 246 - int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 247 - 248 - /* Power Management functions */ 249 - int (*suspend) (struct mtd_info *mtd); 250 - void (*resume) (struct mtd_info *mtd); 251 - 252 - /* Bad block management functions */ 253 - int (*block_isbad) (struct mtd_info *mtd, loff_t ofs); 254 - int (*block_markbad) (struct mtd_info *mtd, loff_t ofs); 255 228 256 229 struct notifier_block reboot_notifier; /* default mode before reboot */ 257 230 ··· 238 265 struct module *owner; 239 266 struct device dev; 240 267 int usecount; 241 - 242 - /* If the driver is something smart, like UBI, it may need to maintain 243 - * its own reference counting. The below functions are only for driver. 244 - * The driver may register its callbacks. These callbacks are not 245 - * supposed to be called by MTD users */ 246 - int (*get_device) (struct mtd_info *mtd); 247 - void (*put_device) (struct mtd_info *mtd); 248 268 }; 249 269 250 - static inline struct mtd_info *dev_to_mtd(struct device *dev) 270 + /* 271 + * Erase is an asynchronous operation. Device drivers are supposed 272 + * to call instr->callback() whenever the operation completes, even 273 + * if it completes with a failure. 274 + * Callers are supposed to pass a callback function and wait for it 275 + * to be called before writing to the block. 276 + */ 277 + static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 251 278 { 252 - return dev ? dev_get_drvdata(dev) : NULL; 279 + return mtd->erase(mtd, instr); 280 + } 281 + 282 + /* 283 + * This stuff for eXecute-In-Place. 
phys is optional and may be set to NULL. 284 + */ 285 + static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, 286 + size_t *retlen, void **virt, resource_size_t *phys) 287 + { 288 + *retlen = 0; 289 + if (!mtd->point) 290 + return -EOPNOTSUPP; 291 + return mtd->point(mtd, from, len, retlen, virt, phys); 292 + } 293 + 294 + /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ 295 + static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 296 + { 297 + return mtd->unpoint(mtd, from, len); 298 + } 299 + 300 + /* 301 + * Allow NOMMU mmap() to directly map the device (if not NULL) 302 + * - return the address to which the offset maps 303 + * - return -ENOSYS to indicate refusal to do the mapping 304 + */ 305 + static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, 306 + unsigned long len, 307 + unsigned long offset, 308 + unsigned long flags) 309 + { 310 + if (!mtd->get_unmapped_area) 311 + return -EOPNOTSUPP; 312 + return mtd->get_unmapped_area(mtd, len, offset, flags); 313 + } 314 + 315 + static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, 316 + size_t *retlen, u_char *buf) 317 + { 318 + return mtd->read(mtd, from, len, retlen, buf); 319 + } 320 + 321 + static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, 322 + size_t *retlen, const u_char *buf) 323 + { 324 + *retlen = 0; 325 + if (!mtd->write) 326 + return -EROFS; 327 + return mtd->write(mtd, to, len, retlen, buf); 328 + } 329 + 330 + /* 331 + * In blackbox flight recorder like scenarios we want to make successful writes 332 + * in interrupt context. panic_write() is only intended to be called when its 333 + * known the kernel is about to panic and we need the write to succeed. Since 334 + * the kernel is not going to be running for much longer, this function can 335 + * break locks and delay to ensure the write succeeds (but not sleep). 
336 + */ 337 + static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 338 + size_t *retlen, const u_char *buf) 339 + { 340 + *retlen = 0; 341 + if (!mtd->panic_write) 342 + return -EOPNOTSUPP; 343 + return mtd->panic_write(mtd, to, len, retlen, buf); 344 + } 345 + 346 + static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, 347 + struct mtd_oob_ops *ops) 348 + { 349 + ops->retlen = ops->oobretlen = 0; 350 + if (!mtd->read_oob) 351 + return -EOPNOTSUPP; 352 + return mtd->read_oob(mtd, from, ops); 353 + } 354 + 355 + static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, 356 + struct mtd_oob_ops *ops) 357 + { 358 + ops->retlen = ops->oobretlen = 0; 359 + if (!mtd->write_oob) 360 + return -EOPNOTSUPP; 361 + return mtd->write_oob(mtd, to, ops); 362 + } 363 + 364 + /* 365 + * Method to access the protection register area, present in some flash 366 + * devices. The user data is one time programmable but the factory data is read 367 + * only. 368 + */ 369 + static inline int mtd_get_fact_prot_info(struct mtd_info *mtd, 370 + struct otp_info *buf, size_t len) 371 + { 372 + if (!mtd->get_fact_prot_info) 373 + return -EOPNOTSUPP; 374 + return mtd->get_fact_prot_info(mtd, buf, len); 375 + } 376 + 377 + static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 378 + size_t len, size_t *retlen, 379 + u_char *buf) 380 + { 381 + *retlen = 0; 382 + if (!mtd->read_fact_prot_reg) 383 + return -EOPNOTSUPP; 384 + return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf); 385 + } 386 + 387 + static inline int mtd_get_user_prot_info(struct mtd_info *mtd, 388 + struct otp_info *buf, 389 + size_t len) 390 + { 391 + if (!mtd->get_user_prot_info) 392 + return -EOPNOTSUPP; 393 + return mtd->get_user_prot_info(mtd, buf, len); 394 + } 395 + 396 + static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 397 + size_t len, size_t *retlen, 398 + u_char *buf) 399 + { 400 + *retlen = 0; 401 + if 
(!mtd->read_user_prot_reg) 402 + return -EOPNOTSUPP; 403 + return mtd->read_user_prot_reg(mtd, from, len, retlen, buf); 404 + } 405 + 406 + static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, 407 + size_t len, size_t *retlen, 408 + u_char *buf) 409 + { 410 + *retlen = 0; 411 + if (!mtd->write_user_prot_reg) 412 + return -EOPNOTSUPP; 413 + return mtd->write_user_prot_reg(mtd, to, len, retlen, buf); 414 + } 415 + 416 + static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 417 + size_t len) 418 + { 419 + if (!mtd->lock_user_prot_reg) 420 + return -EOPNOTSUPP; 421 + return mtd->lock_user_prot_reg(mtd, from, len); 422 + } 423 + 424 + int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 425 + unsigned long count, loff_t to, size_t *retlen); 426 + 427 + static inline void mtd_sync(struct mtd_info *mtd) 428 + { 429 + if (mtd->sync) 430 + mtd->sync(mtd); 431 + } 432 + 433 + /* Chip-supported device locking */ 434 + static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 435 + { 436 + if (!mtd->lock) 437 + return -EOPNOTSUPP; 438 + return mtd->lock(mtd, ofs, len); 439 + } 440 + 441 + static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 442 + { 443 + if (!mtd->unlock) 444 + return -EOPNOTSUPP; 445 + return mtd->unlock(mtd, ofs, len); 446 + } 447 + 448 + static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 449 + { 450 + if (!mtd->is_locked) 451 + return -EOPNOTSUPP; 452 + return mtd->is_locked(mtd, ofs, len); 453 + } 454 + 455 + static inline int mtd_suspend(struct mtd_info *mtd) 456 + { 457 + if (!mtd->suspend) 458 + return -EOPNOTSUPP; 459 + return mtd->suspend(mtd); 460 + } 461 + 462 + static inline void mtd_resume(struct mtd_info *mtd) 463 + { 464 + if (mtd->resume) 465 + mtd->resume(mtd); 466 + } 467 + 468 + static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) 469 + { 470 + if (!mtd->block_isbad) 471 + return -EOPNOTSUPP; 472 + return 
mtd->block_isbad(mtd, ofs); 473 + } 474 + 475 + static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) 476 + { 477 + if (!mtd->block_markbad) 478 + return -EOPNOTSUPP; 479 + return mtd->block_markbad(mtd, ofs); 253 480 } 254 481 255 482 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) ··· 482 309 return do_div(sz, mtd->writesize); 483 310 } 484 311 312 + static inline int mtd_has_oob(const struct mtd_info *mtd) 313 + { 314 + return mtd->read_oob && mtd->write_oob; 315 + } 316 + 317 + static inline int mtd_can_have_bb(const struct mtd_info *mtd) 318 + { 319 + return !!mtd->block_isbad; 320 + } 321 + 485 322 /* Kernel-side ioctl definitions */ 486 323 487 324 struct mtd_partition; ··· 521 338 522 339 extern void register_mtd_user (struct mtd_notifier *new); 523 340 extern int unregister_mtd_user (struct mtd_notifier *old); 524 - 525 - int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 526 - unsigned long count, loff_t to, size_t *retlen); 527 - 528 - int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs, 529 - unsigned long count, loff_t from, size_t *retlen); 530 - 531 341 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); 532 342 533 343 void mtd_erase_callback(struct erase_info *instr);
+1
include/linux/mtd/nand.h
··· 555 555 #define NAND_MFR_HYNIX 0xad 556 556 #define NAND_MFR_MICRON 0x2c 557 557 #define NAND_MFR_AMD 0x01 558 + #define NAND_MFR_MACRONIX 0xc2 558 559 559 560 /** 560 561 * struct nand_flash_dev - NAND Flash Device ID Structure
+1
include/linux/mtd/physmap.h
··· 30 30 unsigned int pfow_base; 31 31 char *probe_type; 32 32 struct mtd_partition *parts; 33 + const char **part_probe_types; 33 34 }; 34 35 35 36 #endif /* __LINUX_MTD_PHYSMAP__ */
+2 -1
include/mtd/mtd-abi.h
··· 198 198 #define MEMISLOCKED _IOR('M', 23, struct erase_info_user) 199 199 /* 200 200 * Most generic write interface; can write in-band and/or out-of-band in various 201 - * modes (see "struct mtd_write_req") 201 + * modes (see "struct mtd_write_req"). This ioctl is not supported for flashes 202 + * without OOB, e.g., NOR flash. 202 203 */ 203 204 #define MEMWRITE _IOWR('M', 24, struct mtd_write_req) 204 205